| Column | Type | Stats (length range / classes / min–max) |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
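Each row below follows this schema, with the `content` cell holding the full source file. As a minimal, hedged sketch of how such a dataset can be consumed (the Hub dataset id is not stated on this card, so the name below is a placeholder), it can be streamed with the `datasets` library:

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the actual Hub repo for this card.
ds = load_dataset("org/python-files-dump", split="train", streaming=True)

for record in ds.take(2):
    # Each record carries repo metadata plus the raw file text in `content`.
    print(record["repo_name"], record["path"], record["length_bytes"])
    print(record["content"][:200])
```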
197947df3f6c3b552f542cad538188861870d86f | 95c027e7302751b335b33d287e0efac7483edfc3 | /boj/BOJ_평균.py | aaf5066b2ef4cd7f9d8f10ec2c10ff292124ceba | [] | no_license | kimchaelin13/Algorithm | 01bd4bcb24c58d5d82714e60272d5af91d2d9ce8 | 53f7f3cff5a141cf705af3c9f31cdb9ae997caff | refs/heads/master | 2023-02-03T08:58:26.660299 | 2020-12-20T17:01:16 | 2020-12-20T17:01:16 | 296,996,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import sys
sys.stdin = open("input.txt", "r")

# BOJ average problem: clamp any score below 40 up to 40, then print the rounded mean.
s = []
for i in range(5):
    s.append(int(input()))
for j in range(len(s)):
    if s[j] < 40:
        s[j] = 40
print(round(sum(s)/len(s))) | [
"[email protected]"
] | |
3775521386c59304a0872b9053c2111fdfe7ca55 | da687718aa8ce62974090af63d25e057262e9dfe | /cap14-funcoes/extras/entrada.py | 8f9a269e72ba810cb2bb7d637f9fbdeaae697fbd | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | def valida_inteiro(mensagem, minimo, maximo):
    """Keep prompting until the user enters an integer within [minimo, maximo]."""
    while True:
        try:
            v = int(input(mensagem))
            if minimo <= v <= maximo:
                return v
            else:
                print(f'Digite um valor entre {minimo} e {maximo}.')
        except ValueError:
            print('Voce deve digitar um numero inteiro.') | [
"[email protected]"
] | |
1c1842851e7ef3306eade4b5362a299e7a952d0f | 4cdf4e243891c0aa0b99dd5ee84f09a7ed6dd8c8 | /django2/bookmarks/bookmarks/settings.py | 8277bde8c3c7f242eb407532c2ef68e2c0ae896b | [
"MIT"
] | permissive | gozeon/code-collections | 464986c7765df5dca980ac5146b847416b750998 | 13f07176a6c7b6ac13586228cec4c1e2ed32cae4 | refs/heads/master | 2023-08-17T18:53:24.189958 | 2023-08-10T04:52:47 | 2023-08-10T04:52:47 | 99,432,793 | 1 | 0 | NOASSERTION | 2020-07-17T09:25:44 | 2017-08-05T15:56:53 | JavaScript | UTF-8 | Python | false | false | 3,367 | py | """
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a9va+)ulziy57*cci0qv^v#7lo04$%&t-qj*77hg@77q1_&#_d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'account.apps.AccountConfig',
    'images.apps.ImagesConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
| [
"[email protected]"
] | |
990c0d14f1a9a11941085c4fae1209efd43555c4 | e9988eb38fd515baa386d8b06bb7cce30c34c50d | /sitevenv/lib/python2.7/site-packages/django/utils/translation/trans_real.py | 6ab071dabaf28cf2d985efd0f10d8189984cabaf | [] | no_license | Arrrrrrrpit/Hire_station | 8c2f293677925d1053a4db964ee504d78c3738d8 | f33f044628082f1e034484b5c702fd66478aa142 | refs/heads/master | 2020-07-01T01:24:18.190530 | 2016-09-25T20:33:05 | 2016-09-25T20:33:05 | 201,007,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,622 | py | """Translation helper functions."""
from __future__ import unicode_literals
import gettext as gettext_module
import os
import re
import sys
import warnings
from collections import OrderedDict
from threading import local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.safestring import SafeData, mark_safe
from django.utils.six import StringIO
from django.utils.translation import (
    LANGUAGE_SESSION_KEY, TranslatorCommentWarning, trim_whitespace,
)
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
        ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*)      # "en", "en-au", "x-y-z", "es-419", "*"
        (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))?   # Optional "q=1.00", "q=0.8"
        (?:\s*,\s*|$)                                 # Multiple accepts per header.
        ''', re.VERBOSE)
language_code_re = re.compile(
    r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
    re.IGNORECASE
)
language_code_prefix_re = re.compile(r'^/([\w@-]+)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower() + '_' + language[p + 1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' + language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
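
# Illustrative round trip (comment added for clarity; not part of the original
# Django module):
#   to_locale('sr-latn')   -> 'sr_Latn'
#   to_language('sr_Latn') -> 'sr-latn'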
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
domain = 'django'
def __init__(self, language, domain=None, localedirs=None):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
if domain is not None:
self.domain = domain
self.set_output_charset('utf-8') # For Python 2 gettext() (#25720)
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._catalog = None
if self.domain == 'django':
if localedirs is not None:
# A module-level cache is used for caching 'django' translations
warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
localedirs = None
self._init_translation_catalog()
if localedirs:
for localedir in localedirs:
translation = self._new_gnu_trans(localedir)
self.merge(translation)
else:
self._add_installed_apps_translations()
self._add_local_translations()
if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
# default lang should have at least one translation file available.
raise IOError("No translation files found for default language %s." % settings.LANGUAGE_CODE)
self._add_fallback(localedirs)
if self._catalog is None:
# No catalogs found for this language, set an empty catalog.
self._catalog = {}
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Returns a mergeable gettext.GNUTranslations instance.
A convenience wrapper. By default gettext uses 'fallback=False'.
Using param `use_null_fallback` to avoid confusion with any other
references to 'fallback'.
"""
return gettext_module.translation(
domain=self.domain,
localedir=localedir,
languages=[self.__locale],
codeset='utf-8',
fallback=use_null_fallback)
def _init_translation_catalog(self):
"""Creates a base catalog using global django translations."""
settingsfile = upath(sys.modules[settings.__module__].__file__)
localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_installed_apps_translations(self):
"""Merges translations from each installed app."""
try:
app_configs = reversed(list(apps.get_app_configs()))
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time.")
for app_config in app_configs:
localedir = os.path.join(app_config.path, 'locale')
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merges translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self, localedirs=None):
"""Sets the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
return
if self.domain == 'django':
# Get from cache
default_translation = translation(settings.LANGUAGE_CODE)
else:
default_translation = DjangoTranslation(
settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
if not getattr(other, '_catalog', None):
return # NullTranslations() has no _catalog
if self._catalog is None:
# Take plural and _info from first catalog found (generally Django's).
self.plural = other.plural
self._info = other._info.copy()
self._catalog = other._catalog.copy()
else:
self._catalog.update(other._catalog)
def language(self):
"""Returns the translation language."""
return self.__language
def to_language(self):
"""Returns the translation language name."""
return self.__to_language
def translation(language):
"""
Returns a translation object in the default 'django' domain.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetches the translation object for a given language and installs it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
lang = get_language()
if lang is None:
return False
else:
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
if len(eol_message) == 0:
# Returns an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)("")
else:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = getattr(translation_object, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
    ugettext = gettext
else:
    def ugettext(message):
        return do_translate(message, 'ugettext')
def pgettext(context, message):
    msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = ugettext(msg_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        # force unicode, because lazy version expects unicode
        result = force_text(message)
    return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
    ungettext = ngettext
else:
    def ungettext(singular, plural, number):
        """
        Returns a unicode strings of the translation of either the singular or
        plural, based on the number.
        """
        return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
    msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
                      "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
                      number)
    result = ungettext(*msgs_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = ungettext(singular, plural, number)
    return result
def all_locale_paths():
"""
Returns a list of paths to user-provides languages files.
"""
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
@lru_cache.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
@lru_cache.lru_cache()
def get_languages():
"""
Cache of settings.LANGUAGES in an OrderedDict for easy lookups by key.
"""
return OrderedDict(settings.LANGUAGES)
@lru_cache.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
def get_language_from_path(path, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
supported_lang_codes = get_languages()
if hasattr(request, 'session'):
lang_code = request.session.get(LANGUAGE_SESSION_KEY)
if lang_code in supported_lang_codes and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(
    # Match the trans 'some text' part
    r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))"""
    # Match and ignore optional filters
    r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
    # Match the optional context part
    r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template.base import (
Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT,
TRANSLATOR_COMMENT_MARK,
)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO('')
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
# Adding the u prefix allows gettext to recognize the Unicode string
# (#26093).
raw_prefix = 'u' if six.PY3 else ''
def join_tokens(tokens, trim=False):
message = ''.join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext({p}{!r}, {p}{!r}, {p}{!r},count) '.format(
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed),
p=raw_prefix,
))
else:
out.write(' ngettext({p}{!r}, {p}{!r}, count) '.format(
join_tokens(singular, trimmed),
join_tokens(plural, trimmed),
p=raw_prefix,
))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
message_context,
join_tokens(singular, trimmed),
p=raw_prefix,
))
else:
out.write(' gettext({p}{!r}) '.format(
join_tokens(singular, trimmed),
p=raw_prefix,
))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError(
"Translation blocks must not include other block tags: "
"%s (%sline %d)" % (t.contents, filemsg, t.lineno)
)
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = (
"The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't "
"the last item on the line."
) % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = g.replace('%', '%%')
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
message_context, g, p=raw_prefix
))
message_context = None
else:
out.write(' gettext({p}{!r}) '.format(g, p=raw_prefix))
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = 'trimmed' in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return []
if priority:
try:
priority = float(priority)
except ValueError:
return []
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
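
# Illustrative example (comment added; not part of the original Django module):
#   parse_accept_lang_header('en-us,en;q=0.8,ru;q=0.6')
#   -> [('en-us', 1.0), ('en', 0.8), ('ru', 0.6)]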
| [
"[email protected]"
] | |
ef04eda75ce46249309cb75b6cbb7b0c9858fbe3 | 9f3991f4e7b405c04f2ef03ac7747b5a69d26b4b | /openpyxl/csv_to_excel.py | 8efbab73e40cd93a497cd0cec0ccd755b3044f64 | [] | no_license | zcxyun/pythonDemo | f66eb5e6e4274db2137480786eae4d6ca7e73163 | adf18cf6b58282a7f2f9203aa09d5cb60ced2e35 | refs/heads/master | 2021-07-29T19:06:52.481792 | 2021-07-27T16:10:38 | 2021-07-27T16:10:38 | 101,542,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,467 | py | #!/usr/bin/env
# -*- coding: utf-8 -*-
import csv
import re
import openpyxl
import itertools
from datetime import datetime
# from openpyxl.utils import get_column_letter, column_index_from_string

money = []
payee = []
payeeNumber = []
firstPayDate = []

# with open('resource/example.csv', encoding='utf-8') as originFile:
# Extract the data from the CSV file into the corresponding lists.
with open('E:/新农合银行转账统计表/新农合网银统计银行下载原件/12.1-12.4.csv', encoding='utf-16') as originFile:
    originReader = csv.reader(originFile, delimiter='\t')
    originData = list(originReader)
    for index, title in enumerate(originData[0]):
        if title == '金额':
            for m in originData[1: len(originData)+1]:
                money.append(float(''.join(m[index].split(','))))
        if title == '收款人名称':
            for p in originData[1: len(originData)+1]:
                payee.append(p[index])
        if title == '收款人账号':
            for pn in originData[1: len(originData)+1]:
                payeeNumber.append(pn[index])
        if title == '初次委托日期':
            for fpd in originData[1: len(originData)+1]:
                firstPayDate.append(fpd[index][:10])

# Convert the lists into iterators.
moneyIter = iter(money)
payeeIter = iter(payee)
payeeNumberIter = iter(payeeNumber)
firstPayDateIter = iter(firstPayDate)

# Load the Excel workbook.
wb = openpyxl.load_workbook('E:/新农合银行转账统计表/2017-12-01至2017-12-31.xlsx')
# Get the target worksheet.
sheet0 = wb.get_sheet_by_name('sheet0')
# Get the template worksheet.
sheetTemplate = wb.get_sheet_by_name('sheetTemplate')

# Counter for numbering the copied sheets.
natuals = itertools.count(1)
ns = itertools.takewhile(lambda x: x <= len(money), natuals)

# Copy the CSV data into the Excel workbook according to the rules below,
# recursing onto a fresh copy of the template sheet when the current one fills up.
def copy(sheet):
    try:
        # print(sheet.title)
        for rowOfCellObjects in sheet['B5':'H34']:
            for index, cell in enumerate(rowOfCellObjects):
                if cell.value is None:
                    if index == 0:
                        cell.value = next(payeeIter)
                    if index == 1:
                        cell.value = next(firstPayDateIter)
                    if index == 2:
                        cell.value = next(moneyIter)
                    if index == 3:
                        cell.value = next(payeeNumberIter)
                    if index == 4:
                        cell.value = rowOfCellObjects[0].value
                    if index == 5:
                        cell.value = rowOfCellObjects[2].value
                    # if index == 6:
                    #     cell.value = datetime.now().date()
        ws_next = wb.copy_worksheet(sheetTemplate)
        ws_next.title = sheetTemplate.title[:5] + str(next(ns))
        copy(ws_next)
    except StopIteration:
        return

copy(sheet0)

# Build each new sheet's index column from the previous sheet's last index.
def makeIndex(sheet):
    title = re.match(r'^([a-zA-Z]+)(\d+)$', sheet.title)
    titleStr = title.group(1)
    titleExt = title.group(2)
    titleExtToInt = int(titleExt)
    # print(str(titleExtToInt+1))
    sheetPrev = wb.get_sheet_by_name(titleStr + str(titleExtToInt-1))
    # print(sheetPrev)
    sheet['A5'] = sheetPrev['A34'].value + 1
    # print(sheet['A2'].value)
    for i in range(len(sheet['A5':'A34'])):
        if i >= 1:
            sheet['A5':'A34'][i][0].value = sheet['A5':'A34'][i-1][0].value + 1

# Total the payment amounts.
def moneySum(sheet):
    sheet['D35'] = "=SUM(D5:D34)"
    sheet['G35'] = "=SUM(G5:G34)"

for sh in wb:
    moneySum(sh)
    if sh.title != 'sheetTemplate' and sh.title != 'sheet0':
        makeIndex(sh)

wb.save('E:/新农合银行转账统计表/2017-12-01至2017-12-31.xlsx')
| [
"[email protected]"
] | |
a08b6a7a99b0ab5b2de2ff6bf12388fbf6319a48 | c4bfd8ba4c4c0f21bd6a54a9131f0985a5a4fa56 | /crescent/resources/s3/bucket_policy/constants.py | 5ba83647b2baf057d3d871cc99288b7e11f8f64e | [
"Apache-2.0"
] | permissive | mpolatcan/crescent | 405936ec001002e88a8f62d73b0dc193bcd83010 | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | refs/heads/master | 2022-09-05T04:19:43.745557 | 2020-05-25T00:09:11 | 2020-05-25T00:09:11 | 244,903,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from crescent.core.constants import get_values
class _RequiredProperties:
    class BucketPolicy:
        BUCKET = "Bucket"
        POLICY_DOCUMENT = "PolicyDocument"

# --------------------------------------------------

class ResourceRequiredProperties:
    BUCKET_POLICY = get_values(_RequiredProperties.BucketPolicy)
| [
"[email protected]"
] | |
5aadabb6bec3aec95c8f54c9736e197ced6a47ab | 0daf6763c960cd898e9bb5612b1314d7e34b8870 | /mnist_1/data.py | b1bf29e2af4aca2bbe3f70fd3c775cddef6107cf | [
"MIT"
] | permissive | evanthebouncy/nnhmm | a6ba2a1f0ed2c90a0188de8b5e162351e6668565 | acd76edaa1b3aa0c03d39f6a30e60d167359c6ad | refs/heads/master | 2021-01-12T02:27:32.814908 | 2017-04-01T05:01:24 | 2017-04-01T05:01:24 | 77,956,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,180 | py | import numpy as np
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

X_L = 10
L = 14
N_BATCH = 50
OBS_SIZE = 20
KEEP = 0.6

# ---------------------------- helpers
def black_white(img):
    # Threshold the image: the brightest KEEP fraction of nonzero pixels become 1.0,
    # everything else becomes 0.0.
    new_img = np.copy(img)
    img_flat = img.flatten()
    nonzeros = img_flat[np.nonzero(img_flat)]
    sortedd = np.sort(nonzeros)
    idxx = round(len(sortedd) * (1.0 - KEEP))
    thold = sortedd[idxx]
    mask_pos = img >= thold
    mask_neg = img < thold
    new_img[mask_pos] = 1.0
    new_img[mask_neg] = 0.0
    return new_img

def vectorize(coords):
    retX, retY = np.zeros([L]), np.zeros([L])
    retX[coords[0]] = 1.0
    retY[coords[1]] = 1.0
    return retX, retY

# show dimension of a data object (list of list or a tensor)
def show_dim(lst1):
    if hasattr(lst1, '__len__') and len(lst1) > 0:
        return [len(lst1), show_dim(lst1[0])]
    else:
        try:
            return lst1.get_shape()
        except:
            try:
                return lst1.shape
            except:
                return type(lst1)

# -------------------------------------- making the datas
# assume X is already a 2D matrix
def mk_query(X):
    def query(O):
        Ox, Oy = O
        if X[Ox][Oy] == 1.0:
            return [1.0, 0.0]
        else:
            return [0.0, 1.0]
    return query

def sample_coord():
    return np.random.randint(0, L), np.random.randint(0, L)

def sample_coord_center():
    Ox, Oy = np.random.multivariate_normal([L/2, L/2], [[L*0.7, 0.0], [0.0, L*0.7]])
    Ox, Oy = round(Ox), round(Oy)
    if 0 <= Ox < L:
        if 0 <= Oy < L:
            return Ox, Oy
    return sample_coord()

def sample_coord_bias(qq):
    def find_positive(qq):
        C = sample_coord()
        if qq(C) == [1.0, 0.0]:
            return C
        else:
            return find_positive(qq)
    def find_negative(qq):
        C = sample_coord()
        if qq(C) == [0.0, 1.0]:
            return C
        else:
            return find_negative(qq)
    toss = np.random.random() < 0.5
    if toss:
        return find_positive(qq)
    else:
        return find_negative(qq)

def gen_O(X):
    query = mk_query(X)
    Ox, Oy = sample_coord()
    O = (Ox, Oy)
    return O, query(O)

def get_img_class(test=False):
    img, _x = mnist.train.next_batch(1)
    if test:
        img, _x = mnist.test.next_batch(1)
    img = np.reshape(img[0], [2*L, 2*L])
    # rescale the image to 14 x 14
    # img = imresize(img, (14,14), interp='nearest') / 255.0
    img = gaussian_filter(imresize(img, (14,14)) / 255.0, 0.11)
    img = black_white(img)
    return img, _x[0]

# a trace is a named tuple (Img, S, Os)
# where Img is the black/white image
# where S is the hidden hypothesis (i.e. label of the img)
# Os is a set of Observations which is (qry_pt, label)
import collections
Trace = collections.namedtuple('Trace', 'Img S Os')

def gen_rand_trace(test=False):
    img, _x = get_img_class(test)
    obs = []
    for ob_idx in range(OBS_SIZE):
        obs.append(gen_O(img))
    return Trace(img, _x, obs)

# a class to hold the experiences
class Experience:
    def __init__(self, buf_len):
        self.buf = []
        self.buf_len = buf_len

    def trim(self):
        while len(self.buf) > self.buf_len:
            self.buf.pop()

    def add(self, trace):
        self.buf.append(trace)
        self.trim()

    def sample(self):
        idxxs = np.random.choice(len(self.buf), size=1, replace=False)
        return self.buf[idxxs[0]]

def data_from_exp(exp):
    traces = [exp.sample() for _ in range(N_BATCH)]
    x = []
    obs_x = [[] for i in range(OBS_SIZE)]
    obs_y = [[] for i in range(OBS_SIZE)]
    obs_tfs = [[] for i in range(OBS_SIZE)]
    new_ob_x = []
    new_ob_y = []
    new_ob_tf = []
    imgs = []

    for bb in range(N_BATCH):
        trr = traces[bb]
        # generate a hidden variable X
        # get a single thing out
        img = trr.Img
        _x = trr.S
        imgs.append(img)
        x.append(_x)
        # generate a FRESH new observation for demanding an answer
        _new_ob_coord, _new_ob_lab = gen_O(img)
        _new_ob_x, _new_ob_y = vectorize(_new_ob_coord)
        new_ob_x.append(_new_ob_x)
        new_ob_y.append(_new_ob_y)
        new_ob_tf.append(_new_ob_lab)
        # generate observations for this hidden variable x
        for ob_idx in range(OBS_SIZE):
            _ob_coord, _ob_lab = trr.Os[ob_idx]
            _ob_x, _ob_y = vectorize(_ob_coord)
            obs_x[ob_idx].append(_ob_x)
            obs_y[ob_idx].append(_ob_y)
            obs_tfs[ob_idx].append(_ob_lab)

    return np.array(x, np.float32),\
           np.array(obs_x, np.float32),\
           np.array(obs_y, np.float32),\
           np.array(obs_tfs, np.float32),\
           np.array(new_ob_x, np.float32),\
           np.array(new_ob_y, np.float32),\
           np.array(new_ob_tf, np.float32), imgs

# the thing is we do NOT use the trace observations, we need to generate random observations
# to be sure we can handle all kinds of randomizations
def inv_data_from_label_data(labelz, inputz):
    labs = []
    obss = []
    for bb in range(N_BATCH):
        img = inputz[bb]
        lab = labelz[bb]
        labs.append(lab)
        obs = np.zeros([L, L, 2])
        # generate observations for this hidden variable x
        for ob_idx in range(OBS_SIZE):
            ob_coord, ob_lab = gen_O(img)
            ox, oy = ob_coord
            if ob_lab[0] == 1.0:
                obs[ox][oy][0] = 1.0
            if ob_lab[1] == 1.0:
                obs[ox][oy][1] = 1.0
        obss.append(obs)
    return np.array(labs, np.float32),\
           np.array(obss, np.float32)

# uses trace info
def inv_batch_obs(labz, batch_Os):
    obss = []
    for bb in range(N_BATCH):
        Os = batch_Os[bb]
        obs = np.zeros([L, L, 2])
        # generate observations for this hidden variable x
        for ob_idx in range(OBS_SIZE):
            ob_coord, ob_lab = Os[ob_idx]
            ox, oy = ob_coord
            if ob_lab[0] == 1.0:
                obs[ox][oy][0] = 1.0
            if ob_lab[1] == 1.0:
                obs[ox][oy][1] = 1.0
        obss.append(obs)
    return np.array(labz, np.float32),\
           np.array(obss, np.float32)
# def gen_data():
# x = []
#
# obs_x = [[] for i in range(OBS_SIZE)]
# obs_y = [[] for i in range(OBS_SIZE)]
# obs_tfs = [[] for i in range(OBS_SIZE)]
# new_ob_x = []
# new_ob_y = []
# new_ob_tf = []
#
# imgs = []
#
# for bb in range(N_BATCH):
# # generate a hidden variable X
# # get a single thing out
# img, _x = get_img_class()
# imgs.append(img)
#
# # add to x
# x.append(_x[0])
# # generate new observation
# _new_ob_coord, _new_ob_lab = gen_O(img)
# _new_ob_x, _new_ob_y = vectorize(_new_ob_coord)
# new_ob_x.append(_new_ob_x)
# new_ob_y.append(_new_ob_y)
# new_ob_tf.append(_new_ob_lab)
#
# # generate observations for this hidden variable x
# for ob_idx in range(OBS_SIZE):
# _ob_coord, _ob_lab = gen_O(img)
# _ob_x, _ob_y = vectorize(_ob_coord)
# obs_x[ob_idx].append(_ob_x)
# obs_y[ob_idx].append(_ob_y)
# obs_tfs[ob_idx].append(_ob_lab)
#
# return np.array(x, np.float32),\
# np.array(obs_x, np.float32),\
# np.array(obs_y, np.float32),\
# np.array(obs_tfs, np.float32),\
# np.array(new_ob_x, np.float32),\
# np.array(new_ob_y, np.float32),\
# np.array(new_ob_tf, np.float32), imgs
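# Illustrative sketch of the observation encoding above (comment added; not
# original code): a query O = (3, 7) on an image with a bright pixel there
# returns [1.0, 0.0], and vectorize((3, 7)) yields two length-L one-hot vectors
# marking row 3 and column 7, which is how observations enter the batch builders.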
| [
"[email protected]"
] | |
a232db2616848042691d1e7c825e015af8882aab | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/3409403/snippet.py | 0e119325f7fbab1ea9ab0c1fde4fd52135828a49 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 547 | py | from google.appengine.ext import db, ndb
from google.appengine.datastore import entity_pb
def db_entity_to_protobuf(e):
    return db.model_to_protobuf(e).Encode()

def protobuf_to_db_entity(pb):
    # precondition: model class must be imported
    return db.model_from_protobuf(entity_pb.EntityProto(pb))

def ndb_entity_to_protobuf(e):
    return ndb.ModelAdapter().entity_to_pb(e).Encode()

def protobuf_to_ndb_entity(pb):
    # precondition: model class must be imported
    return ndb.ModelAdapter().pb_to_entity(entity_pb.EntityProto(pb))
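
# Hypothetical round trip (comment added for illustration; `MyModel` is a
# placeholder, not defined in this snippet):
#   raw = ndb_entity_to_protobuf(MyModel(name='x'))   # compact bytes
#   entity = protobuf_to_ndb_entity(raw)              # MyModel must be imported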
| [
"[email protected]"
] | |
9a3f3542a14276c1794492528c5d906908c7f791 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /evaluation/logger/pytracking.py | 6e17451f77982c297479789660635ffca35a1ee4 | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | import numpy as np
import os
class PyTrackingLogger:
    def __init__(self, output_path=None):
        self.output_path = output_path

    def log_sequence_result(self, name: str, predicted_bboxes: np.ndarray, **kwargs):
        print(f'Sequence: {name}')
        print(f'FPS: {kwargs["fps"]}')
        predicted_bboxes = predicted_bboxes.copy()
        # Shift the top-left corner by one pixel (0-based to 1-based indexing).
        predicted_bboxes[:, 0] += 1
        predicted_bboxes[:, 1] += 1
        if self.output_path is not None:
            np.savetxt(os.path.join(self.output_path, '{}.txt'.format(name)), predicted_bboxes, delimiter='\t',
                       fmt='%d')
| [
"[email protected]"
] | |
a9ab497015525833279bb4f7cb7b294f7e35efe7 | 5fd401dbc7b9ac782d387067c43a559971de5028 | /modules/file/upload.py | 5ad9b4a030323d70e88e69b37d7ef047af896c60 | [] | no_license | SagaieNet/weevely3 | 0b41be1bbd08a8ebde1e236775462483ad175c6e | c169bbf24807a581b3f61a455b9a43a5d48c8f52 | refs/heads/master | 2021-01-22T15:00:19.838734 | 2014-09-30T20:00:07 | 2014-09-30T20:00:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | from core.vectors import PhpCmd, ModuleCmd
from core.module import Module
from core import messages
from core.loggers import log
import random
import hashlib
import base64
class Upload(Module):
"""Upload file to remote filesystem."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_arguments(
# Declare mandatory arguments
mandatory = [
'lpath',
'rpath'
],
# Declare additional options
optional = {
'content': '',
'vector': ''
},
bind_to_vectors = 'vector')
self.register_vectors(
[
PhpCmd(
"(file_put_contents('${rpath}', base64_decode('${content}'))&&print(1)) || print(0);",
name = 'file_put_contents'
),
PhpCmd(
"""($h=fopen("${rpath}","a+")&&fwrite($h, base64_decode('${content}'))&&fclose($h)&&print(1)) || print(0);""",
name = "fwrite"
)
]
)
def run(self, args):
# Load local file
content_orig = args.get('content')
if not content_orig:
lpath = args.get('lpath')
try:
content_orig = open(lpath, 'r').read()
except Exception, e:
log.warning(
messages.generic.error_loading_file_s_s % (lpath, str(e)))
return
content = base64.b64encode(content_orig)
# Check remote file existence
rpath_exists = ModuleCmd('file_check', [ args['rpath'], 'exists' ]).run()
if rpath_exists:
log.warning(messages.generic.error_file_s_already_exists % args['rpath'])
return
vector_name, result = self.vectors.find_first_result(
format_args = { 'args' : args, 'content' : content },
condition = lambda result: True if result == '1' else False
)
| [
"[email protected]"
] | |
6bf462112c68e100b92acc5b9b8ed814e8f09d27 | ef4a1748a5bfb5d02f29390d6a66f4a01643401c | /algorithm/new_teacher_algorithm/AD/도약.py | 5c781e9d4bc4c9a28efdc8ca127c58b5528ef92d | [] | no_license | websvey1/TIL | aa86c1b31d3efc177df45503d705b3e58b800f8e | 189e797ba44e2fd22a033d1024633f9e0128d5cf | refs/heads/master | 2023-01-12T10:23:45.677578 | 2019-12-09T07:26:59 | 2019-12-09T07:26:59 | 162,102,142 | 0 | 1 | null | 2022-12-11T16:31:08 | 2018-12-17T08:57:58 | Python | UTF-8 | Python | false | false | 2,093 | py | import sys
sys.stdin = open("도약.txt")

###########################################################
############### version using two searches ###############
###########################################################
# def lowerSearch(s, e, f):
#     # return the position of the smallest value that is >= f
#     sol = -1
#     while s <= e:
#         m = (s+e)//2
#         if data[m] >= f:  # still >= f, so search the left half for a smaller hit
#             sol = m
#             e = m-1
#         else:
#             s = m+1  # search right
#     return sol
#
# def upperSearch(s, e, f):
#     # return the position of the largest value that is <= f
#     sol = -1
#     while s <= e:
#         m = (s+e)//2
#         if data[m] <= f:  # still <= f, so search the right half for a larger hit
#             sol = m
#             s = m+1
#         else:
#             e = m-1
#     return sol
#
# N = int(input())
# data = sorted([(int(input())) for i in range(N)])
# cnt = 0
# for i in range(N-2):
#     for j in range(i+1, N-1):
#         S = data[j] + (data[j] - data[i])
#         E = data[j] + (data[j] - data[i])*2
#         lo = lowerSearch(j+1, N-1, S)
#         if lo == -1 or data[lo] > E: continue
#         up = upperSearch(j+1, N-1, E)
#         cnt += (up-lo+1)
# print(cnt)

###########################################################
############### version using one search #################
###########################################################
def upperSearch(s, e, f):
    # return the position of the largest value that is strictly < f
    sol = -1
    while s <= e:
        m = (s+e)//2
        if data[m] < f:  # still below f, so search the right half for a larger hit
            s = m + 1
            sol = m
        else:
            e = m-1
    return sol

N = int(input())
data = sorted([(int(input())) for i in range(N)])
cnt = 0
for i in range(N-2):
    for j in range(i+1, N-1):
        S = data[j] + (data[j] - data[i])
        E = data[j] + (data[j] - data[i])*2
        # count of values in [S, E]: (# values < E+1) - (# values < S)
        cnt += upperSearch(j, N-1, E+1) - upperSearch(j, N-1, S)
print(cnt)
| [
"[email protected]"
] | |
f6721ea11faae85216d3a60224be9c9ec0207242 | 3fc4bca70bb817d8c14c2e6eaf29cda765e8cf19 | /venv/bin/pip3.6 | 548512482798bf15606d78d042dcc9e3e66cd6e0 | [] | no_license | OseiasBeu/ExemploDjango | 4841412dca9e15613671951cdcb18ea8d18ff68a | 31e4dfb12b84d217ff989bd7fc0fa5636312b8c4 | refs/heads/master | 2020-03-28T16:05:10.873540 | 2018-09-13T15:22:59 | 2018-09-13T15:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | 6 | #!/home/OseiasBeu/Documents/LPs/django/projeto/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | |
b563da1a4aa94a36c4599e6482162f6ded7d93e9 | 5b2218208aef68cf06609bcc3bf42b499d99d5f6 | /docs/source/conf.py | e94bc7c7e29be180973b828865b19e7712c33ad6 | [
"MIT"
] | permissive | c137digital/unv_app_template | c36cacfff3e0be0b00ecad6365b20b434836ffe7 | a1d1f2463334afc668cbf4e8acbf1dcaacc93e80 | refs/heads/master | 2020-05-25T19:24:17.098451 | 2020-04-24T19:33:08 | 2020-04-24T19:33:08 | 187,950,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,816 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# -- Project information -----------------------------------------------------
project = 'unv_app_template'
copyright = '2020, change'
author = 'change'
# The short X.Y version
version = '0.1'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'unv_app_templatedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'unv_app_template.tex', 'unv\\_template Documentation',
     'change', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'unv_app_template', 'unv_app_template Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'unv_app_template', 'unv_app_template Documentation',
     author, 'unv_app_template', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"[email protected]"
] | |
4c1e7c5233cb21118ed12162d9ba099c0665a80a | e0980f704a573894350e285f66f4cf390837238e | /.history/flex/models_20201029143145.py | b68119b214fd032d442ee5cf1b7492154ab67eb2 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.core import blocks as wagtail_
from streams import blocks
from home.models import new_table_options
class FlexPage(Page):
    body = StreamField([
        ('title', blocks.TitleBlock()),
        ('cards', blocks.CardsBlock()),
        ('image_and_text', blocks.ImageAndTextBlock()),
        ('cta', blocks.CallToActionBlock()),
        ('testimonial', SnippetChooserBlock(
            target_model='testimonials.Testimonial',
            template='streams/testimonial_block.html'
        )),
        ('pricing_table', blocks.PricingTableBlock(table_options=new_table_options)),
    ], null=True, blank=True)

    content_panels = Page.content_panels + [
        StreamFieldPanel('body'),
    ]

    class Meta:
        verbose_name = 'Flex (misc) page'
        verbose_name_plural = 'Flex (misc) pages' | [
"[email protected]"
] | |
71bfd188e3307f50316b5807460e05e6b0dab81e | 0be27c0a583d3a8edd5d136c091e74a3df51b526 | /int_long.py | 09d9178607925a32fd93bcf2ea90ca80acb00f96 | [] | no_license | ssangitha/guvicode | 3d38942f5d5e27a7978e070e14be07a5269b01fe | ea960fb056cfe577eec81e83841929e41a31f72e | refs/heads/master | 2020-04-15T05:01:00.226391 | 2019-09-06T10:08:23 | 2019-09-06T10:08:23 | 164,405,935 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | n=int(input())
if -2**15 <= n <= 2**15 - 1:
    print("INT")
elif -2**31 <= n <= 2**31 - 1:
    print("LONG")
else:
    print("LONG LONG")
# classify n by the smallest signed C type that holds it: int, long, long long
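# For reference (comment added): a signed 16-bit int spans -32768..32767 and a
# signed 32-bit int spans -2147483648..2147483647, which is what the bounds
# above encode.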
| [
"[email protected]"
] | |
e32bd0130a28604d940e0a1e7d79496057d8a0cb | 66a9c25cf0c53e2c3029b423018b856103d709d4 | /tests/live_test.py | b71930af68b02cc6137cb3b01a6f80f39c0ef9f3 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | fritzy/SleekXMPP | 1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf | cc1d470397de768ffcc41d2ed5ac3118d19f09f5 | refs/heads/develop | 2020-05-22T04:14:58.568822 | 2020-02-18T22:54:57 | 2020-02-18T22:54:57 | 463,405 | 658 | 254 | NOASSERTION | 2023-06-27T20:05:54 | 2010-01-08T05:54:45 | Python | UTF-8 | Python | false | false | 3,422 | py | import logging
from sleekxmpp.test import *
class TestLiveStream(SleekTest):
"""
Test that we can test a live stanza stream.
"""
def tearDown(self):
self.stream_close()
def testClientConnection(self):
"""Test that we can interact with a live ClientXMPP instance."""
self.stream_start(mode='client',
socket='live',
skip=False,
jid='user@localhost/test',
password='user')
# Use sid=None to ignore any id sent by the server since
# we can't know it in advance.
self.recv_header(sfrom='localhost', sid=None)
self.send_header(sto='localhost')
self.recv_feature("""
<stream:features>
<starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
<mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">
<mechanism>DIGEST-MD5</mechanism>
<mechanism>PLAIN</mechanism>
</mechanisms>
</stream:features>
""")
self.send_feature("""
<starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
""")
self.recv_feature("""
<proceed xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
""")
self.send_header(sto='localhost')
self.recv_header(sfrom='localhost', sid=None)
self.recv_feature("""
<stream:features>
<mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">
<mechanism>DIGEST-MD5</mechanism>
<mechanism>PLAIN</mechanism>
</mechanisms>
</stream:features>
""")
self.send_feature("""
<auth xmlns="urn:ietf:params:xml:ns:xmpp-sasl"
mechanism="PLAIN">AHVzZXIAdXNlcg==</auth>
""")
self.recv_feature("""
<success xmlns="urn:ietf:params:xml:ns:xmpp-sasl" />
""")
self.send_header(sto='localhost')
self.recv_header(sfrom='localhost', sid=None)
self.recv_feature("""
<stream:features>
<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind" />
<session xmlns="urn:ietf:params:xml:ns:xmpp-session" />
</stream:features>
""")
# Should really use send, but our Iq stanza objects
# can't handle bind element payloads yet.
self.send_feature("""
<iq type="set" id="1">
<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">
<resource>test</resource>
</bind>
</iq>
""")
self.recv_feature("""
<iq type="result" id="1">
<bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">
<jid>user@localhost/test</jid>
</bind>
</iq>
""")
self.stream_close()
suite = unittest.TestLoader().loadTestsFromTestCase(TestLiveStream)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)-8s %(message)s')
tests = unittest.TestSuite([suite])
result = unittest.TextTestRunner(verbosity=2).run(tests)
test_ns = 'http://andyet.net/protocol/tests'
print("<tests xmlns='%s' %s %s %s %s />" % (
test_ns,
'ran="%s"' % result.testsRun,
'errors="%s"' % len(result.errors),
'fails="%s"' % len(result.failures),
'success="%s"' % result.wasSuccessful()))
| [
"[email protected]"
] | |
f5058ccbcc8449198222100dc98b9d6777472a89 | 2f6d017dedc68588b2615d65c1e8ca8bcdd90446 | /api/deploy/write_json.py | 01f5c1cf3ca63b0de2e90ca19f9b694b331c12f5 | [] | no_license | hysunflower/benchmark | 70fc952a4eb1545208543627539d72e991cef78a | c14f99c15b4be9e11f56ea378ca15d9c3da23bab | refs/heads/master | 2022-06-30T07:04:14.986050 | 2022-06-15T02:43:04 | 2022-06-15T02:43:04 | 224,449,279 | 1 | 0 | null | 2019-11-27T14:29:29 | 2019-11-27T14:29:29 | null | UTF-8 | Python | false | false | 3,347 | py | #!/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import op_benchmark_unit
COMPARE_RESULT_SHOWS = {
"Better": "优于",
"Equal": "打平",
"Less": "差于",
"Unknown": "未知",
"Unsupport": "不支持",
"Others": "其他",
"Total": "汇总"
}
def create_summary_json(compare_result, category):
summary_json_result = list()
compare_result_colors = {"Better": "green", "Less": "red"}
compare_result_keys = compare_result.compare_result_keys
titles = {"title": 1, "row_0": category}
for (i, compare_result_key) in enumerate(compare_result_keys, 1):
titles["row_%i" % i] = COMPARE_RESULT_SHOWS[compare_result_key]
summary_json_result.append(titles)
for device in ["gpu", "cpu"]:
for direction in ["forward", "backward"]:
for method in ["total", "kernel"]:
if device == "cpu": continue
data = {
"title": 0,
"row_0": "{} {} ({})".format(device.upper(),
direction.capitalize(),
method)
}
value = compare_result.get(device, direction, method)
num_total_cases = value["Total"]
for (i, compare_result_key) in enumerate(compare_result_keys,
1):
num_cases = value[compare_result_key]
if num_cases > 0:
ratio = float(num_cases) / float(num_total_cases)
this_str = "{} ({:.2f}%)".format(num_cases,
ratio * 100)
else:
this_str = "--"
data["row_%i" % i] = this_str
summary_json_result.append(data)
return summary_json_result
def dump_json(benchmark_result_list, output_path=None):
"""
dump data to a json file
"""
if output_path is None:
print("Output path is not specified, will not dump json.")
return
compare_result_case_level = op_benchmark_unit.summary_compare_result(
benchmark_result_list)
compare_result_op_level = op_benchmark_unit.summary_compare_result_op_level(
benchmark_result_list)
with open(output_path, 'w') as f:
summary_case_json = create_summary_json(compare_result_case_level,
"case_level")
        summary_op_json = create_summary_json(compare_result_op_level,
                                              "op_level")
f.write(json.dumps(summary_case_json + summary_op_json))
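

# A minimal usage sketch. Real inputs come from op_benchmark_unit; the stub
# below is hypothetical and only mimics the two members this module reads
# (`compare_result_keys` and `get(device, direction, method)`).
class _FakeCompareResult(object):
    compare_result_keys = ["Better", "Equal", "Less"]

    def get(self, device, direction, method):
        return {"Total": 10, "Better": 6, "Equal": 3, "Less": 1}


if __name__ == "__main__":
    demo_rows = create_summary_json(_FakeCompareResult(), "case_level")
    print(json.dumps(demo_rows, indent=2))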
| [
"[email protected]"
] | |
89189e31f7eff193f8991a28da369417a28ae86d | 68cd659b44f57adf266dd37789bd1da31f61670d | /D2/D2_20190715파리퇴치.py | 37273d877cfb93458b8b8fdef4531e610039777c | [] | no_license | 01090841589/solved_problem | c0c6f5a46e4d48860dccb3b0288aa5b56868fbca | bbea2f31e5fe36cad100bc514eacd83545fb25b1 | refs/heads/master | 2023-07-02T23:55:51.631478 | 2021-08-04T13:57:00 | 2021-08-04T13:57:00 | 197,157,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | def arrr(N) :
for i in range(N) :
        inp = [int(j) for j in input().split()]
fly.append(inp)
return fly
def max_cal(fly,N,M):
sum_num = 0
max_num = 0
for i in range(N-M+1) :
for j in range(N-M+1) :
for l in range(M) :
for m in range(M) :
sum_num += fly[l+i][m+j]
if max_num < sum_num :
max_num = sum_num
sum_num = 0
    return max_num
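# Optional optimization sketch (illustrative, not used by the main loop
# below): the same maximum via a 2-D prefix-sum table, reducing the cost
# from O(N^2 * M^2) to O(N^2).
def max_cal_prefix(fly, N, M):
    pre = [[0] * (N + 1) for _ in range(N + 1)]
    for i in range(N):
        for j in range(N):
            pre[i + 1][j + 1] = fly[i][j] + pre[i][j + 1] + pre[i + 1][j] - pre[i][j]
    best = 0
    for i in range(M, N + 1):
        for j in range(M, N + 1):
            best = max(best, pre[i][j] - pre[i - M][j] - pre[i][j - M] + pre[i - M][j - M])
    return best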
T = int(input())
for a in range(T):
    N, M = map(int, input().split())
    fly = []
    fly = arrr(N)
    print('#{0} {1}'.format(a + 1, max_cal(fly, N, M))) | [
"[email protected]"
] | |
f21360c68557a49b1b4e4413627b85cd6737f75c | 73c9211d5627594e0191510f0b4d70a907f5c4c5 | /nn/keras_dataguru/lesson2/work2.py | 4dcacb8f30dd2feaffbd330256b8915e94435bcf | [] | no_license | tigerxjtu/py3 | 35378f270363532fb30962da8674dbcee99eb5ff | 5d24cd074f51bd0f17f6cc4f5f1a6e7cf0d48779 | refs/heads/master | 2021-07-13T05:34:15.080119 | 2020-06-24T09:36:33 | 2020-06-24T09:36:33 | 159,121,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import keras
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
# In[2]:
(x_train,y_train),(x_test,y_test)=mnist.load_data()
print('x_shape:',x_train.shape) #(60000,28,28)
print('y_shape:',y_train.shape) #(60000,)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
y_train = np_utils.to_categorical(y_train,num_classes=10)
y_test = np_utils.to_categorical(y_test,num_classes=10)
# In[8]:
# model=Sequential([Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')])
model=Sequential()
model.add(Dense(units=256,input_dim=x_train.shape[1],activation='relu'))
model.add(Dense(units=10,activation='softmax'))
sgd=SGD(lr=0.2)
model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
# In[9]:
model.fit(x_train,y_train,batch_size=32,epochs=10)
loss,accuracy=model.evaluate(x_test,y_test)
print('\ntest loss:',loss)
print('accuracy:',accuracy)
# In[ ]:
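# A small follow-up sketch: decode a handful of test predictions back to
# digit labels (x_test is already flattened and scaled above).
predictions = model.predict(x_test[:5])
print('predicted digits:', predictions.argmax(axis=1))
print('true digits:     ', y_test[:5].argmax(axis=1))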
| [
"[email protected]"
] | |
9f719d70ff61b820cde1a602f393ba9c91b6514b | d83f50302702d6bf46c266b8117514c6d2e5d863 | /number-of-boomerangs.py | 7add2f05767711dbf020f6215cc8f92ec9b5a59c | [] | no_license | sfdye/leetcode | 19764a6bdb82de114a2c82986864b1b2210c6d90 | afc686acdda4168f4384e13fb730e17f4bdcd553 | refs/heads/master | 2020-03-20T07:58:52.128062 | 2019-05-05T08:10:41 | 2019-05-05T08:10:41 | 137,295,892 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | class Solution(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
        import collections  # local import; the file never imports collections at module level
        ans = 0
for p in points:
d = collections.defaultdict(int)
for q in points:
d[(p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2] += 1
for k in d.values():
ans += k * (k - 1)
return ans
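# A tiny self-check with hypothetical input (the classic sample): for three
# collinear, evenly spaced points there are exactly two boomerangs.
if __name__ == "__main__":
    assert Solution().numberOfBoomerangs([[0, 0], [1, 0], [2, 0]]) == 2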
| [
"[email protected]"
] | |
840b8e213aaeafea3b9c2b03e58bd84996694d5a | bafcde124dd3af37ef14e322e0e76e82d8684469 | /restapi/services/models/PasswordResetModel.py | 47e24ce9d570005a767967ad9265625a49a108e4 | [] | no_license | mentimun-mentah/balihot-property-backend | 1c7ac91c04f791ca55f5f97e872034fbc30a8d32 | b715cc3988ca70d16dbe2e89839653af310fa091 | refs/heads/master | 2022-12-27T15:01:23.196310 | 2020-09-06T17:00:53 | 2020-09-06T17:00:53 | 268,197,437 | 1 | 0 | null | 2020-10-20T16:55:31 | 2020-05-31T02:46:41 | Python | UTF-8 | Python | false | false | 1,310 | py | import uuid, os
from services.serve import db
from time import time
from flask import url_for
from sqlalchemy import func
from services.libs.MailSmtp import MailSmtp
class PasswordReset(db.Model):
__tablename__ = 'password_resets'
id = db.Column(db.String(100),primary_key=True)
email = db.Column(db.String(100),unique=True,index=True,nullable=False)
resend_expired = db.Column(db.Integer,nullable=True)
created_at = db.Column(db.DateTime,default=func.now())
def __init__(self,email: str):
self.email = email
        self.resend_expired = int(time()) + 300  # expires in 5 minutes
self.id = uuid.uuid4().hex
def send_email_reset_password(self) -> None:
link = os.getenv("APP_URL") + url_for('user.reset_password',token=self.id)
MailSmtp.send_email([self.email],'Reset Password','email/EmailResetPassword.html',link=link)
@property
def resend_is_expired(self) -> bool:
return int(time()) > self.resend_expired
    def change_resend_expired(self) -> None:
        self.resend_expired = int(time()) + 300  # expires in 5 minutes
def save_to_db(self) -> None:
db.session.add(self)
db.session.commit()
def delete_from_db(self) -> None:
db.session.delete(self)
db.session.commit()
| [
"[email protected]"
] | |
014e8f1ddcd99487d99ffa878a6e6cfa7d50ed6c | d55bda4c4ba4e09951ffae40584f2187da3c6f67 | /h/admin/views/groups.py | 0caffe6dcf887350fc17bfffc50c9f1ecc8b64bc | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"BSD-2-Clause",
"MIT"
] | permissive | ficolo/h | 3d12f78fe95843b2a8f4fc37231363aa7c2868d9 | 31ac733d37e77c190f359c7ef5d59ebc9992e531 | refs/heads/master | 2021-01-15T21:08:17.554764 | 2016-06-09T15:42:01 | 2016-06-09T15:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | # -*- coding: utf-8 -*-
from pyramid.view import view_config
from h import models
from h import paginator
@view_config(route_name='admin_groups',
request_method='GET',
renderer='h:templates/admin/groups.html.jinja2',
permission='admin_groups')
@paginator.paginate
def groups_index(context, request):
return models.Group.query.order_by(models.Group.created.desc())
@view_config(route_name='admin_groups_csv',
request_method='GET',
renderer='csv',
permission='admin_groups')
def groups_index_csv(request):
groups = models.Group.query
header = ['Group name', 'Group URL', 'Creator username',
'Creator email', 'Number of members']
rows = [[group.name,
request.route_url('group_read',
pubid=group.pubid,
slug=group.slug),
group.creator.username,
group.creator.email,
len(group.members)] for group in groups]
filename = 'groups.csv'
request.response.content_disposition = 'attachment;filename=' + filename
return {'header': header, 'rows': rows}
def includeme(config):
config.scan(__name__)
| [
"[email protected]"
] | |
b9aeff68654c2ed50000a30879c2e21c640d81e5 | 0206ac23a29673ee52c367b103dfe59e7733cdc1 | /src/nemo/compare_2nemo_simulations.py | 041bbfd0229b247c34b4796abf04bc639b9483ae | [] | no_license | guziy/RPN | 2304a93f9ced626ae5fc8abfcc079e33159ae56a | 71b94f4c73d4100345d29a6fbfa9fa108d8027b5 | refs/heads/master | 2021-11-27T07:18:22.705921 | 2021-11-27T00:54:03 | 2021-11-27T00:54:03 | 2,078,454 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | from collections import namedtuple
from pathlib import Path
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from nemo.nemo_yearly_files_manager import NemoYearlyFilesManager
__author__ = 'huziy'
# Compare 2 Nemo outputs
import matplotlib.pyplot as plt
import numpy as np
def main_compare_max_yearly_ice_conc():
"""
ice concentration
"""
var_name = ""
start_year = 1979
end_year = 1985
SimConfig = namedtuple("SimConfig", "path label")
base_config = SimConfig("/home/huziy/skynet3_rech1/offline_glk_output_daily_1979-2012", "ERAI-driven")
modif_config = SimConfig("/home/huziy/skynet3_rech1/one_way_coupled_nemo_outputs_1979_1985", "CRCM5")
nemo_manager_base = NemoYearlyFilesManager(folder=base_config.path, suffix="icemod.nc")
nemo_manager_modif = NemoYearlyFilesManager(folder=modif_config.path, suffix="icemod.nc")
icecov_base, icecov_ts_base = nemo_manager_base.get_max_yearly_ice_fraction(start_year=start_year,
end_year=end_year)
icecov_modif, icecov_ts_modif = nemo_manager_modif.get_max_yearly_ice_fraction(start_year=start_year,
end_year=end_year)
lons, lats, bmp = nemo_manager_base.get_coords_and_basemap()
xx, yy = bmp(lons.copy(), lats.copy())
# Plot as usual: model, obs, model - obs
img_folder = Path("nemo/{}vs{}".format(modif_config.label, base_config.label))
if not img_folder.is_dir():
img_folder.mkdir(parents=True)
img_file = img_folder.joinpath("compare_yearmax_icecov_{}_vs_{}_{}-{}.pdf".format(
modif_config.label, base_config.label, start_year, end_year))
fig = plt.figure()
gs = GridSpec(2, 3, width_ratios=[1, 1, 0.05])
cmap = cm.get_cmap("jet", 10)
diff_cmap = cm.get_cmap("RdBu_r", 10)
# base
ax = fig.add_subplot(gs[0, 0])
cs = bmp.contourf(xx, yy, icecov_base, cmap=cmap)
bmp.drawcoastlines(ax=ax)
ax.set_title(base_config.label)
# modif
ax = fig.add_subplot(gs[0, 1])
cs = bmp.contourf(xx, yy, icecov_modif, cmap=cmap, levels=cs.levels)
plt.colorbar(cs, cax=fig.add_subplot(gs[0, -1]))
bmp.drawcoastlines(ax=ax)
ax.set_title(modif_config.label)
# difference
ax = fig.add_subplot(gs[1, :])
cs = bmp.contourf(xx, yy, icecov_modif - icecov_base, cmap=diff_cmap, levels=np.arange(-1, 1.2, 0.2))
bmp.colorbar(cs, ax=ax)
bmp.drawcoastlines(ax=ax)
    ax.set_title("{}-{}".format(modif_config.label, base_config.label))
    fig.tight_layout()
    fig.savefig(str(img_file), bbox_inches="tight")
    plt.close(fig)
# Plot time series
img_file = img_folder.joinpath("ts_compare_yearmax_icecov_{}_vs_{}_{}-{}.pdf".format(
modif_config.label, base_config.label, start_year, end_year))
fig = plt.figure()
plt.plot(range(start_year, end_year + 1), icecov_ts_base, "b", lw=2, label=base_config.label)
plt.plot(range(start_year, end_year + 1), icecov_ts_modif, "r", lw=2, label=modif_config.label)
plt.legend()
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
plt.grid()
plt.xlabel("Year")
fig.tight_layout()
fig.savefig(str(img_file), bbox_inches="tight")
if __name__ == '__main__':
import application_properties
application_properties.set_current_directory()
main_compare_max_yearly_ice_conc() | [
"[email protected]"
] | |
92e85c7b6e66817ecaf916d920cc1d86019397c2 | fe9573bad2f6452ad3e2e64539361b8bc92c1030 | /scapy_code/sniif_packet.py | 97cbf240c0083c9937735a47714341cd1d7da111 | [] | no_license | OceanicSix/Python_program | e74c593e2e360ae22a52371af6514fcad0e8f41f | 2716646ce02db00306b475bad97105b260b6cd75 | refs/heads/master | 2022-01-25T16:59:31.212507 | 2022-01-09T02:01:58 | 2022-01-09T02:01:58 | 149,686,276 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from scapy.all import *
def print_pkt(pkt):
print("---------------this is a new packet----------------------")
new_pkt = pkt[IP]
if new_pkt[ICMP]:
new_pkt.show()
sniff(filter="icmp", prn=print_pkt)
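# Common sniff() variations (a sketch; count/iface/store/timeout are standard
# Scapy keyword arguments, and "eth0" is just an example interface name).
# Left commented out so the script still runs a single capture loop:
# sniff(filter="icmp", prn=print_pkt, count=10)                    # stop after 10 packets
# sniff(filter="icmp", prn=print_pkt, iface="eth0", store=False)   # one interface, don't buffer
# sniff(filter="icmp", prn=print_pkt, timeout=30)                  # stop after 30 seconds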
| [
"[email protected]"
] | |
bae78268a22dd8097a25f839b0a4b162beeae02c | 4d332c45578246847ef2cdcdeb827ca29ab06090 | /modules/Bio/Graphics/GenomeDiagram/_GraphSet.py | 0a2ed5a0bc20757fad28322a3c8d7e8873e6337e | [
"MIT"
] | permissive | prateekgupta3991/justforlearn | 616cc297a2a6119fa959b9337a5e91c77a11ebf7 | 3984c64063b356cf89003e17a914272983b6cf48 | refs/heads/master | 2021-03-12T22:09:12.184638 | 2014-01-28T10:37:07 | 2014-01-28T10:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | /usr/share/pyshared/Bio/Graphics/GenomeDiagram/_GraphSet.py | [
"[email protected]"
] | |
83a912f2fd9bb92402ffe65df2ebaf7a667edd7e | e590449a05b20712d777fc5f0fa52097678c089b | /python-client/test/test_stash_appscode_com_v1alpha1_api.py | 58eaf340d2c3e2c403e782c27e9854d90c2f4271 | [
"Apache-2.0"
] | permissive | Hardeep18/kube-openapi-generator | 2563d72d9f95196f8ef795896c08e8e21cd1a08e | 6607d1e208965e3a09a0ee6d1f2de7e462939150 | refs/heads/master | 2020-04-11T03:30:18.786896 | 2018-05-05T20:57:51 | 2018-05-05T20:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,173 | py | # coding: utf-8
"""
stash-server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.stash_appscode_com_v1alpha1_api import StashAppscodeComV1alpha1Api # noqa: E501
from swagger_client.rest import ApiException
class TestStashAppscodeComV1alpha1Api(unittest.TestCase):
"""StashAppscodeComV1alpha1Api unit test stubs"""
def setUp(self):
self.api = swagger_client.api.stash_appscode_com_v1alpha1_api.StashAppscodeComV1alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_create_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for create_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_create_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for create_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_create_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for create_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_recovery(self):
"""Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_recovery
"""
pass
def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_repository(self):
"""Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_repository
"""
pass
def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_restic(self):
"""Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_restic
"""
pass
def test_delete_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for delete_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_delete_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for delete_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_delete_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for delete_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_get_stash_appscode_com_v1alpha1_api_resources(self):
"""Test case for get_stash_appscode_com_v1alpha1_api_resources
"""
pass
def test_list_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for list_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_list_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for list_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_list_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for list_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_list_stash_appscode_com_v1alpha1_recovery_for_all_namespaces(self):
"""Test case for list_stash_appscode_com_v1alpha1_recovery_for_all_namespaces
"""
pass
def test_list_stash_appscode_com_v1alpha1_repository_for_all_namespaces(self):
"""Test case for list_stash_appscode_com_v1alpha1_repository_for_all_namespaces
"""
pass
def test_list_stash_appscode_com_v1alpha1_restic_for_all_namespaces(self):
"""Test case for list_stash_appscode_com_v1alpha1_restic_for_all_namespaces
"""
pass
def test_patch_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for patch_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_patch_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for patch_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_patch_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for patch_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_read_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for read_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_read_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for read_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_read_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for read_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_replace_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for replace_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_replace_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for replace_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_replace_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for replace_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_recovery(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_recovery
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_recovery_list(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_recovery_list
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_repository(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_repository
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_repository_list(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_repository_list
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_restic(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_restic
"""
pass
def test_watch_stash_appscode_com_v1alpha1_namespaced_restic_list(self):
"""Test case for watch_stash_appscode_com_v1alpha1_namespaced_restic_list
"""
pass
def test_watch_stash_appscode_com_v1alpha1_recovery_list_for_all_namespaces(self):
"""Test case for watch_stash_appscode_com_v1alpha1_recovery_list_for_all_namespaces
"""
pass
def test_watch_stash_appscode_com_v1alpha1_repository_list_for_all_namespaces(self):
"""Test case for watch_stash_appscode_com_v1alpha1_repository_list_for_all_namespaces
"""
pass
def test_watch_stash_appscode_com_v1alpha1_restic_list_for_all_namespaces(self):
"""Test case for watch_stash_appscode_com_v1alpha1_restic_list_for_all_namespaces
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c693954cad97f78d72668a79087d4930ccea1091 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/source-opsgenie/source_opsgenie/source.py | 743694d15b54b6ca441b6e91b3a528af43f6b85c | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 1,870 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from .streams import AlertLogs, AlertRecipients, Alerts, Incidents, Integrations, Services, Teams, Users, UserTeams
# Source
class SourceOpsgenie(AbstractSource):
@staticmethod
def get_authenticator(config: Mapping[str, Any]):
return TokenAuthenticator(config["api_token"], auth_method="GenieKey")
    def check_connection(self, logger, config) -> Tuple[bool, Any]:
try:
auth = self.get_authenticator(config)
api_endpoint = f"https://{config['endpoint']}/v2/account"
response = requests.get(
api_endpoint,
headers=auth.get_auth_header(),
)
return response.status_code == requests.codes.ok, None
except Exception as error:
return False, f"Unable to connect to Opsgenie API with the provided credentials - {repr(error)}"
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
auth = self.get_authenticator(config)
args = {"authenticator": auth, "endpoint": config["endpoint"]}
incremental_args = {**args, "start_date": config.get("start_date", "")}
users = Users(**args)
alerts = Alerts(**incremental_args)
return [
alerts,
AlertRecipients(parent_stream=alerts, **args),
AlertLogs(parent_stream=alerts, **args),
Incidents(**incremental_args),
Integrations(**args),
Services(**args),
Teams(**args),
users,
UserTeams(parent_stream=users, **args),
]
| [
"[email protected]"
] | |
a0b0afefa29d9867d2ca4e7ea95add21f514f525 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/cognitiveservices/azure-mgmt-cognitiveservices/tests/disable_test_cli_mgmt_cognitiveservices.py | 36137183535e3c620a46e8cea7f926e0adadefd9 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,767 | py | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 14
# Methods Covered : 14
# Examples Total : 15
# Examples Tested : 13
# Coverage % : 87
# ----------------------
import unittest
import azure.mgmt.cognitiveservices
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtCognitiveServicesTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtCognitiveServicesTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.cognitiveservices.CognitiveServicesManagementClient
)
@unittest.skip('hard to test')
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_cognitiveservices(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
ACCOUNT_NAME = "myAccount"
LOCATION = "myLocation"
# /Accounts/put/Create Account Min[put]
BODY = {
"location": "West US",
"kind": "CognitiveServices",
"sku": {
"name": "S0"
},
"identity": {
"type": "SystemAssigned"
}
}
result = self.mgmt_client.accounts.create(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, account=BODY)
# /Accounts/put/Create Account[put]
BODY = {
"location": "West US",
"kind": "Emotion",
"sku": {
"name": "S0"
},
"properties": {
"encryption": {
"key_vault_properties": {
"key_name": "KeyName",
"key_version": "891CF236-D241-4738-9462-D506AF493DFA",
"key_vault_uri": "https://pltfrmscrts-use-pc-dev.vault.azure.net/"
},
"key_source": "Microsoft.KeyVault"
},
"user_owned_storage": [
{
"resource_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccountsfelixwatest"
}
]
},
"identity": {
"type": "SystemAssigned"
}
}
# result = self.mgmt_client.accounts.create(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, account=BODY)
# /Accounts/get/Get Usages[get]
result = self.mgmt_client.accounts.get_usages(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
# /Accounts/get/List SKUs[get]
result = self.mgmt_client.accounts.list_skus(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
# /Accounts/get/Get Account[get]
result = self.mgmt_client.accounts.get_properties(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
# /Accounts/get/List Accounts by Resource Group[get]
result = self.mgmt_client.accounts.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
# /Accounts/get/List Accounts by Subscription[get]
result = self.mgmt_client.accounts.list()
# /ResourceSkus/get/Regenerate Keys[get]
result = self.mgmt_client.resource_skus.list()
# /Operations/get/Get Operations[get]
result = self.mgmt_client.operations.list()
# /Accounts/post/Regenerate Keys[post]
result = self.mgmt_client.accounts.regenerate_key(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, key_name="Key2")
# /Accounts/post/List Keys[post]
result = self.mgmt_client.accounts.list_keys(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
# /Accounts/patch/Update Account[patch]
BODY = {
"sku": {
"name": "S2"
}
}
# result = self.mgmt_client.accounts.update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, account=BODY)
# //post/Check SKU Availability[post]
SKUS = [
"S0"
]
result = self.mgmt_client.check_sku_availability(location="eastus", skus=SKUS, kind="Face", type="Microsoft.CognitiveServices/accounts")
# /Accounts/delete/Delete Account[delete]
result = self.mgmt_client.accounts.delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
87db78fc9bb040bc77eeeb14ffba6ee78b8c43fa | 42394bd8cd674dcd0822ae288ddb4f4e749a6ed6 | /fluent_blogs/sitemaps.py | 97da332b7a014536107d1f7fe042d295b321ac83 | [
"Apache-2.0"
] | permissive | mmggbj/django-fluent-blogs | 4bca6e7effeca8b4cee3fdf4f8bb4eb4d192dfbe | 7fc3220d6609fe0615ad6ab44044c671d17d06a3 | refs/heads/master | 2021-05-08T13:02:51.896360 | 2018-01-31T21:54:27 | 2018-01-31T21:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,647 | py | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.sitemaps import Sitemap
from fluent_blogs.models import get_entry_model, get_category_model
from fluent_blogs.urlresolvers import blog_reverse
from parler.models import TranslatableModel
User = get_user_model()
EntryModel = get_entry_model()
CategoryModel = get_category_model()
class EntrySitemap(Sitemap):
"""
The sitemap definition for the pages created with django-fluent-blogs.
"""
def items(self):
qs = EntryModel.objects.published().order_by('-publication_date')
if issubclass(EntryModel, TranslatableModel):
# Note that .active_translations() can't be combined with other filters for translations__.. fields.
qs = qs.active_translations()
return qs.order_by('-publication_date', 'translations__language_code')
else:
return qs.order_by('-publication_date')
def lastmod(self, urlnode):
"""Return the last modification of the entry."""
return urlnode.modification_date
def location(self, urlnode):
"""Return url of an entry."""
return urlnode.url
class CategoryArchiveSitemap(Sitemap):
def items(self):
only_ids = EntryModel.objects.published().values('categories').order_by().distinct()
return CategoryModel.objects.filter(id__in=only_ids)
def lastmod(self, category):
"""Return the last modification of the entry."""
lastitems = EntryModel.objects.published().order_by('-modification_date').filter(categories=category).only('modification_date')
return lastitems[0].modification_date
def location(self, category):
"""Return url of an entry."""
return blog_reverse('entry_archive_category', kwargs={'slug': category.slug}, ignore_multiple=True)
class AuthorArchiveSitemap(Sitemap):
def items(self):
only_ids = EntryModel.objects.published().values('author').order_by().distinct()
return User.objects.filter(id__in=only_ids)
def lastmod(self, author):
"""Return the last modification of the entry."""
lastitems = EntryModel.objects.published().order_by('-modification_date').filter(author=author).only('modification_date')
return lastitems[0].modification_date
def location(self, author):
"""Return url of an entry."""
return blog_reverse('entry_archive_author', kwargs={'slug': author.username}, ignore_multiple=True)
class TagArchiveSitemap(Sitemap):
def items(self):
# Tagging is optional. When it's not used, it's ignored.
if 'taggit' not in settings.INSTALLED_APPS:
return []
from taggit.models import Tag
only_instances = EntryModel.objects.published().only('pk')
# Until https://github.com/alex/django-taggit/pull/86 is merged,
# better use the field names directly instead of bulk_lookup_kwargs
return Tag.objects.filter(
taggit_taggeditem_items__object_id__in=only_instances,
taggit_taggeditem_items__content_type=ContentType.objects.get_for_model(EntryModel)
)
def lastmod(self, tag):
"""Return the last modification of the entry."""
lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date')
return lastitems[0].modification_date
def location(self, tag):
"""Return url of an entry."""
return blog_reverse('entry_archive_tag', kwargs={'slug': tag.slug}, ignore_multiple=True)
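# A wiring sketch (assumption: a project-level urls.py; the section names
# below are hypothetical, not defined by this module):
#
#     from django.contrib.sitemaps.views import sitemap
#     from fluent_blogs.sitemaps import (
#         EntrySitemap, CategoryArchiveSitemap, AuthorArchiveSitemap, TagArchiveSitemap,
#     )
#
#     sitemaps = {
#         'blog_entries': EntrySitemap,
#         'blog_categories': CategoryArchiveSitemap,
#         'blog_authors': AuthorArchiveSitemap,
#         'blog_tags': TagArchiveSitemap,
#     }
#
#     urlpatterns += [
#         url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}),
#     ]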
| [
"[email protected]"
] | |
83f1f904fbc11dea0d949ea539259c4155273c82 | 364e2632a334241c034448ee0a7242818b49cdf7 | /bigmler/tests/test_02_batch_predictions.py | f37bd5abba68ee95bee0e21639252ce78ebf007f | [
"Apache-2.0"
] | permissive | cybernetics/bigmler | 46f6bf508b9ac6c6000c3859b5ed32a7619a3dad | c1c91548169e6f6053aef1b09085fa1fb7a43e43 | refs/heads/master | 2021-01-18T01:07:10.519630 | 2015-06-12T23:56:28 | 2015-06-12T23:56:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,126 | py | # -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing batch prediction creation
"""
from __future__ import absolute_import
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
import bigmler.tests.basic_batch_tst_prediction_steps as test_batch_pred
import bigmler.tests.basic_anomaly_prediction_steps as anomaly_pred
def setup_module():
"""Setup for the module
"""
common_setup_module()
test = TestBatchPrediction()
test.setup_scenario2()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestBatchPrediction(object):
def teardown(self):
"""Calling generic teardown for every method
"""
teardown_class()
def setup(self):
"""No setup operations for every method at present
"""
pass
def test_scenario1(self):
"""
Scenario 1: Successfully building test predictions from scratch:
Given I create BigML resources uploading train "<data>" file to test "<test>" remotely with mapping file "<fields_map>" and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the source has been created from the test file
And I check that the dataset has been created from the test file
And I check that the batch prediction has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | fields_map | output |predictions_file |
| ../data/grades.csv | ../data/test_grades.csv | ../data/grades_fields_map.csv | ./scenario_r1_r/predictions.csv | ./check_files/predictions_grades.csv |
"""
print self.test_scenario1.__doc__
examples = [
['data/grades.csv', 'data/test_grades.csv', 'data/grades_fields_map.csv', 'scenario_r1_r/predictions.csv', 'check_files/predictions_grades.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_batch_map(self, data=example[0], test=example[1], fields_map=example[2], output=example[3])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_batch_pred.i_check_create_test_source(self)
test_batch_pred.i_check_create_test_dataset(self)
test_batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def setup_scenario2(self):
"""
Scenario 2: Successfully building test predictions from scratch:
Given I create BigML resources uploading train "<data>" file to test "<test>" remotely and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the source has been created from the test file
And I check that the dataset has been created from the test file
And I check that the batch prediction has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/iris.csv | ../data/test_iris.csv | ./scenario_r1/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.setup_scenario2.__doc__
examples = [
['data/iris.csv', 'data/test_iris.csv', 'scenario_r1/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_batch(self, data=example[0], test=example[1], output=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_batch_pred.i_check_create_test_source(self)
test_batch_pred.i_check_create_test_dataset(self)
test_batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario3(self):
"""
Scenario 3: Successfully building test predictions from source
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using source to test the previous test source remotely and log predictions in "<output>"
And I check that the dataset has been created
And I check that the model has been created
And I check that the dataset has been created from the test file
And I check that the batch prediction has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | output |predictions_file |
| scenario_r1| {"data": "../data/iris.csv", "output": "./scenario_r1/predictions.csv", "test": "../data/test_iris.csv"} |./scenario_r2/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario3.__doc__
examples = [
['scenario_r1', '{"data": "data/iris.csv", "output": "scenario_r1/predictions.csv", "test": "data/test_iris.csv"}', 'scenario_r2/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_source_batch(self, output=example[2])
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_batch_pred.i_check_create_test_dataset(self)
test_batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario4(self):
"""
Scenario 4: Successfully building test predictions from dataset
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using dataset to test the previous test dataset remotely and log predictions in "<output>"
And I check that the model has been created
And I check that the batch prediction has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario_r1| {"data": "../data/iris.csv", "output": "./scenario_r1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario_r3/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario4.__doc__
examples = [
['scenario_r1', '{"data": "data/iris.csv", "output": "scenario_r1/predictions.csv", "test": "data/test_iris.csv"}', 'scenario_r3/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_dataset_batch(self, output=example[2])
test_pred.i_check_create_model(self)
test_batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario5(self):
"""
Scenario 5: Successfully building test predictions from dataset and prediction format info
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using a model to test the previous test dataset remotely with prediction headers and fields "<fields>" and log predictions in "<output>"
And I check that the batch prediction has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | fields | output |predictions_file |
| scenario_r1| {"data": "../data/iris.csv", "output": "./scenario_r1/predictions.csv", "test": "../data/test_iris.csv"} | sepal length,sepal width | ./scenario_r4/predictions.csv | ./check_files/predictions_iris_format.csv |
"""
print self.test_scenario5.__doc__
examples = [
['scenario_r1', '{"data": "data/iris.csv", "output": "scenario_r1/predictions.csv", "test": "data/test_iris.csv"}', 'sepal length,sepal width', 'scenario_r4/predictions.csv', 'check_files/predictions_iris_format.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_model_batch(self, fields=example[2], output=example[3])
test_batch_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario6(self):
"""
Scenario 6: Successfully building remote test predictions from scratch to a dataset:
Given I create BigML resources uploading train "<data>" file to test "<test>" remotely to a dataset with no CSV output and log resources in "<output_dir>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the source has been created from the test file
And I check that the dataset has been created from the test file
And I check that the batch prediction has been created
Then I check that the batch predictions dataset exists
And no local CSV file is created
Examples:
| data | test | output_dir |
| ../data/iris.csv | ../data/test_iris.csv | ./scenario_r5 |
"""
print self.test_scenario6.__doc__
examples = [
['data/iris.csv', 'data/test_iris.csv', 'scenario_r5']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_batch_to_dataset(self, data=example[0], test=example[1], output_dir=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_batch_pred.i_check_create_test_source(self)
test_batch_pred.i_check_create_test_dataset(self)
test_batch_pred.i_check_create_batch_prediction(self)
test_batch_pred.i_check_create_batch_predictions_dataset(self)
anomaly_pred.i_check_no_local_CSV(self)
| [
"[email protected]"
] | |
27ea18a5756b03152493d5b0994a5db4e48e30db | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03227/s470552278.py | 434e9ba40f42f909254017bbb3c840bf9b0c6468 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | n = raw_input()
print n if len(n)==2 else n[::-1] | [
"[email protected]"
] | |
b149655165dbfc3253e689f968488cd68f3e18c6 | 3e660e22783e62f19e9b41d28e843158df5bd6ef | /script.me.syncsmashingfromgithub/smashingfavourites/scripts/oldscripts/smashingtvextended.py | 23aa7191d67b111064220b6ce41ecbc4caa91859 | [] | no_license | monthou66/repository.smashingfavourites | a9603906236000d2424d2283b50130c7a6103966 | f712e2e4715a286ff6bff304ca30bf3ddfaa112f | refs/heads/master | 2020-04-09T12:14:34.470077 | 2018-12-04T10:56:45 | 2018-12-04T10:56:45 | 160,341,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,059 | py |
# -*- coding: utf-8 -*-
# opens tv channel or guide groups via smashingfavourites and / or keymap.
import os
import os.path
import shutil
import xbmc
import sys
# make sure dvbviewer is running - enable and wait if necessary
def enable():
if not xbmc.getCondVisibility('System.HasAddon(pvr.dvbviewer)'):
xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":7,"params":{"addonid":"pvr.dvbviewer","enabled":true}}')
xbmc.sleep(200)
# make sure dvbviewer is not running - disable if necessary
def disable():
xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":8,"params":{"addonid":"pvr.dvbviewer","enabled":false}}')
# define terms... c = count
# f=0 for just pvr disabled f = 1 (value) if channels, f=2 (value) if guides, f=3 if radio, f=4 if recordings,
# f=5 if timers, f=6 if search, f=7 if recording / recorded files, f=8 for timeshift, f=9 for permanently enable,
# f=10 for remove enable check.
# g = group number (value)... g=3 for last channel group / guide group
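# Typical invocation sketch (assumption: called from a Kodi keymap or
# favourite; RunScript passes its extra arguments through as sys.argv):
#   RunScript(<path>/smashingtvextended.py, 1, 2)  -> open TV channel group 2
#   RunScript(<path>/smashingtvextended.py, 2, 1)  -> open TV guide group 1
#   RunScript(<path>/smashingtvextended.py, 4)     -> open recordings (no group needed)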
# define f
a = sys.argv[1]
f = int(a)
# read the group number from the second script argument
def terms():
    return int(sys.argv[2])
# f=3
def radio():
xbmc.executebuiltin('ActivateWindow(Radio)')
exit()
# f=4
def recordings():
xbmc.executebuiltin('ActivateWindow(tvrecordings)')
exit()
# f=5
def timers():
xbmc.executebuiltin('ActivateWindow(tvtimers)')
exit()
# f=6
def search():
xbmc.executebuiltin('ActivateWindow(tvsearch)')
exit()
# pvr can be disabled for recorded files - f=7
def recordedfiles():
    xbmc.executebuiltin('ActivateWindow(Videos,smb://SourceTVRecordings/,return)')
exit()
# pvr can be disabled for timeshift files - f=8
def timeshift():
    xbmc.executebuiltin('ActivateWindow(Videos,smb://SourceTVRecordings/,return)')
exit()
# print stars to show up in log and error notification
def printstar():
print "****************************************************************************"
print "****************************************************************************"
def error():
xbmc.executebuiltin('Notification(Check, smashingtv)')
exit()
# open channel or guide windows - f = 1,2
def opengroups(g):
    c = 2
if f == 1:
xbmc.executebuiltin('ActivateWindow(TVChannels)')
elif f == 2:
xbmc.executebuiltin('ActivateWindow(TVGuide)')
else:
xbmc.executebuiltin('Notification(Check, smashingtv)'); exit()
xbmc.executebuiltin('SendClick(28)')
xbmc.executebuiltin( "XBMC.Action(FirstPage)" )
# loop move down to correct group (if necessary)
if g > 1:
while (c <= g):
c = c + 1
xbmc.executebuiltin( "XBMC.Action(Down)" )
# open group if not using 'choose' option.
if g >=1:
xbmc.executebuiltin( "XBMC.Action(Select)" )
xbmc.executebuiltin( "XBMC.Action(Right)" )
xbmc.executebuiltin( "ClearProperty(SideBladeOpen)" )
# define file locations at module level so the helpers below can see them
SOURCEFILE = os.path.join(xbmc.translatePath('special://userdata/favourites/smashingtv/enablefile'), "enablepvr.txt")
TARGET = os.path.join(xbmc.translatePath('special://userdata/favourites/smashingtv'), "enablepvr.txt")
# permanentenable:
# Copy enablepvr.txt to favourites/smashingtv folder as a marker and enable pvr.dvbviewer - f=9
# check if SOURCEFILE exists - if not give an error message
# check if TARGET exists - if so give a notification 'already enabled'
# copy SOURCEFILE to TARGET, enable and close
def permanentenable():
if not os.path.isfile(SOURCEFILE):
printstar()
print "smashingtv problem - check userdata/favourites/smashingtv/enablefile folder for missing pvrenable.txt"
printstar()
error()
if os.path.isfile(TARGET):
xbmc.executebuiltin('Notification(PVR is, already enabled)')
enable()
exit()
else:
shutil.copy(SOURCEFILE, TARGET)
xbmc.executebuiltin('Notification(PVR is, permanently enabled)')
enable()
exit()
#removepermanentcheck
# Remove enablepvr.txt from favourites/smashingtv folder f=10
def removepermanentcheck():
if not os.path.isfile(TARGET):
xbmc.executebuiltin('Notification(No PVR, lock found)')
disable()
exit()
else:
os.remove(TARGET)
xbmc.executebuiltin('Notification(PVR, unlocked)')
disable()
exit()
# Get on with it...
# disable or enable pvr.dvbviewer, exit if necessary, exit and print message if f is out of range
if f == 0:
disable()
exit()
elif f == 7 or f == 8:
disable()
elif f > 10 or f < 0:
printstar()
print "smashingtv exited 'cos f is out of range"
print "f is ",f
printstar()
error()
else:
enable()
if f == 1 or f == 2:
    g = terms()
    opengroups(g)
elif f == 3:
radio()
elif f == 4:
recordings()
elif f == 5:
timers()
elif f == 6:
search()
elif f == 7:
recordedfiles()
elif f == 8:
timeshift()
elif f == 9:
    permanentenable()   # exits internally once the lock file is written
elif f == 10:
    removepermanentcheck()   # exits internally once the lock file is removed
else:
printstar()
print "smashingtv exited 'cos sumfink went rong"
printstar()
error()
| [
"[email protected]"
] | |
6042ebe496e64d755d312557f38f2f61d3e98e80 | 18e687608ff326fae4d1e1604cf452f086f99559 | /classroom/admin.py | 7da03566a6f4b009b1d4c24281b22e580476a82c | [
"Apache-2.0"
] | permissive | nu11byt3s/lms-ml | 955bad451ddcb78e464227294496ee0a92be08c0 | c0ea63f09d4125b0ae9033fd8b0a4aab2604bb42 | refs/heads/main | 2023-08-13T08:09:53.097312 | 2021-10-12T09:51:31 | 2021-10-12T09:51:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from django.contrib import admin
from .models import ClassFiles, ClassRoom, MemberShip, RoomStream, Comment
admin.site.register(ClassRoom)
admin.site.register(ClassFiles)
admin.site.register(MemberShip)
admin.site.register(RoomStream)
admin.site.register(Comment)
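# An equivalent decorator-based registration sketch (kept commented out,
# because registering the same model twice raises AlreadyRegistered; the
# ClassRoomAdmin name is hypothetical):
#
# @admin.register(ClassRoom)
# class ClassRoomAdmin(admin.ModelAdmin):
#     list_display = ('id',)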
| [
"[email protected]"
] | |
59bd29fb6eb52de6fe04df9967256812d218e790 | 4e60e8a46354bef6e851e77d8df4964d35f5e53f | /share/create_db/__init__.py | 89e75b0e45db463c422c246e51d237b4f3e7984d | [] | no_license | cq146637/DockerManagerPlatform | cbae4154ad66eac01772ddd902d7f70b62a2d856 | 9c509fb8dca6633ed3afdc92d4e6491b5d13e322 | refs/heads/master | 2021-04-09T13:58:14.117752 | 2018-03-19T13:41:04 | 2018-03-19T13:41:04 | 125,712,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | # -*- coding: utf-8 -*-
__author__ = 'CQ'
| [
"[email protected]"
] | |
f50a2c13091de7e6652bd032364405a6cb81e40a | f08336ac8b6f8040f6b2d85d0619d1a9923c9bdf | /148-sortList.py | deaa09cb9bde869ffaac11cb72d4a48498d2f6ed | [] | no_license | MarshalLeeeeee/myLeetCodes | fafadcc35eef44f431a008c1be42b1188e7dd852 | 80e78b153ad2bdfb52070ba75b166a4237847d75 | refs/heads/master | 2020-04-08T16:07:47.943755 | 2019-02-21T01:43:16 | 2019-02-21T01:43:16 | 159,505,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | '''
148. Sort List
Sort a linked list in O(n log n) time using constant space complexity.
Example 1:
Input: 4->2->1->3
Output: 1->2->3->4
Example 2:
Input: -1->5->3->4->0
Output: -1->0->3->4->5
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def solve(self,head,length):
if length == 1: return head
i, curr = 0, head
while i+1 < length//2:
curr = curr.next
i += 1
tail = curr.next
curr.next = None
newHead = self.solve(head,length//2)
newTail = self.solve(tail,length-length//2)
currHead, currTail, ansHead = newHead, newTail, ListNode(0)
curr = ansHead
while currHead and currTail:
if currHead.val < currTail.val: curr.next = currHead; currHead = currHead.next
else: curr.next = currTail; currTail = currTail.next
curr = curr.next
if not currHead: curr.next = currTail
else: curr.next = currHead
return ansHead.next
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
l, curr = 0, head
while curr:
l += 1
curr = curr.next
if not l or l == 1: return head
else: return self.solve(head,l) | [
"[email protected]"
] | |
24aeb181fe8842ceab8dcbdae1e7eae470e32e96 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/2091.py | 2be5159255a480621d5ba476c77387286fc6d261 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py |
def get_values(f,line):
choices = [];
for i in range(0,4):
if i+1 == line:
choices.extend(f.readline().split())
else:
f.readline()
return choices
if __name__ == "__main__":
with open('problem.txt','r') as f:
trials = int(f.readline())
for i in range(0,trials):
first = int(f.readline())
first_choices = get_values(f,first)
second = int(f.readline())
second_choices = get_values(f,second)
combined = []
for a in first_choices:
if a in second_choices:
combined.append(a)
if len(combined) == 1:
print "Case #%s: %s"%(i+1,combined[0])
elif len(combined) > 1:
print "Case #%s: %s"%(i+1,"Bad magician!")
else:
print "Case #%s: %s"%(i+1,"Volunteer cheated!")
| [
"[email protected]"
] | |
94039fd9178ad4160ba0efb6f0dda17c0fde9816 | 8926a97e04be31c62a28ee9031520ca785f2947b | /flask/member_test/app3.py | 1879796d52c5957ea5c6d9b35167283e4c2e6a95 | [] | no_license | ragu6963/kfq_pyhton | 3d651357242892713f36ac12e31f7b586d6e7422 | bdad24e7620e8102902e2f0a71c8486fb0ad86c9 | refs/heads/master | 2022-11-26T16:02:49.838263 | 2020-07-31T00:40:59 | 2020-07-31T00:40:59 | 276,516,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | from flask import Flask, request, render_template, redirect, url_for, jsonify, json
import pymysql, os, cx_Oracle
from flask_sqlalchemy import SQLAlchemy
from json import JSONEncoder
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "oracle://hr:[email protected]:1521/xe"
# app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://root:qwer1234@localhost/test"
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class User(db.Model):
# id = db.Column(db.Integer, primary_key = True)
userid = db.Column(db.String(20), primary_key=True)
userpw = db.Column(db.String(20))
username = db.Column(db.String(20))
userage = db.Column(db.Integer)
usermail = db.Column(db.String(20))
useradd = db.Column(db.String(20))
usergender = db.Column(db.String(20))
usertel = db.Column(db.String(20))
def __repr__(self):
return "<userid %r,username %r>" % (self.id, self.username)
def __init__(self, userid, userpw, username, userage, usermail, useradd, usergender, usertel):
self.userid = userid
self.userpw = userpw
self.username = username
self.userage = userage
self.usermail = usermail
self.useradd = useradd
self.usergender = usergender
self.usertel = usertel
    def toJSON(self):
        # serialize only the mapped columns; SQLAlchemy's internal
        # _sa_instance_state entry in __dict__ is not JSON-serializable
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
@app.route("/")
def index():
return render_template("index.html")
@app.route("/usersform", methods=["POST", "GET"])
def usersform():
if request.method == "GET":
return render_template("usersform.html")
else:
userid = request.form.get("userid")
userpw = request.form.get("userpw")
username = request.form.get("username")
userage = request.form.get("userage")
usermail = request.form.get("useremail")
useradd = request.form.get("useradd")
usergender = request.form.get("usergender")
usertel = request.form.get("usertel")
my_user = User(userid, userpw, username, userage, usermail, useradd, usergender, usertel)
db.session.add(my_user)
db.session.commit()
return redirect("/list")
@app.route("/list")
def list():
all_data = User.query.all()
return render_template("list.html", list=all_data)
@app.route("/content/<userid>")
def content(userid):
result = User.query.filter_by(userid=userid).one()
return render_template("content.html", list=result)
@app.route("/updateform/<userid>", methods=["GET"])
def updateformget(userid):
result = User.query.filter_by(userid=userid).one()
return render_template("updateform.html", list=result)
@app.route("/updateform", methods=["POST"])
def updateformpost():
my_user = User.query.get(request.form.get("userid"))
my_user.userid = request.form.get("userid")
my_user.userpw = request.form.get("userpw")
my_user.username = request.form.get("username")
my_user.userage = request.form.get("userage")
my_user.usermail = request.form.get("usermail")
my_user.useradd = request.form.get("useradd")
my_user.usergender = request.form.get("usergender")
my_user.usertel = request.form.get("usertel")
db.session.commit()
return redirect("/list")
@app.route("/deleteform/<userid>")
def deleteformget(userid):
my_data = User.query.get(userid)
db.session.delete(my_data)
db.session.commit()
return redirect("/list")
@app.route("/ajaxlist", methods=["GET"])
def ajaxlistget():
all_data = User.query.all()
return render_template("ajaxlist.html", list=all_data)
@app.route("/ajaxlist", methods=["POST"])
def ajaxlistpost():
userid = request.form.get("userid")
query = User.query.filter(User.userid.like("%" + userid + "%")).order_by(User.userid)
all_data = query.all()
result = []
for data in all_data:
result.append(data.toJSON())
    # return jsonify(all_data)
    # A Flask view cannot return a bare Python list; join the per-row JSON
    # strings into a single JSON array string instead.
    return "[%s]" % ", ".join(result)
@app.route("/imglist")
def imglist():
print(os.path.dirname(__file__))
dirname = os.path.dirname(__file__) + "/static/img/"
filenames = os.listdir(dirname)
print(filenames)
return render_template("imglist.html", filenames=filenames)
if __name__ == "__main__":
db.create_all()
app.run(debug=True, port=8089)
| [
"[email protected]"
] | |
89bb0587757a6aed99336fb24ec99c4a1cbfe575 | 8d2a785ffc06ec46a546cdf50af41054a382f05a | /classes/day02/class/3.pass语句.py | f2defecd7c5467b8ae56a75acfff0e26254f3c38 | [] | no_license | Pigcanflysohigh/Py27 | 4be0d9ad93f5d695c48fd89157952230ec4d111a | 2f6568fce2a6f09c73cdc08342a8b05645c87736 | refs/heads/master | 2020-06-18T08:09:08.217036 | 2019-11-20T16:14:05 | 2019-11-20T16:14:05 | 196,225,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | if 1==2:
    ...  # does nothing, just holds the place -- shows I understand Python's syntax rules
n = 0
while n < 6:
    pass  # placeholder statement; does nothing
    n += 1  # increment added so the loop terminates (the original looped forever)
| [
"[email protected]"
] | |
b40d364523a6a0d9a5830630e0d64197363cab87 | 95884a6b32f6831e68c95d7785bc968a56877121 | /cifar_imagenet/models/cifar/momentumnet_restart_lookahead_vel_learned_scalar_clip_mom.py | ba44f69eff7577785b303b3c9d7d192514916fc3 | [
"MIT",
"Apache-2.0"
] | permissive | minhtannguyen/RAdam | d89c4c6ce1ce0dd95b0be3aa2c20e70ea62da8b0 | 44f403288df375bae0785cc82dd8c888eaaaa441 | refs/heads/master | 2020-08-09T07:53:50.601789 | 2020-02-17T06:17:05 | 2020-02-17T06:17:05 | 214,041,479 | 0 | 0 | Apache-2.0 | 2019-10-09T23:11:14 | 2019-10-09T23:11:14 | null | UTF-8 | Python | false | false | 7,243 | py | # -*- coding: utf-8 -*-
"""
momentum net
"""
import torch
import torch.nn as nn
import math
from torch.nn.parameter import Parameter
__all__ = ['momentumnet_restart_lookahead_vel_learned_scalar_clip_mom']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, step_size=2.0, momentum=0.5):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
# for momentum net
self.step_size = Parameter(torch.tensor(step_size), requires_grad=True)
self.momentum = Parameter(torch.tensor(momentum), requires_grad=True)
def forward(self, invec):
x, y = invec[0], invec[1]
residualx = x
residualy = y
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residualx = self.downsample(x)
residualy = self.downsample(y)
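        # Momentum update: outy = x - step_size * F(x) is a gradient-like
        # step, and outx adds a lookahead extrapolation weighted by momentum.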
outy = residualx - self.step_size*out
outx = (1.0 + self.momentum) * outy - self.momentum * residualy
return [outx, outy]
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, step_size=2.0, momentum=0.5):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
# for momentum net
self.step_size = Parameter(torch.tensor(step_size), requires_grad=True)
self.momentum = Parameter(torch.tensor(momentum), requires_grad=True)
def forward(self, invec):
x, prex = invec[0], invec[1]
residualx = x
residualprex = prex
x = x + torch.clamp(input=self.momentum, min=0.0, max=1.0) * prex
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residualx = self.downsample(residualx)
residualprex = torch.zeros_like(out)
outprex = torch.clamp(input=self.momentum, min=0.0, max=1.0) * residualprex - self.step_size * out
outx = residualx + outprex
return [outx, outprex]
class MomentumNet(nn.Module):
def __init__(self, depth, step_size=2.0, momentum=0.5, num_classes=1000, block_name='BasicBlock', feature_vec='x'):
super(MomentumNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
            raise ValueError('block_name should be Basicblock or Bottleneck')
self.inplanes = 16
# for momentum net
self.step_size = step_size
self.momentum = momentum
self.feature_vec = feature_vec
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
bias=False)
self.layer1 = self._make_layer(block, 16, n, step_size=self.step_size, momentum=self.momentum)
self.layer2 = self._make_layer(block, 32, n, stride=2, step_size=self.step_size, momentum=self.momentum)
self.layer3 = self._make_layer(block, 64, n, stride=2, step_size=self.step_size, momentum=self.momentum)
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, step_size=2.0, momentum=0.5):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, step_size=step_size, momentum=momentum))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, step_size=step_size, momentum=momentum))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
out = [x, torch.zeros_like(x)]
out = self.layer1(out) # 32x32
out = self.layer2(out) # 16x16
out = self.layer3(out) # 8x8
if self.feature_vec=='x':
x = out[0]
else:
x = out[1]
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def momentumnet_restart_lookahead_vel_learned_scalar_clip_mom(**kwargs):
"""
    Constructs a MomentumNet model (a ResNet-style network with momentum updates).
"""
return MomentumNet(**kwargs)
# def momentum_net20(**kwargs):
# return MomentumNet(num_classes=10, depth=20, block_name="basicblock")
# def momentum_net56(**kwargs):
# return MomentumNet(num_classes=10, depth=56, block_name="bottleneck")
# def momentum_net110(**kwargs):
# return MomentumNet(num_classes=10, depth=110, block_name="bottleneck")
# def momentum_net164(**kwargs):
# return MomentumNet(num_classes=10, depth=164, block_name="bottleneck")
# def momentum_net290(**kwargs):
# return MomentumNet(num_classes=10, depth=290, block_name="bottleneck")
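# A minimal usage sketch (an illustrative assumption, not part of the original
# file; depth must satisfy 9n+2 for the bottleneck block, per the asserts above):
# model = momentumnet_restart_lookahead_vel_learned_scalar_clip_mom(
#     depth=110, num_classes=10, block_name='bottleneck')
# logits = model(torch.randn(2, 3, 32, 32))   # CIFAR-sized input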
| [
"[email protected]"
] | |
066f467af220cf2210d237b5be14036c9a366795 | c4ee4a9d28425aa334038ad174c7b1d757ff45db | /py/survey_backdeck/SurveyBackdeckDB.py | 425ea9aadef024c30c34e6d6199380a96c70f5d1 | [
"MIT"
] | permissive | nwfsc-fram/pyFieldSoftware | 32b3b9deb06dba4a168083a77336613704c7c262 | 477ba162b66ede2263693cda8c5a51d27eaa3b89 | refs/heads/master | 2023-08-03T07:38:24.117376 | 2021-10-20T22:49:51 | 2021-10-20T22:49:51 | 221,750,910 | 1 | 1 | MIT | 2023-07-20T13:13:25 | 2019-11-14T17:23:47 | Python | UTF-8 | Python | false | false | 1,155 | py | __author__ = 'Todd.Hay'
# -------------------------------------------------------------------------------
# Name:        SurveyBackdeckDB.py
# Purpose:     Provides connection to the hookandline_cutter.db SQLite database
# Author: Todd.Hay
# Email: [email protected]
#
# Created: Jan 08, 2016
# License: MIT
#-------------------------------------------------------------------------------
import unittest
from py.common import CommonDB
class HookAndLineHookCutterDB(CommonDB.CommonDB):
"""
Subclass the CommonDB class, which makes the actual database connection
"""
def __init__(self, db_filename="hookandline_cutter.db"):
super().__init__(db_filename)
class TestTrawlBackdeckDB(unittest.TestCase):
"""
Test basic SQLite connectivity
"""
def setUp(self):
self._db = HookAndLineHookCutterDB('hookandline_cutter.db')
def tearDown(self):
self._db.disconnect()
def test_query(self):
count = 0
for t in self._db.execute('SELECT * FROM SETTINGS'):
count += 1
self.assertGreater(count, 200)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
56cb0a401e086cffa893b8b4dcd75edf07ca9e4c | dacf092e82b5cc841554178e5117c38fd0b28827 | /day24_program4/server_start.py | 80694736d2929c195b9ccc3caf71539ee8767758 | [] | no_license | RainMoun/python_programming_camp | f9bbee707e7468a7b5d6633c2364f5dd75abc8a4 | f8e06cdd2e6174bd6986d1097cb580a6a3b7201f | refs/heads/master | 2020-04-15T11:27:09.680587 | 2019-04-06T02:21:14 | 2019-04-06T02:21:14 | 164,630,838 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py | import socket
import os
from conf import setting
from interface import common_interface
from db import models, db_handler
import logging.config
def upload_file():  # receive an uploaded file from the client
file_path = os.path.join(BASE_DIR, 'db', 'file_upload')
if not os.path.exists(file_path):
os.makedirs(file_path)
path = os.path.join(BASE_DIR, 'db', 'file_upload', file_name)
if not os.path.exists(path):
f = open(path, 'w')
f.close()
f = open(path, 'ab')
has_received = 0
while has_received != file_size:
data_once = conn.recv(1024)
f.write(data_once)
has_received += len(data_once)
f.close()
file_md5_finish = common_interface.get_file_md5(path)
if file_md5_finish == file_md5:
file_upload = models.File(file_name, file_size, file_md5, admin_name)
db_handler.save_upload_file_message(file_upload)
logging.info('{} upload {}, the md5 is {}'.format(admin_name, file_name, file_md5))
print('{} upload {}, the md5 is {}'.format(admin_name, file_name, file_md5))
func_dict = {
'post': upload_file
}
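# Expected client header (split on '|' in the loop below):
#   "post|<admin_name>|<file_name>|<file_size>|<file_md5>"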
if __name__ == '__main__':
sk = socket.socket()
sk.bind(setting.SERVER_ADDRESS)
sk.listen(3)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
while True:
conn, addr = sk.accept()
while True:
data = conn.recv(1024)
print(data.decode('utf-8'))
flag, admin_name, file_name, file_size, file_md5 = data.decode('utf-8').split('|')
file_size = int(file_size)
func_dict[flag]()
break
sk.close()
| [
"[email protected]"
] | |
50a805dc8a946792bd4c670cf4d42f3317b82c4d | 5925c0fc934760f1726818a18d6098499dcfb981 | /GAN_upscale_28x28/GAN_upscale_28x28.py | 0c4f60692043ba8d1aaf07a121723c78c1911750 | [
"MIT"
] | permissive | PsycheShaman/Keras-GAN | 64b4b20471f4b185580860f28caa9320ed615a51 | 9a1f2576af8f67fad7845421ea5feb53012c1c9f | refs/heads/master | 2020-06-17T11:03:23.993616 | 2019-09-14T08:04:25 | 2019-09-14T08:04:25 | 195,858,154 | 2 | 0 | MIT | 2019-09-14T08:04:26 | 2019-07-08T17:33:39 | Python | UTF-8 | Python | false | false | 6,859 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 12:14:02 2019
@author: gerhard
"""
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import sys
import numpy as np
import glob
import pickle
def load_data():
x_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\x_*.pkl")
with open(x_files[0],'rb') as x_file:
x = pickle.load(x_file)
for i in x_files[1:]:
print(i)
with open(i,'rb') as x_file:
print(i)
xi = pickle.load(x_file)
x = np.concatenate((x,xi),axis=0)
print(x.shape)
    return x
def scale(x, out_range=(-1, 1)):
domain = np.min(x), np.max(x)
y = (x - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])
return y * (out_range[1] - out_range[0]) + (out_range[1] + out_range[0]) / 2
class GAN():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 5
optimizer_discr = Adam(0.0004, 0.5)
optimizer_gen = Adam(0.0001, 0.5)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer_discr,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
validity = self.discriminator(img)
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model(z, validity)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer_gen)
def build_generator(self):
model = Sequential()
model.add(Dense(256, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.img_shape), activation='tanh'))
model.add(Reshape(self.img_shape))
model.summary()
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_discriminator(self):
model = Sequential()
model.add(Flatten(input_shape=self.img_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size=128, sample_interval=50):
# Load the dataset
X_train = load_data()
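        # Pad every sample into a 28x28 canvas: the original image is placed
        # at row offset 5 and column offset 2 of a zero array.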
new_x = np.zeros((X_train.shape[0],28,28))
for i in range(0,X_train.shape[0]):
x_new_i = np.zeros((28,28))
x_old_i = X_train[i,:,:]
x_new_i[5:x_old_i.shape[0]+5,2:x_old_i.shape[1]+2] = x_old_i
new_x[i,:,:] = x_new_i
X_train = new_x
del new_x
# Rescale -1 to 1
# X_train = X_train / 127.5 - 1.
X_train = scale(X_train)
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
# valid = np.ones((batch_size, 1))
# fake = np.zeros((batch_size, 1))
valid = np.full(shape=(batch_size,1),fill_value=0.975)
fake = np.full(shape=(batch_size,1),fill_value=0.025)
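        # Soft targets (0.975 / 0.025) apply one-sided label smoothing in
        # place of the hard 1/0 labels commented out above; this can help
        # stabilise GAN training.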
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Generate a batch of new images
gen_imgs = self.generator.predict(noise)
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Train the generator (to have the discriminator label samples as valid)
g_loss = self.combined.train_on_batch(noise, valid)
# Plot the progress
print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
# r, c = 5, 5
noise = np.random.normal(0, 1, (2, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
plt.imshow(gen_imgs[1,:,:,0],cmap='gray')
# fig, axs = plt.subplots(r, c)
# cnt = 0
# for i in range(r):
# for j in range(c):
# axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
# axs[i,j].axis('off')
# cnt += 1
plt.savefig("images/%d.png" % epoch)
plt.close()
if __name__ == '__main__':
gan = GAN()
gan.train(epochs=30000, batch_size=32, sample_interval=10) | [
"[email protected]"
] | |
babc9118bdcf7d2c25b4dc9e551d807e63b0a18f | 6b6c00e637e4659e4c960ff5dc48c8b25f09c543 | /myihome/web_html.py | 86d5f26bfaab5886725ef16013e4324d6595659e | [] | no_license | too-hoo/myiHome | 881d24f7835a1d6f95fea8cc97d68194d078d451 | 54c57755f5c9a700330cb38a47fe88e4f9f7ab3a | refs/heads/master | 2020-07-13T11:26:53.915246 | 2019-09-04T11:43:01 | 2019-09-04T11:43:01 | 205,073,018 | 9 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Serves the site's static html files
from flask import Blueprint, current_app, make_response
from flask_wtf import csrf  # CSRF protection helpers
# Blueprint that serves the static html files
html = Blueprint("web_html", __name__)
# 127.0.0.1:5000/()
# 127.0.0.1:5000/(index.html)
# 127.0.0.1:5000/(register.html)
# 127.0.0.1:5000/(favicon.ico) # the browser requests this resource by itself; it is the site's icon
# The path may capture nothing or a file name: .* matches zero or more
# characters, and html_file_name receives whatever was captured (this assumes
# a custom 're' URL converter has been registered on the app)
@html.route("/<re(r'.*'):html_file_name>")
def get_html(html_file_name):
"""提供html文件"""
# 可以直接到静态文件哪里找到返回,也可以使用flask提供的一个方法current_app.send_static_file,专门让我们返回静态文件的
# 如果html_file_name为空,表示访问的路径为/ , 请求的是主页,直接等于index.html即可
if not html_file_name:
html_file_name = 'index.html'
    # if html_file_name is not favicon.ico, look under the html/ directory
if html_file_name != 'favicon.ico':
        html_file_name = 'html/' + html_file_name  # prepend the html/ directory
    # generate a csrf_token value
csrf_token = csrf.generate_csrf()
    # send_static_file looks under the static directory by default; wrap the
    # response with make_response so a cookie can be set before returning.
resp = make_response(current_app.send_static_file(html_file_name))
    # store the CSRF token in a cookie
resp.set_cookie('csrf_token', csrf_token)
return resp
| [
"[email protected]"
] | |
328588836d307293f0d666768429a1b18e4e1007 | 3a1c5ae00324fc26728ad9549cd4cf710a6dfcca | /trackmeapp/serializers.py | ecb9872ce6f7e0f07739fb553cc72ca93c734d99 | [] | no_license | EnockOMONDI/TRACK-ME | c39b3f6ed04957a011c96735526475dae596941c | aff6704f2ce1645a6b7f044a52abf0769de7d604 | refs/heads/master | 2021-06-24T22:26:49.841861 | 2019-06-24T07:19:22 | 2019-06-24T07:19:22 | 193,167,979 | 0 | 0 | null | 2021-06-10T21:37:58 | 2019-06-21T22:46:08 | Python | UTF-8 | Python | false | false | 309 | py | from rest_framework import serializers
from trackmeapp.models import Task
# transforming objects to JSON and vice versa
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
fields = ('item_id', 'title', 'description', 'created_at', 'comp_date', 'status')
| [
"[email protected]"
] | |
4835bc6e70d40dff4467464c94615a922a6eeb0d | 82ef9a0dd1618a28770597227acfc0150b948af2 | /wearnow/tex/plug/_plugin.py | dd765acd80043d80efc8cafe2aca871e1af2e00f | [] | no_license | bmcage/wearnow | ef32a7848472e79e56763b38551835aa97864b21 | c8dfa75e1ea32b0c021d71c4f366ab47104c207e | refs/heads/master | 2021-01-16T00:27:59.597812 | 2016-01-19T11:55:03 | 2016-01-19T11:55:03 | 37,195,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | #
# WearNow - a GTK+/GNOME based program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the base class for plugins.
"""
class Plugin(object):
"""
This class serves as a base class for all plugins that can be registered
with the plugin manager
"""
def __init__(self, name, description, module_name):
"""
:param name: A friendly name to call this plugin.
Example: "GEDCOM Import"
:type name: string
:param description: A short description of the plugin.
Example: "This plugin will import a GEDCOM file into a database"
:type description: string
:param module_name: The name of the module that contains this plugin.
Example: "gedcom"
:type module_name: string
:return: nothing
"""
self.__name = name
self.__desc = description
self.__mod_name = module_name
def get_name(self):
"""
Get the name of this plugin.
:return: a string representing the name of the plugin
"""
return self.__name
def get_description(self):
"""
Get the description of this plugin.
:return: a string that describes the plugin
"""
return self.__desc
def get_module_name(self):
"""
Get the name of the module that this plugin lives in.
:return: a string representing the name of the module for this plugin
"""
return self.__mod_name
| [
"[email protected]"
] | |
35d145eae9baf5f65b192fdad8fad0e7408f07eb | 9eff544e604f9cff4384d4154204ab3276a56a23 | /mpsci/distributions/gumbel_max.py | c6080c33985af06b004adc47893c0f19297ed96c | [
"BSD-2-Clause"
] | permissive | WarrenWeckesser/mpsci | 844d22b0230bc8fb5bf2122e7705c6fd1f0744d9 | cd4626ee34680627abd820d9a80860b45de1e637 | refs/heads/main | 2023-08-31T11:01:11.412556 | 2023-08-25T20:30:43 | 2023-08-25T20:30:43 | 159,705,639 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,575 | py | """
Gumbel probability distribution (for maxima)
--------------------------------------------
This is the same distribution as:
* `scipy.stats.gumbel_r`;
* NumPy's `numpy.random.Generator.gumbel`;
* the Gumbel distribution discussed in the wikipedia article
"Gumbel distribtion" (https://en.wikipedia.org/wiki/Gumbel_distribution);
* the Type I extreme value distribution used in the text "An Introduction
to Statistical Modeling of Extreme Values" by Stuart Coles (Springer, 2001);
* the Gumbel distribution given in the text "Modelling Extremal Events" by
Embrechts, Klüppelberg and Mikosch (Springer, 1997);
* the Gumbel distribution in the text "Statistical Distributions" (fourth ed.)
by Forbes, Evans, Hastings and Peacock (Wiley, 2011);
* the `extreme_value_distribution` class implemented in the Boost/math C++
library;
* the `Gumbel` distribution in the Rust `rand_distr` crate.
"""
from mpmath import mp
from .. import stats
from mpsci.stats import mean as _mean
from ._common import _seq_to_mp
__all__ = ['pdf', 'logpdf', 'cdf', 'invcdf', 'sf', 'invsf', 'mean', 'var',
'nll', 'mle', 'mom']
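# With z = (x - loc)/scale, the functions below implement
#     pdf(x) = exp(-(z + exp(-z))) / scale
#     cdf(x) = exp(-exp(-z))
# using mpmath for arbitrary-precision arithmetic.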
def pdf(x, loc, scale):
"""
Probability density function for the Gumbel distribution (for maxima).
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
x = mp.mpf(x)
loc = mp.mpf(loc)
scale = mp.mpf(scale)
return mp.exp(logpdf(x, loc, scale))
def logpdf(x, loc, scale):
"""
Log of the PDF of the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
x = mp.mpf(x)
loc = mp.mpf(loc)
scale = mp.mpf(scale)
z = (x - loc) / scale
return -(z + mp.exp(-z)) - mp.log(scale)
def cdf(x, loc, scale):
"""
Cumulative distribution function for the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
x = mp.mpf(x)
loc = mp.mpf(loc)
scale = mp.mpf(scale)
z = (x - loc) / scale
return mp.exp(-mp.exp(-z))
def invcdf(p, loc, scale):
"""
Inverse of the CDF for the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
p = mp.mpf(p)
loc = mp.mpf(loc)
scale = mp.mpf(scale)
z = -mp.log(-mp.log(p))
x = scale*z + loc
return x
def sf(x, loc, scale):
"""
Survival function for the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
x = mp.mpf(x)
loc = mp.mpf(loc)
scale = mp.mpf(scale)
z = (x - loc) / scale
return -mp.expm1(-mp.exp(-z))
def invsf(p, loc, scale):
"""
Inverse of the survival function for the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
p = mp.mpf(p)
loc = mp.mpf(loc)
scale = mp.mpf(scale)
z = -mp.log(-mp.log1p(-p))
x = scale*z + loc
return x
def mean(loc, scale):
"""
Mean of the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
loc = mp.mpf(loc)
scale = mp.mpf(scale)
return loc + mp.euler*scale
def var(loc, scale):
"""
Variance of the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
loc = mp.mpf(loc)
scale = mp.mpf(scale)
return mp.pi**2/6 * scale**2
def nll(x, loc, scale):
"""
Negative log-likelihood function for the Gumbel distribution.
"""
if scale <= 0:
raise ValueError('scale must be positive.')
with mp.extradps(5):
loc = mp.mpf(loc)
scale = mp.mpf(scale)
n = len(x)
z = [(mp.mpf(xi) - loc)/scale for xi in x]
t1 = n*mp.log(scale)
t2 = mp.fsum(z)
t3 = mp.fsum([mp.exp(-zi) for zi in z])
return t1 + t2 + t3
def _mle_scale_func(scale, x, xbar):
emx = [mp.exp(-xi/scale) for xi in x]
s1 = mp.fsum([xi * emxi for xi, emxi in zip(x, emx)])
s2 = mp.fsum(emx)
return s2*(xbar - scale) - s1
def _solve_mle_scale(x):
xbar = stats.mean(x)
# Very rough guess of the scale parameter:
s0 = stats.std(x)
if s0 == 0:
# The x values are all the same.
return s0
# Find an interval in which there is a sign change of
# _mle_scale_func.
s1 = s0
s2 = s0
sign2 = mp.sign(_mle_scale_func(s2, x, xbar))
while True:
s1 = 0.9*s1
sign1 = mp.sign(_mle_scale_func(s1, x, xbar))
if (sign1 * sign2) <= 0:
break
s2 = 1.1*s2
sign2 = mp.sign(_mle_scale_func(s2, x, xbar))
if (sign1 * sign2) <= 0:
break
# Did we stumble across the root while looking for an interval
# with a sign change? Not likely, but check anyway...
if sign1 == 0:
return s1
if sign2 == 0:
return s2
root = mp.findroot(lambda t: _mle_scale_func(t, x, xbar),
[s1, s2], solver='anderson')
return root
def _mle_scale_with_fixed_loc(scale, x, loc):
z = [(xi - loc) / scale for xi in x]
ez = [mp.expm1(-zi)*zi for zi in z]
return stats.mean(ez) + 1
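# stats.mean(ez) + 1 is proportional to d(nll)/d(scale) with loc held fixed,
# so mle() below finds the scale that zeroes this expression.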
def mle(x, loc=None, scale=None):
"""
Maximum likelihood estimates for the Gumbel distribution.
`x` must be a sequence of numbers--it is the data to which the
Gumbel distribution is to be fit.
If either `loc` or `scale` is not None, the parameter is fixed
at the given value, and only the other parameter will be fit.
Returns maximum likelihood estimates of the `loc` and `scale`
parameters.
Examples
--------
Imports and mpmath configuration:
>>> from mpmath import mp
>>> mp.dps = 20
>>> from mpsci.distributions import gumbel_max
The data to be fit:
>>> x = [6.86, 14.8 , 15.65, 8.72, 8.11, 8.15, 13.01, 13.36]
Unconstrained MLE:
>>> gumbel_max.mle(x)
(mpf('9.4879877926148360358863'), mpf('2.727868138859403832702'))
If we know the scale is 2, we can add the argument `scale=2`:
>>> gumbel_max.mle(x, scale=2)
(mpf('9.1305625326153555632872'), mpf('2.0'))
"""
with mp.extradps(5):
x = _seq_to_mp(x)
if scale is None and loc is not None:
# Estimate scale with fixed loc.
loc = mp.mpf(loc)
# Initial guess for findroot.
s0 = stats.std([xi - loc for xi in x])
scale = mp.findroot(
lambda t: _mle_scale_with_fixed_loc(t, x, loc), s0
)
return loc, scale
if scale is None:
scale = _solve_mle_scale(x)
else:
scale = mp.mpf(scale)
if loc is None:
ex = [mp.exp(-xi / scale) for xi in x]
loc = -scale * mp.log(stats.mean(ex))
else:
loc = mp.mpf(loc)
return loc, scale
def mom(x):
"""
Method of moments parameter estimation for the Gumbel-max distribution.
x must be a sequence of real numbers.
Returns (loc, scale).
"""
with mp.extradps(5):
M1 = _mean(x)
M2 = _mean([mp.mpf(t)**2 for t in x])
scale = mp.sqrt(6*(M2 - M1**2))/mp.pi
loc = M1 - scale*mp.euler
return loc, scale
| [
"[email protected]"
] | |
af43c669459f8dd1c60ac2dae04418f9744f6a29 | 44a7b4879c1da661cc2e8aa51c7fcc24cfb0fd3b | /tests/psu/psu_version_test.py | b3942b4c3523ed8b6ec2b0b234eb1705e2bb042b | [
"MIT"
] | permissive | seoss/scs_core | 21cd235c9630c68f651b9a8c88120ab98fe5f513 | a813f85f86b6973fa77722a7d61cc93762ceba09 | refs/heads/master | 2021-08-08T08:09:56.905078 | 2020-04-16T19:46:52 | 2020-04-16T19:46:52 | 156,239,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/env python3
"""
Created on 13 Nov 2017
@author: Bruno Beloff ([email protected])
"""
import json
from scs_core.data.json import JSONify
from scs_core.psu.psu_version import PSUVersion
# --------------------------------------------------------------------------------------------------------------------
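# Round trip: JSON text -> dict -> PSUVersion -> dict -> JSON text; each
# stage is printed so the output can be compared by eye.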
jstr = '{"id": "South Coast Science PSU", "tag": "1.2.3", "c-date": "Aug 8 2017", "c-time": "08:35:25"}'
print(jstr)
print("-")
jdict = json.loads(jstr)
print(jdict)
print("-")
status = PSUVersion.construct_from_jdict(jdict)
print(status)
print("-")
jdict = status.as_json()
print(jdict)
print("-")
jstr = JSONify.dumps(jdict)
print(jstr)
print("-")
| [
"[email protected]"
] | |
3206f80ebb7cd794ba25b8f13f14e0c5a68d2477 | 6f8c5d58ccd771d1ba92dc053b54caa44be9515c | /inst_fluid_en/models.py | 7fba96ece1c1d3dfd9ed2d96070362bda8abf302 | [
"MIT"
] | permissive | manumunoz/8004 | b6f32cf446a3d0d66988d34a03a252ba4d885da1 | 06b5a5a8cdea53294ff85c4bedb6be163d2da25a | refs/heads/master | 2020-03-24T22:24:37.155460 | 2019-03-28T07:02:18 | 2019-03-28T07:02:18 | 143,083,725 | 0 | 0 | NOASSERTION | 2018-12-07T15:17:27 | 2018-08-01T00:29:45 | HTML | UTF-8 | Python | false | false | 4,478 | py | from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import itertools
author = 'Manu Munoz'
doc = """
Identity Switch - Networks: Instructions FLUID
"""
class Constants(BaseConstants):
#------------------------------------------
name_in_url = 'inst_fluid_en'
names = ['1','2','3','4','5','6','7']
players_per_group = len(names)
instructions_template = 'inst_fluid_en/Instructions.html'
periods = 1
num_rounds = periods
#------------------------------------------
# Treatment & Group parameters
others = len(names) - 1
total_circles = 4
total_triangles = 3
part_name = 1
part_fixed = 2
part_fluid = 3
part_alloc = 4
rounds_fixed = 10
#------------------------------------------
# Payoffs
exp_currency = "points"
currency = "pesos"
currency_exchange = 1000
points_exchange = 1
min_pay = 10000
link_cost = 2
liked_gain = 6
disliked_gain = 4
switch_cost = 6
#------------------------------------------
# Group Names
group_a = 'Lions' #Leones
group_b = 'Tigers' #Tigres
group_c = 'Leopards' #Leopardos
group_d = 'Jaguars' #Jaguares
group_e = 'Cats' #Gatos
group_f = 'Coyotes' #Coyotes
group_g = 'Jackals' #Chacales
group_h = 'Wolves' #Lobos
group_i = 'Foxes' #Zorros
group_j = 'Dogs' #Perros
#------------------------------------------
class Subsession(BaseSubsession):
def creating_session(self):
treat = itertools.cycle([1, 2, 3, 4, 5, 6])
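        # itertools.cycle deals treatments 1..6 round-robin, keeping the six
        # conditions balanced across players when none is forced by config.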
# 1: Full-Free, 2: Sticky-Free, 3: Blurry-Free, 4: Full-Cost, 5: Sticky-Cost, 6: Blurry-Cost
# for p in self.get_players():
# p.treat = next(treat)
for p in self.get_players():
if 'treatment' in self.session.config:
# demo mode
p.treat = self.session.config['treatment']
else:
# live experiment mode
p.treat = next(treat)
class Group(BaseGroup):
pass
class Player(BasePlayer):
treat = models.IntegerField() # Treatments from 1 to 6
given_group = models.PositiveIntegerField(
choices=[
[1, 'It is fixed and does not change'],
[2, 'The computer changes it in each round'],
[3, 'I can change it in each round'],
],
widget=widgets.RadioSelect
)
appearance = models.PositiveIntegerField(
choices=[
[1, 'It is fixed and does not change'],
[2, 'The computer changes it in each round'],
[3, 'I can change it in each round by changing my group'],
],
widget=widgets.RadioSelect
)
label = models.PositiveIntegerField(
choices=[
[1, 'It is fixed and does not change'],
[2, 'The computer changes it in each round'],
[3, 'I can change it in each round'],
],
widget=widgets.RadioSelect
)
pay_coord = models.PositiveIntegerField(
choices=[
[1, 'I gain 6 and pay the cost of 2 = 4 points in total'],
[2, 'I gain 4 and pay the cost of 2 = 2 points in total'],
[3, 'I gain 0 and pay the cost of 2 = -2 points in total']
],
widget=widgets.RadioSelect
)
pay_coord2 = models.PositiveIntegerField(
choices=[
[1, 'I gain 6 and pay the cost of 2 = 4 points in total'],
[2, 'I gain 4 and pay the cost of 2 = 2 points in total'],
[3, 'I gain 0 and pay the cost of 2 = -2 points in total']
],
widget=widgets.RadioSelect
)
information = models.PositiveIntegerField(
choices=[
[1, 'They can see the group I choose and my new appearance'],
[2, 'They can see the group I choose and my appearance from Part {}'.format(Constants.part_fixed)],
[3, 'They cannot see the group I choose only my appearance from Part {}'.format(Constants.part_fixed)],
],
widget=widgets.RadioSelect
)
def vars_for_template(self):
return {
'circles_name': self.participant.vars['circles_name'],
'triangles_name': self.participant.vars['triangles_name'],
'circles_label': self.participant.vars['circles_label'],
'triangles_label': self.participant.vars['triangles_label'],
'names': len(Constants.names)
} | [
"[email protected]"
] | |
389d80a51e57aab9b9e90dd9cf236befa5c6cd93 | 457c673c8c8d704ec150322e4eeee2fde4f827ca | /Python Fundamentals - January 2020/Functions/03_Repeat_string.py | e36aa36cc0812058b32ec3f857f6917eeb7e1787 | [] | no_license | xMrShadyx/SoftUni | 13c08d56108bf8b1ff56d17bb2a4b804381e0d4e | ce4adcd6e8425134d138fd8f4b6101d4eb1c520b | refs/heads/master | 2023-08-02T03:10:16.205251 | 2021-06-20T05:52:15 | 2021-06-20T05:52:15 | 276,562,926 | 5 | 1 | null | 2021-09-22T19:35:25 | 2020-07-02T06:07:35 | Python | UTF-8 | Python | false | false | 94 | py | def repeat(s, rep):
return s * rep
s = input()
rep = int(input())
print(repeat(s, rep))
| [
"[email protected]"
] | |
b1a7b240bbf7f17341177d6f03acfd50f21dbac8 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc094/B/4253154.py | 6d7383b45734c25c5fe494a308375919ce98d92e | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def solve(a, b):
a, b = min(a, b), max(a, b)
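    # Scan downward from just above sqrt(a*b) for the largest c with
    # c*c < a*b; the answer is 2*c - 2 when c*(c + 1) >= a*b, else 2*c - 1.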
if a == b:
return 2 * a - 2
c = int(math.sqrt(a * b)) + 2
while True:
if c * c < a * b:
if c * (c + 1) >= a * b:
return 2 * c - 2
else:
return 2 * c - 1
else:
c -= 1
Q = int(input())
for _ in range(Q):
a, b = map(int, input().split())
print(solve(a, b)) | [
"[email protected]"
] | |
4dd6abce27312bede65ef9f29a7d675967b34fe5 | 0f799b88204d002d655f48894a3095bf006f9d1f | /chapter12/file_pickle.py | 282b759494246bb4280bf6f94244a262ea552c48 | [] | no_license | liuyuzhou/pythonsourcecode | 4008b4adf0342e61a1b6c03f12ab55ad04252e7e | 476c75ff6b83953565175a66e21cad3139f06ad2 | refs/heads/master | 2020-05-16T01:39:20.518816 | 2019-04-22T02:48:49 | 2019-04-22T02:48:49 | 182,607,905 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | import pickle
d = dict(name='xiao zhi', num=1002)
print(pickle.dumps(d))
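# pickle.dumps returns the serialized bytes; pickle.loads(...) would
# reconstruct an equal dict.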
| [
"[email protected]"
] | |
049e4a9ab11ab90ed899286b280e96eef25db4de | cadd27a5c72644fe87940e156e4f40f1131b9a57 | /udemy/_internal.py | 0cc13463a4fdad5b5c127e648080b4c565dc88a2 | [
"MIT"
] | permissive | mrbrazzi/udemy-dl | 1ef576800bd01ed2724911144a1fd8bad70b18f9 | 0f0a3ff00167b3b4614d5afc0d4dc6461e46be97 | refs/heads/master | 2022-11-13T17:41:38.170496 | 2020-07-01T18:17:24 | 2020-07-01T18:17:24 | 274,795,490 | 1 | 0 | MIT | 2020-06-25T00:16:33 | 2020-06-25T00:16:32 | null | UTF-8 | Python | false | false | 6,961 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2020 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import time
from ._colorized import *
from ._extract import Udemy
from ._shared import (
UdemyCourse,
UdemyChapters,
UdemyLectures,
UdemyLectureStream,
UdemyLectureAssets,
UdemyLectureSubtitles
)
class InternUdemyCourse(UdemyCourse, Udemy):
def __init__(self, *args, **kwargs):
self._info = ''
super(InternUdemyCourse, self).__init__(*args, **kwargs)
def _fetch_course(self):
if self._have_basic:
return
if not self._cookies:
auth = self._login(username=self._username, password=self._password)
if self._cookies:
auth = self._login(cookies=self._cookies)
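        # self._login is expected to return a dict whose 'login' key is
        # either 'successful' or 'failed' (both branches handled below).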
if auth.get('login') == 'successful':
sys.stdout.write(fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sb + "Logged in successfully.\n")
sys.stdout.write('\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading course information .. \r")
self._info = self._real_extract(self._url)
time.sleep(1)
sys.stdout.write('\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloaded course information .. (done)\r\n")
self._id = self._info['course_id']
self._title = self._info['course_title']
self._chapters_count = self._info['total_chapters']
self._total_lectures = self._info['total_lectures']
self._chapters = [InternUdemyChapter(z) for z in self._info['chapters']]
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Trying to logout now...\n")
if not self._cookies:
self._logout()
sys.stdout.write(fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sb + "Logged out successfully.\n")
self._have_basic = True
if auth.get('login') == 'failed':
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Failed to login ..\n")
sys.exit(0)
class InternUdemyChapter(UdemyChapters):
def __init__(self, chapter):
super(InternUdemyChapter, self).__init__()
self._chapter_id = chapter['chapter_id']
self._chapter_title = chapter['chapter_title']
self._unsafe_title = chapter['unsafe_chapter']
self._chapter_index = chapter['chapter_index']
self._lectures_count = chapter.get('lectures_count', 0)
self._lectures = [InternUdemyLecture(z) for z in chapter['lectures']] if self._lectures_count > 0 else []
class InternUdemyLecture(UdemyLectures):
def __init__(self, lectures):
super(InternUdemyLecture, self).__init__()
self._info = lectures
self._lecture_id = self._info['lectures_id']
self._lecture_title = self._info['lecture_title']
self._unsafe_title = self._info['unsafe_lecture']
self._lecture_index = self._info['lecture_index']
self._subtitles_count = self._info.get('subtitle_count', 0)
self._sources_count = self._info.get('sources_count', 0)
self._assets_count = self._info.get('assets_count', 0)
self._extension = self._info.get('extension')
self._html_content = self._info.get('html_content')
self._duration = self._info.get('duration')
if self._duration:
duration = int(self._duration)
(mins, secs) = divmod(duration, 60)
(hours, mins) = divmod(mins, 60)
if hours == 0:
self._duration = "%02d:%02d" % (mins, secs)
else:
self._duration = "%02d:%02d:%02d" % (hours, mins, secs)
def _process_streams(self):
streams = [InternUdemyLectureStream(z, self) for z in self._info['sources']] if self._sources_count > 0 else []
self._streams = streams
def _process_assets(self):
assets = [InternUdemyLectureAssets(z, self) for z in self._info['assets']] if self._assets_count > 0 else []
self._assets = assets
def _process_subtitles(self):
subtitles = [InternUdemyLectureSubtitles(z, self) for z in self._info['subtitles']] if self._subtitles_count > 0 else []
self._subtitles = subtitles
class InternUdemyLectureStream(UdemyLectureStream):
def __init__(self, sources, parent):
super(InternUdemyLectureStream, self).__init__(parent)
self._mediatype = sources.get('type')
self._extension = sources.get('extension')
height = sources.get('height', 0)
width = sources.get('width', 0)
self._resolution = '%sx%s' % (width, height)
self._dimention = width, height
self._quality = self._resolution
self._url = sources.get('download_url')
class InternUdemyLectureAssets(UdemyLectureAssets):
def __init__(self, assets, parent):
super(InternUdemyLectureAssets, self).__init__(parent)
self._mediatype = assets.get('type')
self._extension = assets.get('extension')
self._filename = '{0:03d} {1!s}'.format(parent._lecture_index, assets.get('filename'))
self._url = assets.get('download_url')
class InternUdemyLectureSubtitles(UdemyLectureSubtitles):
def __init__(self, subtitles, parent):
super(InternUdemyLectureSubtitles, self).__init__(parent)
self._mediatype = subtitles.get('type')
self._extension = subtitles.get('extension')
self._language = subtitles.get('language')
self._url = subtitles.get('download_url')
| [
"[email protected]"
] | |
6d9aa23aac7fc83f966804c5d25d3a1d9096f76b | 81f9d88a560edb2b92997855c6445628cf0e6eaa | /homura/__init__.py | 53bd57eca602723adbb53d18674e93be0451c1cd | [
"Apache-2.0"
] | permissive | haiyan-he/homura | e1de6e0162a0a3d5c2f2ce142f551562dc30f4c3 | 2b98d4e0071b926233869e5396a02e70638d19f7 | refs/heads/master | 2023-02-06T18:25:15.557698 | 2020-10-01T07:54:23 | 2020-10-01T07:54:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from .register import Registry
from .utils import TensorDataClass, TensorTuple, distributed_print, enable_accimage, get_args, get_environ, \
get_git_hash, get_global_rank, get_local_rank, get_num_nodes, get_world_size, if_is_master, init_distributed, \
is_accimage_available, is_distributed, is_distributed_available, is_faiss_available, is_master, set_deterministic, \
set_seed
Registry.import_modules('homura.vision')
| [
"[email protected]"
] | |
10682582ea1863fd922d5da0927d22778286f60e | 38c35956be6343855914b1c58b8fbd2e40c6e615 | /Grafos/2131.py | 2dd6d299343e38504e64253ea933e1dd5ba17cd3 | [] | no_license | LucasBarbosaRocha/URI | b43e4f4a6b3beed935f24839001bea354411c4bd | 2c9bcc13300a9f6243242e483c8f9ec3296a88ad | refs/heads/master | 2020-06-25T05:06:51.297824 | 2019-08-22T04:50:11 | 2019-08-22T04:50:11 | 199,210,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | # -*- coding: utf-8 -*-
# Funcao que cria um grafo
def cria_grafo(lista_de_vs, lista_de_arestas):
grafo = {}
for v in lista_de_vs:
grafo[v] = []
for aresta in lista_de_arestas:
grafo[aresta[0]].append(aresta[1])
return grafo
# Busca em profundidade personalizada
def dfs_iterative(grafo, i, n, verticesValidos):
cores = [-1]*(n+1)
for j in range(i, n + 1):
if (j in verticesValidos):
stack = [j]
cores[j] = 1
while stack:
v = stack.pop()
#print ("BLABLA")
#print (stack)
for adjacencia in grafo[v]:
if (cores[adjacencia] == -1): # Em y (adjacencias) eh a cor invertida da cor do pai
cores[adjacencia] = 1 - cores[v]
stack.append(adjacencia) # Coloco a adjacencia na pilha
elif (cores[adjacencia] == cores[v]): # Se a adjacencia tiver a mesma cor que o pai nao eh bipartido
return False
verticesValidos.remove(v)
#print (cores)
return True
k = 1
while True:
try:
entrada = raw_input().split(" ")
n = int(entrada[0])
m = int(entrada[1])
vertices = []
verticesValidos = []
for i in range(1, n + 1):
vertices.append(i)
arestas = []
grafo = []
caminho = []
totalArestas = 0
print ("Instancia %d" %k)
# Verificar se o grafo eh bipartido
#print ("AQUI")
#print(n, m)
for i in range(m):
entrada = raw_input().split(" ")
v1 = int(entrada[0])
v2 = int(entrada[1])
if (verticesValidos == []):
verticesValidos.append(v1)
verticesValidos.append(v2)
if (v1 not in verticesValidos):
verticesValidos.append(v1)
if (v2 not in verticesValidos):
verticesValidos.append(v2)
arestas.append((v1, v2))
#arestas.append((v2, v1))
grafo = cria_grafo(verticesValidos, arestas)
#print (grafo)
#print (verticesValidos)
if (m == 0 or dfs_iterative(grafo, verticesValidos[0], n, verticesValidos) == True):
print ("sim\n")
else:
print ("nao\n")
k = k + 1
except :
break
| [
"[email protected]"
] | |
3b29475196e55b01c9672bfe50448f45d59c16f9 | 9ffd8754679f363f7c03fa2873dfd3b1f5af41a7 | /UserRegistration/admin.py | 90d6d157eb401162e4d94faa6e1e2727258d82ca | [] | no_license | cmrajib/django_fashion_ecommerce | 54848c7f3f1ede349bad77533647cd161d86c413 | 141e87ad77f4a4503e487de8ad360789a9e272f4 | refs/heads/main | 2023-02-21T12:31:07.457942 | 2021-01-19T17:58:31 | 2021-01-19T17:58:31 | 331,065,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from django.contrib import admin
# Register your models here.
from UserRegistration.models import User, Coupon
class UserAdmin(admin.ModelAdmin):
list_display = ('email', 'full_name')
list_display_links = ('email', 'full_name')
# list_filter = ('user__email','full_name','city')
# list_editable = ('is_featured',)
    search_fields = ('full_name', 'phone')
list_per_page = 10
admin.site.register(User, UserAdmin)
admin.site.register(Coupon) | [
"[email protected]"
] | |
b5aa8a920de266e4b7fb60ea573ed638ecf93516 | ab4f74d127bfc89813ee359bb9c779eca5426ddc | /script/label_image.runfiles/org_tensorflow/tensorflow/python/ops/spectral_ops.py | 101e33656cb9435c28ff2b5093959cbdf275dfad | [
"MIT"
] | permissive | harshit-jain-git/ImageNET | cdfd5a340b62862ad8d1cc3b9a0f30cccc481744 | 1cd4c2b70917e4709ce75422c0205fe3735a1b01 | refs/heads/master | 2022-12-11T12:47:46.795376 | 2017-12-19T05:47:26 | 2017-12-19T05:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | /home/co/Documents/ImageClassifier/tensorflow/tensorflow/python/ops/spectral_ops.py | [
"[email protected]"
] | |
4049996983313e176aa4972cf259096df6168fe7 | eee6dd18897d3118f41cb5e6f93f830e06fbfe2f | /venv/lib/python3.6/site-packages/scipy/sparse/spfuncs.py | a5aeb7d325475944cf4927075b955f7bbfa4a436 | [] | no_license | georgeosodo/ml | 2148ecd192ce3d9750951715c9f2bfe041df056a | 48fba92263e9295e9e14697ec00dca35c94d0af0 | refs/heads/master | 2020-03-14T11:39:58.475364 | 2018-04-30T13:13:01 | 2018-04-30T13:13:01 | 131,595,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | """ Functions that operate on sparse matrices
"""
__all__ = ['count_blocks','estimate_blocksize']
from .csr import isspmatrix_csr, csr_matrix
from .csc import isspmatrix_csc
from ._sparsetools import csr_count_blocks
def extract_diagonal(A):
raise NotImplementedError('use .diagonal() instead')
#def extract_diagonal(A):
# """extract_diagonal(A) returns the main diagonal of A."""
# #TODO extract k-th diagonal
# if isspmatrix_csr(A) or isspmatrix_csc(A):
# fn = getattr(sparsetools, A.format + "_diagonal")
# y = empty( min(A.shape), dtype=upcast(A.dtype) )
# fn(A.shape[0],A.shape[1],A.indptr,A.indices,A.data,y)
# return y
# elif isspmatrix_bsr(A):
# M,N = A.shape
# R,C = A.blocksize
# y = empty( min(M,N), dtype=upcast(A.dtype) )
# fn = sparsetools.bsr_diagonal(M//R, N//C, R, C, \
# A.indptr, A.indices, ravel(A.data), y)
# return y
# else:
# return extract_diagonal(csr_matrix(A))
def estimate_blocksize(A,efficiency=0.7):
"""Attempt to determine the blocksize of a sparse matrix
Returns a blocksize=(r,c) such that
- A.nnz / A.tobsr( (r,c) ).nnz > efficiency
"""
if not (isspmatrix_csr(A) or isspmatrix_csc(A)):
A = csr_matrix(A)
if A.nnz == 0:
return (1,1)
if not 0 < efficiency < 1.0:
raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
high_efficiency = (1.0 + efficiency) / 2.0
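    # Heuristic: e_rc = nnz / (r*c*block_count) measures how full the occupied
    # r x c blocks are; larger blocks are accepted only while the fill ratio
    # stays above the efficiency threshold.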
nnz = float(A.nnz)
M,N = A.shape
if M % 2 == 0 and N % 2 == 0:
e22 = nnz / (4 * count_blocks(A,(2,2)))
else:
e22 = 0.0
if M % 3 == 0 and N % 3 == 0:
e33 = nnz / (9 * count_blocks(A,(3,3)))
else:
e33 = 0.0
if e22 > high_efficiency and e33 > high_efficiency:
e66 = nnz / (36 * count_blocks(A,(6,6)))
if e66 > efficiency:
return (6,6)
else:
return (3,3)
else:
if M % 4 == 0 and N % 4 == 0:
e44 = nnz / (16 * count_blocks(A,(4,4)))
else:
e44 = 0.0
if e44 > efficiency:
return (4,4)
elif e33 > efficiency:
return (3,3)
elif e22 > efficiency:
return (2,2)
else:
return (1,1)
def count_blocks(A,blocksize):
"""For a given blocksize=(r,c) count the number of occupied
blocks in a sparse matrix A
"""
r,c = blocksize
if r < 1 or c < 1:
raise ValueError('r and c must be positive')
if isspmatrix_csr(A):
M,N = A.shape
return csr_count_blocks(M,N,r,c,A.indptr,A.indices)
elif isspmatrix_csc(A):
return count_blocks(A.T,(c,r))
else:
return count_blocks(csr_matrix(A),blocksize)
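# A small usage sketch (illustrative only; the result depends on the matrix):
# >>> from scipy.sparse import rand
# >>> A = rand(60, 60, density=0.1, format='csr')
# >>> estimate_blocksize(A)   # typically (1, 1) for unstructured sparsity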
| [
"[email protected]"
] | |
fa00dc55b7191039833d1a3dea845bcd117e6125 | 55e9f3b00fc2e488597bab5225ed321c86efbd4b | /sdk/yapily/configuration.py | e060bb0c81cb4f122ad71cd083264392d20f804d | [
"MIT"
] | permissive | bs-yapily/yapily-sdk-python | ad9d04c28f3d744830734c3444c1cef8215206fd | 0bba45e351b674eb655425a51190f539c4e9896f | refs/heads/master | 2020-08-26T17:18:53.156429 | 2019-10-22T11:01:16 | 2019-10-22T11:01:16 | 217,085,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,451 | py | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
def __init__(cls, name, bases, dct):
super(TypeWithDefault, cls).__init__(name, bases, dct)
cls._default = None
def __call__(cls):
if cls._default is None:
cls._default = type.__call__(cls)
return copy.copy(cls._default)
def set_default(cls, default):
cls._default = copy.copy(default)
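# The metaclass above makes Configuration() hand out a copy of one shared
# default instance, so per-client changes never mutate the global default.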
class Configuration(six.with_metaclass(TypeWithDefault, object)):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""Constructor"""
# Default Base url
self.host = "https://api.yapily.com"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# access token for OAuth
self.access_token = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("yapily")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if (self.api_key.get(identifier) and
self.api_key_prefix.get(identifier)):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501
elif self.api_key.get(identifier):
return self.api_key[identifier]
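    # Illustrative only (hypothetical key name and token, not part of the
    # generated client): with api_key['Authorization'] = 'abc123' and
    # api_key_prefix['Authorization'] = 'Bearer', the call
    # get_api_key_with_prefix('Authorization') returns 'Bearer abc123'.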
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
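    # Sketch of the returned value (hypothetical credentials): with
    # username='user' and password='pass', urllib3's make_headers yields
    # {'authorization': 'Basic dXNlcjpwYXNz'}, so this method returns the
    # string 'Basic dXNlcjpwYXNz'.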
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'basicAuth':
{
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
},
'tokenAuth':
{
'type': 'oauth2',
'in': 'header',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
},
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 0.0.155\n"\
"SDK Package Version: 1.0.0".\
format(env=sys.platform, pyversion=sys.version)
| [
"[email protected]"
] | |
621d2aad3eb42bb8dbd059d46d0a17ff7f170215 | a8f6a8afd6b3609a947cafad5988d025454b4f9c | /datesFromLogs_Test.py | c15388862c6f16360d4caae0cd5764dee4ff4481 | [] | no_license | andreodendaal/100DaysOfCode | aede59a6e1f3f3ada30a1a534548939a7b9b375f | 282bf21c2d75fcd562ae935fa23e41a7c4c0cb45 | refs/heads/master | 2020-03-20T17:21:44.649611 | 2019-02-27T17:22:56 | 2019-02-27T17:22:56 | 137,557,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | import unittest
from datesFromLogs_d2 import datetime, timedelta
from datesFromLogs_d2 import loglines, convert_to_datetime, time_between_shutdowns
class TestDatesFromLogs(unittest.TestCase):
def test_convert_to_datetime(self):
line1 = 'ERROR 2014-07-03T23:24:31 supybot Invalid user dictionary file'
line2 = 'INFO 2015-10-03T10:12:51 supybot Shutdown initiated.'
line3 = 'INFO 2016-09-03T02:11:22 supybot Shutdown complete.'
self.assertEqual(convert_to_datetime(line1), datetime(2014, 7, 3, 23, 24, 31))
self.assertEqual(convert_to_datetime(line2), datetime(2015, 10, 3, 10, 12, 51))
self.assertEqual(convert_to_datetime(line3), datetime(2016, 9, 3, 2, 11, 22))
def test_time_between_events(self):
diff = time_between_shutdowns(loglines)
self.assertEqual(type(diff), timedelta)
self.assertEqual(str(diff), '0:03:31')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
aa8d5d425f8c8c07e9aa8bd8b01d8c38944a0232 | 295ecf4f254c42e9201657ef0a13ec2c68c40c9b | /buy/forms.py | c80566d1a697136ed495d48bc28f1acb92a3c021 | [] | no_license | zwolf21/StockAdmin-pre2 | 0236061284a6fe8801591608591d21129d4ea7c0 | b21d069ff215c17ce3bca040ecf9b8f48b452ed4 | refs/heads/master | 2021-05-01T09:28:59.818469 | 2016-11-30T17:33:30 | 2016-11-30T17:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from django import forms
from .models import BuyItem, Buy
from datetime import date
class CreateBuyForm(forms.ModelForm):
    # Use the callable so the initial value is evaluated per request, not once at import time.
    date = forms.DateField(initial=date.today, widget=forms.TextInput(attrs={'tabindex':'-1','readonly':'readonly'}))
class Meta:
model = Buy
fields = ['date']
class BuyItemAddForm(forms.ModelForm):
    # name = forms.CharField(label='Drug name', required=False)
    amount = forms.IntegerField(label='Quantity', required=False, help_text='Adjust the quantity with the up/down arrow keys')
class Meta:
model = BuyItem
fields = ['amount']
        help_texts = {'amount': 'Adjust the quantity with the up/down arrow keys'}
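# Usage sketch (added illustration; hypothetical view-layer code, not part of
# this module):
#     form = BuyItemAddForm(data={'amount': 3})
#     if form.is_valid():
#         item = form.save(commit=False)  # attach the parent Buy before saving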
| [
"[email protected]"
] | |
c6ab3e4068fda9c8202f9017c37654175a68e019 | 393a387cdb286cde75b4b7d760625d5851b6b080 | /Sorting items from user in alphabetical and reverse.py | 90a60ccf36cb5d37082e64d25c13965862512f11 | [] | no_license | nami-h/Python | b57f12ae48d5bc17a3de72ec7c5abb5622ba0cd2 | 7b067950da29df705237405742564d2f127f1446 | refs/heads/master | 2021-06-27T16:00:10.113762 | 2020-09-22T19:59:05 | 2020-09-22T19:59:05 | 136,550,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | lis=[]
num=int(input("How many animals do you want to enter? "))
for _ in range(num):
    animal = input("Enter animal: ")
    lis.append(animal)
animals=['horse','cat','mouse']
s=lis+animals
print("Our list consists of: ", s)
s.sort()
print("Alphabetically ordered: ", s)
s.reverse()
print("Reverse ordered: ",s)
| [
"[email protected]"
] | |
75abc5b6b01a6dcbb9c8d615f21e672899c50936 | f9e3a0fb511470561d3d94bc984dafaee06000cb | /PP4E-Examples-1.4/Examples/PP4E/Preview/person_start.py | c1ba77cd5ea9c9f2d7400fccde6645dfcf752e38 | [] | no_license | Sorath93/Programming-Python-book | 359b6fff4e17b44b9842662f484bbafb490cfd3d | ebe4c93e265edd4ae135491bd2f96904d08a911c | refs/heads/master | 2022-12-03T01:49:07.815439 | 2020-08-16T22:19:38 | 2020-08-16T22:19:38 | 287,775,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | class Person:
def __init__(self, name, age, pay=0, job=None):
self.name = name
self.age = age
self.pay = pay
self.job = job
if __name__ == '__main__':
bob = Person('Bob Smith', 42, 30000, 'software')
sue = Person('Sue Jones', 45, 40000, 'hardware')
print(bob.name, sue.pay)
print(bob.name.split()[-1])
sue.pay *= 1.10
print(sue.pay)
| [
"[email protected]"
] | |
5e46bc761055167ebad6fd4ddfafcf2a93b274cb | a7a3863acaf31d0fb679a482000c6ff89636a55c | /python/main_test_lfads_old.py | 64bb6d1eb50d2a138072ffea80eb887c69bcd805 | [] | no_license | gviejo/SWR_factors | 313280926b3e1f895578ca453c474900e1a3f320 | 87c74106c26b7c7ec03a197858b8013f10f74780 | refs/heads/master | 2021-11-10T02:57:39.280051 | 2021-11-05T01:37:58 | 2021-11-05T01:37:58 | 176,997,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,639 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lfads import LFADS
import numpy as np
import os
import tensorflow as tf
import re
import sys
import h5py
from functions import *
from time import time
from lfads.utils import *
from lfads.functions import *
import pickle
from sklearn.model_selection import train_test_split
hps = hps_dict_to_obj({
"data_dir": '../data/', #"Data for training"
"lfads_save_dir": '../data/lfads_test', #"model save dir"
"kind": "train", #"Type of model to build {train, posterior_sample_and_average, posterior_push_mean, prior_sample, write_model_params"
"output_dist": 'poisson', #"Type of output distribution, 'poisson' or 'gaussian'"
"allow_gpu_growth": False, #"If true, only allocate amount of memory needed for Session. Otherwise, use full GPU memory."
"checkpoint_pb_load_name": 'checkpoint', #"Name of checkpoint files, use 'checkpoint_lve' for best error"
"checkpoint_name": 'lfads_vae', #"Name of checkpoint files (.ckpt appended)"
"output_filename_stem": '', #"Name of output file (postfix will be added)"
"device": 'gpu:0', #"Which device to use (default: \"gpu:0\", can also be \"cpu:0\", \"gpu:1\", etc)"
"csv_log": 'fitlog', #"Name of file to keep running log of fit likelihoods, etc (.csv appended)"
"max_ckpt_to_keep": 5, #"Max # of checkpoints to keep (rolling)"
"ps_nexamples_to_process": sys.maxsize, #"Number of examples to process for posterior sample and average (not number of samples to average over)."
"max_ckpt_to_keep_lve": 5, #"Max # of checkpoints to keep for lowest validation error models (rolling)"
"ext_input_dim": 0, #"Dimension of external inputs"
"num_steps_for_gen_ic": sys.maxsize, #"Number of steps to train the generator initial conditon."
"inject_ext_input_to_gen": False, #"Should observed inputs be input to model via encoders, or injected directly into generator?"
"cell_weight_scale": 1.0, #"Input scaling for input weights in generator."
"ic_dim": 64, #"Dimension of h0"
"factors_dim": 50, #"Number of factors from generator"
"ic_enc_dim": 128, #"Cell hidden size, encoder of h0"
"gen_dim": 200, #"Cell hidden size, generator."
"gen_cell_input_weight_scale": 1.0, #"Input scaling for input weights in generator."
"gen_cell_rec_weight_scale": 1.0, #"Input scaling for rec weights in generator."
"ic_prior_var_min": 0.1, #"Minimum variance in posterior h0 codes."
"ic_prior_var_scale": 0.1, #"Variance of ic prior distribution"
"ic_prior_var_max": 0.1, #"Maximum variance of IC prior distribution."
"ic_post_var_min": 0.0001, #"Minimum variance of IC posterior distribution."
"co_prior_var_scale": 0.1, #"Variance of control input prior distribution."
"prior_ar_atau": 10.0, #"Initial autocorrelation of AR(1) priors."
"prior_ar_nvar": 0.1, #"Initial noise variance for AR(1) priors."
"do_train_prior_ar_atau": True, #"Is the value for atau an init: or the constant value?"
"do_train_prior_ar_nvar": True, #"Is the value for noise variance an init, or the constant value?"
"co_dim": 1, #"Number of control net outputs (>0 builds that graph)."
"do_causal_controller": False, #"Restrict the controller create only causal inferred inputs?"
"do_feed_factors_to_controller": True, #"Should factors[t-1] be input to controller at time t?"
"feedback_factors_or_rates": 'factors', #"Feedback the factors or the rates to the controller? Acceptable values: 'factors' or 'rates'."
"controller_input_lag": 1, #"Time lag on the encoding to controller t-lag for forward, t+lag for reverse."
"ci_enc_dim": 128, #"Cell hidden size: encoder of control inputs"
"con_dim": 128, #"Cell hidden size, controller"
"batch_size": 400, #"Batch size to use during training."
"learning_rate_init": 0.01, #"Learning rate initial value"
"learning_rate_decay_factor": 0.95, #"Learning rate decay, decay by this fraction every so often."
"learning_rate_stop": 0.00005, #"The lr is adaptively reduced, stop training at this value."
"learning_rate_n_to_compare": 10, #"Number of previous costs current cost has to be worse than, to lower learning rate."
"max_grad_norm": 200.0, #"Max norm of gradient before clipping."
"cell_clip_value": 5.0, #"Max value recurrent cell can take before being clipped."
"do_train_io_only": False, #"Train only the input (readin) and output (readout) affine functions."
"do_train_encoder_only": False, #"Train only the encoder weights."
"do_reset_learning_rate": False, #"Reset the learning rate to initial value."
"do_train_readin": True, #"Whether to train the readin matrices and bias vectors. False leaves them fixed at their initial values specified by the alignment matrices and vectors."
"keep_prob": 0.95, #"Dropout keep probability."
"temporal_spike_jitter_width": 0, #"Shuffle spikes around this window."
"l2_gen_scale": 2000.0, #"L2 regularization cost for the generator only."
"l2_con_scale": 0.0, #"L2 regularization cost for the controller only."
"co_mean_corr_scale": 0.0, #"Cost of correlation (thru time)in the means of controller output."
"kl_ic_weight": 1.0, #"Strength of KL weight on initial conditions KL penatly."
"kl_co_weight": 1.0, #"Strength of KL weight on controller output KL penalty."
"kl_start_step": 0, #"Start increasing weight after this many steps."
"kl_increase_steps": 2000, #"Increase weight of kl cost to avoid local minimum."
"l2_start_step": 0, #"Start increasing l2 weight after this many steps."
"l2_increase_steps": 2000, #"Increase weight of l2 cost to avoid local minimum."
})
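# (Added note) hps_dict_to_obj wraps the dict above so each entry is exposed
# as an attribute, e.g. hps.batch_size and hps.kind, which is how the values
# are read throughout the rest of this script.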
t1 = time()
#####################################################################################
# load_datasets
#####################################################################################
datasets = pickle.load(open("../data/swr_hist_Mouse12.pickle", "rb"))
# s = list(datasets.keys())[0]
datasets = {s:datasets[s] for s in list(datasets.keys())[0:3]}
for s in datasets:
for k in ['train_truth', 'train_ext_input', 'valid_data','valid_truth', 'valid_ext_input', 'valid_train']:
if k not in datasets[s]:
datasets[s][k] = None
datasets[s]['all_data'] = datasets[s]['train_data']
hps.dataset_names = list(datasets.keys())
hps.dataset_dims = {k:datasets[k]['data_dim'] for k in datasets}
hps.num_steps = datasets[list(datasets.keys())[0]]['num_steps']
hps.ndatasets = len(hps.dataset_names)
if hps.num_steps_for_gen_ic > hps.num_steps: hps.num_steps_for_gen_ic = hps.num_steps
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as session:
#####################################################################################
# train
#####################################################################################
#####################################################################################
# build_model(hps, kind='train', datasets = datasets)
#####################################################################################
with tf.variable_scope("LFADS", reuse=None):
model = LFADS(hps, kind='train', datasets=datasets)
tf.global_variables_initializer().run()
session.run(model.learning_rate.initializer)
#####################################################################################
# model.train_model(datasets)
#####################################################################################
lr = session.run(model.learning_rate)
lr_stop = hps.learning_rate_stop
train_costs = []
valid_costs = []
learning_rates = []
count = 0
t1 = time()
while True:
learning_rates.append(lr)
#####################################
# shuffling between train and valid
#####################################
for s in datasets:
data_train, data_valid = train_test_split(datasets[s]['all_data'])
datasets[s]['train_data'] = data_train #[0:50]
datasets[s]['valid_data'] = data_valid #[0:10]
#####################################################################################
# self.train_epochs(datasets, do_save_ckpt=do_save_ckpt)
#####################################################################################
ops_to_eval = [model.cost, model.recon_cost, model.kl_cost, model.kl_weight, model.l2_cost, model.l2_weight, model.train_op]
#####################################################################################
# self.run_epochs(datasets, ops_to_eval, kind="train")
#####################################################################################
all_name_example_idx_pairs = model.shuffle_and_flatten_datasets(datasets, hps.kind)
collected_op_values = np.zeros((6,len(all_name_example_idx_pairs)))
for j, (name, example_idxs) in enumerate(all_name_example_idx_pairs):
data_dict = datasets[name]
data_bxtxd, ext_input_bxtxi = model.get_batch(data_dict['train_data'], data_dict['train_ext_input'],example_idxs=example_idxs)
feed_dict = model.build_feed_dict(name, data_bxtxd, ext_input_bxtxi, keep_prob=None)
evaled_ops_np = session.run(ops_to_eval, feed_dict=feed_dict)
collected_op_values[:,j] = np.array(evaled_ops_np[0:6])
#####################################################################################
mean_cost = collected_op_values.mean(1)
tr_total_cost, tr_recon_cost, tr_kl_cost, kl_weight, l2_cost, l2_weight = mean_cost
#####################################################################################
#####################################################################################
# self.eval_cost_epoch(datasets, kind='valid')
#####################################################################################
ops_to_eval = [model.cost, model.recon_cost, model.kl_cost]
#####################################################################################
# self.run_epochs(datasets, ops_to_eval, kind="valid", keep_prob = 1.0)
#####################################################################################
all_name_example_idx_pairs = model.shuffle_and_flatten_datasets(datasets, 'valid') # should be valid here
collected_op_values = np.zeros((3,len(all_name_example_idx_pairs)))
for j, (name, example_idxs) in enumerate(all_name_example_idx_pairs):
data_dict = datasets[name]
data_bxtxd, ext_input_bxtxi = model.get_batch(data_dict['valid_data'], data_dict['valid_ext_input'],example_idxs=example_idxs)
feed_dict = model.build_feed_dict(name, data_bxtxd, ext_input_bxtxi, keep_prob=1.0)
evaled_ops_np = session.run(ops_to_eval, feed_dict=feed_dict)
collected_op_values[:,j] = np.array(evaled_ops_np[0:3])
#####################################################################################
mean_cost = collected_op_values.mean(1)
ev_total_cost, ev_recon_cost, ev_kl_cost = mean_cost
#####################################################################################
valid_costs.append(ev_total_cost)
# Plot and summarize
values = { 'nepochs':count,
'has_any_valid_set': True,
'tr_total_cost':tr_total_cost,
'ev_total_cost':ev_total_cost,
'tr_recon_cost':tr_recon_cost,
'ev_recon_cost':ev_recon_cost,
'tr_kl_cost':tr_kl_cost,
'ev_kl_cost':ev_kl_cost,
'l2_weight':l2_weight,
'kl_weight':kl_weight,
'l2_cost':l2_cost
}
model.summarize_all(datasets, values)
# Manage learning rate.
n_lr = hps.learning_rate_n_to_compare
if len(train_costs) > n_lr and tr_total_cost > np.max(train_costs[-n_lr:]):
lr = session.run(model.learning_rate_decay_op)
print(" Decreasing learning rate to %f." % lr)
# Force the system to run n_lr times while at this lr.
train_costs.append(np.inf)
else:
train_costs.append(tr_total_cost)
if lr < lr_stop:
print("Stopping optimization based on learning rate criteria.")
break
print("Iteration %i ; Elapsed time : %d seconds" % (count, time()-t1))
count += 1
if count == 2:
break
#####################################################################################
print("Training time %d seconds" % (time()-t1))
#######################################################################################
# POSTERIOR SAMPLE AND AVERAGE
# write_model_runs(write_model_runs(hps, datasets, hps.output_filename_stem, push_mean=False))
# model.write_model_runs(datasets, output_fname, push_mean)
#######################################################################################
model.hps.kind = 'posterior_sample_and_average'
samples = {}
for data_name, data_dict in datasets.items():
samples[data_name] = {}
# data_tuple = [('train', data_dict['all_data'], data_dict['train_ext_input']), ('valid', data_dict['all_data'], data_dict['train_ext_input'])]
data_tuple = [('all', data_dict['all_data'], data_dict['train_ext_input'])]
for data_kind, data_extxd, ext_input_extxi in data_tuple:
fname = "model_runs_" + data_name + '_' + data_kind + '_' + model.hps.kind
###############################################################################
# model.eval_model_runs_avg_epoch
###############################################################################
hps = model.hps
batch_size = hps.batch_size
E, T, D = data_extxd.shape
E_to_process = np.minimum(hps.ps_nexamples_to_process, E)
if hps.ic_dim > 0:
prior_g0_mean = np.zeros([E_to_process, hps.ic_dim])
prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
post_g0_mean = np.zeros([E_to_process, hps.ic_dim])
post_g0_logvar = np.zeros([E_to_process, hps.ic_dim])
if hps.co_dim > 0:
controller_outputs = np.zeros([E_to_process, T, hps.co_dim])
gen_ics = np.zeros([E_to_process, hps.gen_dim])
gen_states = np.zeros([E_to_process, T, hps.gen_dim])
factors = np.zeros([E_to_process, T, hps.factors_dim])
if hps.output_dist == 'poisson':
out_dist_params = np.zeros([E_to_process, T, D])
elif hps.output_dist == 'gaussian':
out_dist_params = np.zeros([E_to_process, T, D+D])
else:
assert False, "NIY"
costs = np.zeros(E_to_process)
nll_bound_vaes = np.zeros(E_to_process)
nll_bound_iwaes = np.zeros(E_to_process)
train_steps = np.zeros(E_to_process)
for es_idx in range(E_to_process):
print("Running %d of %d." % (es_idx+1, E_to_process))
example_idxs = es_idx * np.ones(batch_size, dtype=np.int32)
data_bxtxd, ext_input_bxtxi = model.get_batch(data_extxd, ext_input_extxi, batch_size=batch_size, example_idxs=example_idxs)
##############################################################################
# model_values = self.eval_model_runs_batch(data_name, data_bxtxd, ext_input_bxtxi, do_eval_cost=True, do_average_batch=True)
##############################################################################
# if fewer than batch_size provided, pad to batch_size
E, _, _ = data_bxtxd.shape
if E < hps.batch_size:
data_bxtxd = np.pad(data_bxtxd, ((0, hps.batch_size-E), (0, 0), (0, 0)), mode='constant', constant_values=0)
if ext_input_bxtxi is not None:
ext_input_bxtxi = np.pad(ext_input_bxtxi, ((0, hps.batch_size-E), (0, 0), (0, 0)), mode='constant', constant_values=0)
feed_dict = model.build_feed_dict(data_name, data_bxtxd, ext_input_bxtxi, keep_prob=1.0)
# Non-temporal signals will be batch x dim.
# Temporal signals are list length T with elements batch x dim.
tf_vals = [model.gen_ics, model.gen_states, model.factors, model.output_dist_params]
tf_vals.append(model.cost)
tf_vals.append(model.nll_bound_vae)
tf_vals.append(model.nll_bound_iwae)
tf_vals.append(model.train_step) # not train_op!
if model.hps.ic_dim > 0:
tf_vals += [model.prior_zs_g0.mean,
model.prior_zs_g0.logvar,
model.posterior_zs_g0.mean,
model.posterior_zs_g0.logvar]
if model.hps.co_dim > 0:
tf_vals.append(model.controller_outputs)
tf_vals_flat, fidxs = flatten(tf_vals)
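            # (Added note) flatten() returns the tensors as a single flat list
            # plus fidxs, one index-list per original entry; the slices below
            # (e.g. np_vals_flat[fidxs[4][0]]) unpack the results back out.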
np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict)
# do average batch
gen_ics[es_idx] = np.mean(np_vals_flat[0], 0) # assuming E > hps.batch_size
costs[es_idx] = np_vals_flat[fidxs[4][0]]
nll_bound_vaes[es_idx] = np_vals_flat[fidxs[5][0]]
nll_bound_iwaes[es_idx] = np_vals_flat[fidxs[6][0]]
train_steps[es_idx] = np_vals_flat[fidxs[7][0]]
gen_states[es_idx] = np.mean(list_t_bxn_to_tensor_bxtxn([np_vals_flat[f] for f in fidxs[1]]), 0)
factors[es_idx] = np.mean(list_t_bxn_to_tensor_bxtxn([np_vals_flat[f] for f in fidxs[2]]), 0)
out_dist_params[es_idx] = np.mean(list_t_bxn_to_tensor_bxtxn([np_vals_flat[f] for f in fidxs[3]]), 0)
if model.hps.ic_dim > 0:
prior_g0_mean[es_idx] = np.mean(np_vals_flat[fidxs[8][0]], 0)
prior_g0_logvar[es_idx] = np.mean(np_vals_flat[fidxs[9][0]], 0)
post_g0_mean[es_idx] = np.mean(np_vals_flat[fidxs[10][0]], 0)
post_g0_logvar[es_idx] = np.mean(np_vals_flat[fidxs[11][0]], 0)
if model.hps.co_dim > 0:
controller_outputs[es_idx] = np.mean(list_t_bxn_to_tensor_bxtxn([np_vals_flat[f] for f in fidxs[12]]), 0)
##############################################################################
print('bound nll(vae): %.3f, bound nll(iwae): %.3f' % (nll_bound_vaes[es_idx], nll_bound_iwaes[es_idx]))
model_runs = {}
if model.hps.ic_dim > 0:
model_runs['prior_g0_mean'] = prior_g0_mean
model_runs['prior_g0_logvar'] = prior_g0_logvar
model_runs['post_g0_mean'] = post_g0_mean
model_runs['post_g0_logvar'] = post_g0_logvar
model_runs['gen_ics'] = gen_ics
if model.hps.co_dim > 0:
model_runs['controller_outputs'] = controller_outputs
model_runs['gen_states'] = gen_states
model_runs['factors'] = factors
model_runs['output_dist_params'] = out_dist_params
model_runs['costs'] = costs
model_runs['nll_bound_vaes'] = nll_bound_vaes
model_runs['nll_bound_iwaes'] = nll_bound_iwaes
model_runs['train_steps'] = train_steps
###############################################################################
full_fname = os.path.join(hps.lfads_save_dir, fname)
write_data(full_fname, model_runs, compression='gzip')
samples[data_name] = model_runs
sys.exit()
s = list(samples.keys())[0]
x = datasets[s]['train_data']
xp = samples[s]['train']['output_dist_params']
y = datasets[s]['train_data']
yp = samples[s]['train']['output_dist_params']
from pylab import *
figure()
plot(np.mean(x, 0)[:,0])
plot(np.mean(xp, 0)[:,0])
figure()
plot(np.mean(y, 0)[:,0])
plot(np.mean(yp, 0)[:,0])
show()
factors = []
for s in samples.keys():
dims = samples[s]['train']['factors'].shape
factors.append(samples[s]['train']['factors'].reshape(dims[0], dims[1] * dims[2]))
factors = np.vstack(factors)
from sklearn.manifold import TSNE
X = TSNE(n_components=2, perplexity = 30).fit_transform(factors)
scatter(X[:,0], X[:,1])
# POSTERIOR PUSH MEAN
# with sess.as_default():
# with tf.device(hps.device):
# if kind == "train":
# train(hps, datasets)
# elif kind == "posterior_sample_and_average":
# write_model_runs(hps, datasets, hps.output_filename_stem, push_mean=False)
# elif kind == "posterior_push_mean":
# write_model_runs(hps, datasets, hps.output_filename_stem, push_mean=True)
# elif kind == "prior_sample":
# write_model_samples(hps, datasets, hps.output_filename_stem)
# elif kind == "write_model_params":
# write_model_parameters(hps, hps.output_filename_stem, datasets)
# else:
# assert False, ("Kind %s is not implemented. " % kind)
| [
"[email protected]"
] | |
a52bc78e9d3b3e11b7cddbd0d97869f5a2a6bec2 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5040/828005040.py | 4a6d507aea18f617baa5083c1e25b5d6ed822f7a | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 823 | py | from bots.botsconfig import *
from records005040 import recorddefs
syntax = {
'version': '00504',
'functionalgroup': 'DA',
}
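# (Added note) Each dict in the structure below describes one X12 segment:
# MIN/MAX are its repetition bounds and LEVEL opens a nested loop, so e.g.
# the DAD loop may repeat up to 99999 times inside the ST...SE envelope.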
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BAU', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 99999},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'DAD', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 99999},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 99999},
]},
{ID: 'CTT', MIN: 1, MAX: 1},
{ID: 'AMT', MIN: 0, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
2940503764898b98082340170692f4e10443826b | 8e59a43de9d427865c5d67fef39e9a50e44f07ce | /ppocr/modeling/heads/rec_multi_head.py | 0b4fa939eecad15c79f5e37384944720b1879205 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleOCR | af87db8a804b9a4f4eac8a0b2faf80d1dd71633a | 15963b0d242867a4cc4d76445626dc8965509b2f | refs/heads/release/2.7 | 2023-09-01T04:53:37.561932 | 2023-08-30T02:22:15 | 2023-08-30T02:22:15 | 262,296,122 | 34,195 | 7,338 | Apache-2.0 | 2023-09-14T06:08:11 | 2020-05-08T10:38:16 | Python | UTF-8 | Python | false | false | 4,301 | py | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from ppocr.modeling.necks.rnn import Im2Seq, EncoderWithRNN, EncoderWithFC, SequenceEncoder, EncoderWithSVTR
from .rec_ctc_head import CTCHead
from .rec_sar_head import SARHead
from .rec_nrtr_head import Transformer
class FCTranspose(nn.Layer):
def __init__(self, in_channels, out_channels, only_transpose=False):
super().__init__()
self.only_transpose = only_transpose
if not self.only_transpose:
self.fc = nn.Linear(in_channels, out_channels, bias_attr=False)
def forward(self, x):
if self.only_transpose:
return x.transpose([0, 2, 1])
else:
return self.fc(x.transpose([0, 2, 1]))
class MultiHead(nn.Layer):
def __init__(self, in_channels, out_channels_list, **kwargs):
super().__init__()
self.head_list = kwargs.pop('head_list')
self.gtc_head = 'sar'
assert len(self.head_list) >= 2
for idx, head_name in enumerate(self.head_list):
name = list(head_name)[0]
if name == 'SARHead':
# sar head
sar_args = self.head_list[idx][name]
self.sar_head = eval(name)(in_channels=in_channels, \
out_channels=out_channels_list['SARLabelDecode'], **sar_args)
elif name == 'NRTRHead':
gtc_args = self.head_list[idx][name]
max_text_length = gtc_args.get('max_text_length', 25)
nrtr_dim = gtc_args.get('nrtr_dim', 256)
num_decoder_layers = gtc_args.get('num_decoder_layers', 4)
self.before_gtc = nn.Sequential(
nn.Flatten(2), FCTranspose(in_channels, nrtr_dim))
self.gtc_head = Transformer(
d_model=nrtr_dim,
nhead=nrtr_dim // 32,
num_encoder_layers=-1,
beam_size=-1,
num_decoder_layers=num_decoder_layers,
max_len=max_text_length,
dim_feedforward=nrtr_dim * 4,
out_channels=out_channels_list['NRTRLabelDecode'])
elif name == 'CTCHead':
# ctc neck
self.encoder_reshape = Im2Seq(in_channels)
neck_args = self.head_list[idx][name]['Neck']
encoder_type = neck_args.pop('name')
self.ctc_encoder = SequenceEncoder(in_channels=in_channels, \
encoder_type=encoder_type, **neck_args)
# ctc head
head_args = self.head_list[idx][name]['Head']
self.ctc_head = eval(name)(in_channels=self.ctc_encoder.out_channels, \
out_channels=out_channels_list['CTCLabelDecode'], **head_args)
else:
raise NotImplementedError(
'{} is not supported in MultiHead yet'.format(name))
def forward(self, x, targets=None):
ctc_encoder = self.ctc_encoder(x)
ctc_out = self.ctc_head(ctc_encoder, targets)
head_out = dict()
head_out['ctc'] = ctc_out
head_out['ctc_neck'] = ctc_encoder
# eval mode
if not self.training:
return ctc_out
if self.gtc_head == 'sar':
sar_out = self.sar_head(x, targets[1:])
head_out['sar'] = sar_out
else:
gtc_out = self.gtc_head(self.before_gtc(x), targets[1:])
head_out['nrtr'] = gtc_out
return head_out
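# Expected configuration shape for MultiHead (illustrative values only; the
# real keys come from the caller's YAML config, which is not part of this
# module):
#
#     head_list:
#       - CTCHead:
#           Neck: {name: svtr}   # encoder_type handed to SequenceEncoder
#           Head: {}             # extra kwargs for CTCHead
#       - SARHead: {}            # extra kwargs for SARHead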
| [
"[email protected]"
] | |
47c310bae1e4abdaa6b75569a117b14e0647509e | a53998e56ee06a96d59d97b2601fd6ec1e4124d7 | /基础课/jichu/day16/seek.py | 9bc56e0f93452a501443ed06d36a8a5bd659e588 | [] | no_license | zh-en520/aid1901 | f0ec0ec54e3fd616a2a85883da16670f34d4f873 | a56f82d0ea60b2395deacc57c4bdf3b6bc73bd2e | refs/heads/master | 2020-06-28T21:16:22.259665 | 2019-08-03T07:09:29 | 2019-08-03T07:09:29 | 200,344,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | fr = open('20bytes.txt','rb')
print('Current position:', fr.tell())  # 0
b = fr.read(2)
print(b)  # b'AB'
print('Current position:', fr.tell())  # 2
# Read the five bytes b'abcde'
fr.seek(5, 0)    # whence=0: absolute, 5 bytes from the start
# fr.seek(3, 1)  # whence=1: relative, 3 bytes forward from position 2
# fr.seek(-15, 2)  # whence=2: relative, 15 bytes back from the end of the 20-byte file
b = fr.read(5)  # b'abcde'
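# Added reference (not part of the original exercise): the whence values used
# above have named constants in the io module:
#   io.SEEK_SET == 0 (start), io.SEEK_CUR == 1 (current), io.SEEK_END == 2 (end)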
print(b) | [
"[email protected]"
] | |
83c7d94c9223617493fa9162a58e518ded6fbd10 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/show_price_response.py | 437f28d12f863bbca7d3ec3c11acd04386c74027 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,513 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowPriceResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'prices': 'list[ResourcePrice]',
'status': 'str'
}
attribute_map = {
'prices': 'prices',
'status': 'status'
}
def __init__(self, prices=None, status=None):
"""ShowPriceResponse - a model defined in huaweicloud sdk"""
super(ShowPriceResponse, self).__init__()
self._prices = None
self._status = None
self.discriminator = None
if prices is not None:
self.prices = prices
if status is not None:
self.status = status
@property
def prices(self):
"""Gets the prices of this ShowPriceResponse.
        Technology stack price list
:return: The prices of this ShowPriceResponse.
:rtype: list[ResourcePrice]
"""
return self._prices
@prices.setter
def prices(self, prices):
"""Sets the prices of this ShowPriceResponse.
        Technology stack price list
:param prices: The prices of this ShowPriceResponse.
:type: list[ResourcePrice]
"""
self._prices = prices
@property
def status(self):
"""Gets the status of this ShowPriceResponse.
        Status
:return: The status of this ShowPriceResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowPriceResponse.
        Status
:param status: The status of this ShowPriceResponse.
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowPriceResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4a538cff9ad6cd0890340bd264318ca2cc7dc8c3 | cc9a0d5608b2209b02591ceace0a7416823a9de5 | /config/settings/local.py | cc31fc60988cb0f8692e336882a572a81a46a794 | [
"MIT"
] | permissive | morwen1/hack_your_body | 240838e75dd4447c944d47d37635d2064d4210fd | d4156d4fbe2dd4123d5b5bceef451803a50a39f8 | refs/heads/master | 2020-11-24T01:55:46.323849 | 2019-12-15T18:15:51 | 2019-12-15T18:15:51 | 226,505,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="EGR9jaMwa7cjRCcM2wIxqFPD2RqJ6yIEAiL7KlbEUKIPVjjPcL9ZMHgprAJiT2T7",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
#INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
#MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
"""
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}"""
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1" for ip in ips]
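    # (Added note) e.g. a container address '172.18.0.3' becomes '172.18.0.1',
    # the gateway address the debug toolbar sees requests coming from; actual
    # values depend on the Docker network in use.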
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Celery
# ------------------------------------------------------------------------------
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates
CELERY_TASK_EAGER_PROPAGATES = True
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
c67c6aa5d5f63958801f1bc6b365fa8cf502cda7 | bd933b02a85343e5c7b2bf4018c7d9e11d39b222 | /MODEL1303260003/model.py | e613b83d4e71b70742457b7c2808d360bbdebe8c | [
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | biomodels/MODEL1303260003 | 8533d064f8a9a65e84258cc98d91386adb69eb1e | a2695ffb9d247f28b8c397bc516c8750ea6ab559 | refs/heads/master | 2016-09-05T13:57:50.132052 | 2014-10-16T05:49:12 | 2014-10-16T05:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1303260003.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
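    # Usage sketch (added illustration; requires the python-libsbml package):
    #     print(sbml.getModel().getId())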
"[email protected]"
] | |
79e07c6d4eb631d0458dd3a369941042ebf329ff | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_Combo_CLI_Config_SNMP_Trap.py | 1cd2cc625e0a8cef8938c78b6c4bf089240f43cd | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,956 | py | '''
Config SNMP Trap via ZD CLI
Config snmp trap via ZD CLI successfully
    1. The trap information from CLI and GUI is the same.
    2. Enable trap v2: the information is the same between set and get, and between CLI get and GUI get.
    3. Enable trap v3: the information is the same between set and get, and between CLI get and GUI get.
    4. Disable trap: the information is the same between set and get, and between CLI get and GUI get.
    Expected result: all steps should complete properly.
How to:
1) Get snmp trap setting from CLI and GUI, verify they are same.
2) Enable snmp trap v2 via ZD CLI.
3) Get snmp trap information from GUI and CLI.
4) Compare the information are same between CLI set and CLI get.
5) Compare the information are same between CLI get and GUI get.
6) Verify ap join trap is received.
7) Repeat do 4)-6) for enable trap v3.
8) Disable snmp trap, repeat do 3)-6), verify trap is not received.
Created on 2011-4-25
@author: [email protected]
'''
import sys
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
def define_test_cfg(tcfg):
test_cfgs = []
test_name = 'CB_Scaling_ZD_CLI_Process_Check'
common_name = 'apmgr and stamgr daemon pid mark'
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 0, False))
test_case_name = '[Current Trap Info GUI and CLI]'
test_name = 'CB_ZD_Get_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap Info from GUI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 1, False))
test_name = 'CB_ZD_CLI_Get_Sys_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap Info from CLI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Verify_SNMP_Trap_Info'
common_name = '%sVerify SNMP Trap Info between GUI Get and CLI Get' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_case_name = '[Enable SNMP Trap V2]'
test_name = 'CB_ZD_CLI_Set_SNMP_Trap'
common_name = '%sEnable SNMP Trap V2 from CLI' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['enable_v2_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 1, False))
test_name = 'CB_ZD_CLI_Get_Sys_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap V2 Info from CLI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Verify_SNMP_Trap_Info_CLI_Get_Set'
common_name = '%sVerify SNMP Trap V2 Info between CLI Get and CLI Set' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['enable_v2_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap V2 Info from GUI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Verify_SNMP_Trap_Info'
common_name = '%sVerify SNMP Trap V2 Info between GUI Get and CLI Get' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_SNMP_Verify_AP_Join_Trap'
    common_name = '%sVerify AP Join trap when trap is enabled' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['enable_v2_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_case_name = '[Enable SNMP Trap V3]'
test_name = 'CB_ZD_CLI_Set_SNMP_Trap'
common_name = '%sEnable SNMP Trap V3 from CLI' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['enable_v3_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 1, False))
test_name = 'CB_ZD_CLI_Get_Sys_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap V3 Info from CLI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Verify_SNMP_Trap_Info_CLI_Get_Set'
common_name = '%sVerify SNMP Trap V3 Info between CLI Get and CLI Set' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['enable_v3_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap V3 Info from GUI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Get_Sys_SNMP_Trap_Info'
common_name = '%sVerify SNMP Trap V3 Info between GUI Get and CLI Get' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_SNMP_Verify_AP_Join_Trap'
    common_name = '%sVerify AP Join trap when trap is enabled' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['enable_v3_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_case_name = '[Disable SNMP Trap]'
test_name = 'CB_ZD_CLI_Set_SNMP_Trap'
common_name = '%sDisable SNMP Trap from CLI' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['disable_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 1, False))
test_name = 'CB_ZD_CLI_Get_Sys_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap Info from CLI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Verify_SNMP_Trap_Info_CLI_Get_Set'
common_name = '%sVerify SNMP Trap Info between CLI Get and CLI Set' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':tcfg['disable_trap_cfg']}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_SNMP_Trap_Info'
common_name = '%sGet SNMP Trap Info from GUI' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_ZD_CLI_Verify_SNMP_Trap_Info'
common_name = '%sVerify SNMP Trap Info between GUI Get and CLI Get' % (test_case_name,)
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
disable_trap_cfg = {}
disable_trap_cfg.update(tcfg['enable_v2_trap_cfg'])
disable_trap_cfg['enabled'] = False
test_name = 'CB_ZD_SNMP_Verify_AP_Join_Trap'
    common_name = '%sVerify AP Join v2 trap when trap is disabled' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':disable_trap_cfg}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
disable_trap_cfg = {}
disable_trap_cfg.update(tcfg['enable_v3_trap_cfg'])
disable_trap_cfg['enabled'] = False
test_name = 'CB_ZD_SNMP_Verify_AP_Join_Trap'
    common_name = '%sVerify AP Join v3 trap when trap is disabled' % (test_case_name,)
param_cfg = {'snmp_trap_cfg':disable_trap_cfg}
test_cfgs.append((param_cfg, test_name, common_name, 2, False))
test_name = 'CB_Scaling_ZD_CLI_Process_Check'
common_name = 'apmgr and stamgr daemon pid checking.'
param_cfg = dict()
test_cfgs.append((param_cfg, test_name, common_name, 0, True))
return test_cfgs
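# (Added note) Each tuple appended above is (test_params, test_name,
# common_name, exc_level, is_cleanup), the exact order unpacked and passed to
# testsuite.addTestCase() in create_test_suite() below.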
def define_test_parameters(tbcfg, trap_server_ip):
server_ip = raw_input('Please input test engine ip address[%s]' % trap_server_ip)
if not server_ip:
server_ip = trap_server_ip
enable_v2_trap_cfg = {'version': 2,
'enabled': True,
'1': {'server_ip': server_ip},
}
enable_v3_trap_cfg = {'version': 3,
'enabled': True,
'1': {'sec_name': 'ruckus-read',
'server_ip': server_ip,
'auth_protocol': 'MD5',
'auth_passphrase': '12345678',
'priv_protocol': 'DES',
'priv_passphrase': '12345678',
}
}
disable_trap_cfg = {'enabled': False}
tcfg = {'enable_v2_trap_cfg': enable_v2_trap_cfg,
'enable_v3_trap_cfg': enable_v3_trap_cfg,
'disable_trap_cfg': disable_trap_cfg,
}
return tcfg
def create_test_suite(**kwargs):
tb = testsuite.getTestbed2(**kwargs)
tbcfg = testsuite.getTestbedConfig(tb)
if str(tb.tbtype) == "ZD_Stations_IPV6":
zd_ip_version = tbcfg['ip_cfg']['zd_ip_cfg']['ip_version']
ap_ip_version = tbcfg['ip_cfg']['ap_ip_cfg']['ip_version']
trap_server_ip = '2020:db8:1::10'
ts_name = 'ZD CLI ZD %s AP %s - SNMP V2 and V3 Trap Configuration' % (zd_ip_version, ap_ip_version)
else:
trap_server_ip = '192.168.0.10'
ts_name = 'ZD CLI - SNMP Trap Configuration'
#ts_name = 'ZD CLI - SNMP V2 and V3 Trap Configuration'
ts = testsuite.get_testsuite(ts_name, 'Verify SNMP Trap Configuration: CLI Set, GUI Get', combotest=True)
tcfg = define_test_parameters(tbcfg, trap_server_ip)
test_cfgs = define_test_cfg(tcfg)
test_order = 1
test_added = 0
for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
if testsuite.addTestCase(ts, testname, common_name, test_params, test_order, exc_level, is_cleanup) > 0:
test_added += 1
test_order += 1
print "Add test case with test name: %s\n\t\common name: %s" % (testname, common_name)
print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
if __name__ == "__main__":
_dict = kwlist.as_dict(sys.argv[1:])
create_test_suite(**_dict)
| [
"[email protected]"
] | |
a00da9b3a8568ca40cac0d1ea67083ce9ef97c43 | 9af35b0e0c0ed4b102c61c563d7c7647a758bb72 | /braindecode/datautil/signal_target.py | ebb6280370c5f4026abc132b5482426657926cdc | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | TonioBall/braindecode | 7c0a5217c944e8718d74fd9763b3c609252c9feb | d5b8d87d959c96ea8422e21099e1ef4b71b9d05a | refs/heads/master | 2020-12-13T08:56:34.939109 | 2020-01-17T13:27:30 | 2020-01-17T13:27:30 | 234,367,901 | 0 | 0 | BSD-3-Clause | 2020-01-16T16:56:45 | 2020-01-16T16:56:44 | null | UTF-8 | Python | false | false | 911 | py | class SignalAndTarget(object):
"""
Simple data container class.
Parameters
----------
X: 3darray or list of 2darrays
The input signal per trial.
y: 1darray or list
Labels for each trial.
"""
def __init__(self, X, y):
assert len(X) == len(y)
self.X = X
self.y = y
def apply_to_X_y(fn, *sets):
"""
Apply a function to all `X` and `y` attributes of all given sets.
Applies function to list of X arrays and to list of y arrays separately.
Parameters
----------
fn: function
Function to apply
sets: :class:`.SignalAndTarget` objects
Returns
-------
result_set: :class:`.SignalAndTarget`
Dataset with X and y as the result of the
application of the function.
"""
X = fn(*[s.X for s in sets])
y = fn(*[s.y for s in sets])
return SignalAndTarget(X, y)
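if __name__ == '__main__':
    # Added usage sketch (not part of the original module): concatenate two
    # datasets trial-wise with synthetic arrays; assumes numpy is available,
    # which braindecode already depends on.
    import numpy as np
    train_set = SignalAndTarget(np.zeros((4, 2, 10)), np.array([0, 1, 0, 1]))
    test_set = SignalAndTarget(np.ones((2, 2, 10)), np.array([1, 0]))
    merged = apply_to_X_y(lambda *xs: np.concatenate(xs), train_set, test_set)
    assert merged.X.shape == (6, 2, 10) and len(merged.y) == 6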
| [
"[email protected]"
] | |
3dcff99559d2d1556f67ba262ff9fb3338adab21 | d34087fa552d7492406def9871f756f101d3b6a6 | /source/packages/mailman_2.1.20-1_brcm63xx-tch/usr/local/mailman/Mailman/Gui/ContentFilter.py | 9043fe89179fa2d4b5f438060f3a35b49968b0ed | [] | no_license | fexofenadine/brcm63xx-tch | b0a582518418c43078bf6799e59f72e31c5c9e4f | b50c32f7bab074c7017d7b15d76da06430036a76 | refs/heads/master | 2023-07-22T10:22:20.838756 | 2023-07-08T02:57:10 | 2023-07-08T02:57:10 | 132,597,313 | 7 | 8 | null | 2022-10-19T00:54:37 | 2018-05-08T11:10:50 | Vim script | UTF-8 | Python | false | false | 9,149 | py | # Copyright (C) 2002-2005 by the Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""GUI component managing the content filtering options."""
from Mailman import mm_cfg
from Mailman.i18n import _
from Mailman.Gui.GUIBase import GUIBase
NL = '\n'
class ContentFilter(GUIBase):
def GetConfigCategory(self):
return 'contentfilter', _('Content filtering')
def GetConfigInfo(self, mlist, category, subcat=None):
if category <> 'contentfilter':
return None
WIDTH = mm_cfg.TEXTFIELDWIDTH
actions = [_('Discard'), _('Reject'), _('Forward to List Owner')]
if mm_cfg.OWNERS_CAN_PRESERVE_FILTERED_MESSAGES:
actions.append(_('Preserve'))
return [
_("""Policies concerning the content of list traffic.
<p>Content filtering works like this: when a message is
received by the list and you have enabled content filtering, the
individual attachments are first compared to the
<a href="?VARHELP=contentfilter/filter_mime_types">filter
types</a>. If the attachment type matches an entry in the filter
types, it is discarded.
<p>Then, if there are <a
href="?VARHELP=contentfilter/pass_mime_types">pass types</a>
defined, any attachment type that does <em>not</em> match a
pass type is also discarded. If there are no pass types defined,
this check is skipped.
<p>After this initial filtering, any <tt>multipart</tt>
attachments that are empty are removed. If the outer message is
left empty after this filtering, then the whole message is
discarded.
<p> Then, each <tt>multipart/alternative</tt> section will
be replaced by just the first alternative that is non-empty after
filtering if
<a href="?VARHELP=contentfilter/collapse_alternatives"
>collapse_alternatives</a> is enabled.
<p>Finally, any <tt>text/html</tt> parts that are left in the
message may be converted to <tt>text/plain</tt> if
<a href="?VARHELP=contentfilter/convert_html_to_plaintext"
>convert_html_to_plaintext</a> is enabled and the site is
configured to allow these conversions."""),
('filter_content', mm_cfg.Radio, (_('No'), _('Yes')), 0,
_("""Should Mailman filter the content of list traffic according
to the settings below?""")),
('filter_mime_types', mm_cfg.Text, (10, WIDTH), 0,
_("""Remove message attachments that have a matching content
type."""),
_("""Use this option to remove each message attachment that
matches one of these content types. Each line should contain a
string naming a MIME <tt>type/subtype</tt>,
e.g. <tt>image/gif</tt>. Leave off the subtype to remove all
parts with a matching major content type, e.g. <tt>image</tt>.
<p>Blank lines are ignored.
<p>See also <a href="?VARHELP=contentfilter/pass_mime_types"
>pass_mime_types</a> for a content type whitelist.""")),
('pass_mime_types', mm_cfg.Text, (10, WIDTH), 0,
_("""Remove message attachments that don't have a matching
content type. Leave this field blank to skip this filter
test."""),
_("""Use this option to remove each message attachment that does
not have a matching content type. Requirements and formats are
exactly like <a href="?VARHELP=contentfilter/filter_mime_types"
>filter_mime_types</a>.
<p><b>Note:</b> if you add entries to this list but don't add
<tt>multipart</tt> to this list, any messages with attachments
will be rejected by the pass filter.""")),
('filter_filename_extensions', mm_cfg.Text, (10, WIDTH), 0,
_("""Remove message attachments that have a matching filename
extension."""),),
('pass_filename_extensions', mm_cfg.Text, (10, WIDTH), 0,
_("""Remove message attachments that don't have a matching
filename extension. Leave this field blank to skip this filter
test."""),),
('collapse_alternatives', mm_cfg.Radio, (_('No'), _('Yes')), 0,
_("""Should Mailman collapse multipart/alternative to its
first part content?""")),
('convert_html_to_plaintext', mm_cfg.Radio, (_('No'), _('Yes')), 0,
_("""Should Mailman convert <tt>text/html</tt> parts to plain
text? This conversion happens after MIME attachments have been
stripped.""")),
('filter_action', mm_cfg.Radio, tuple(actions), 0,
_("""Action to take when a message matches the content filtering
rules."""),
_("""One of these actions is taken when the message matches one of
the content filtering rules, meaning, the top-level
content type matches one of the <a
href="?VARHELP=contentfilter/filter_mime_types"
>filter_mime_types</a>, or the top-level content type does
<strong>not</strong> match one of the
<a href="?VARHELP=contentfilter/pass_mime_types"
>pass_mime_types</a>, or if after filtering the subparts of the
message, the message ends up empty.
<p>Note this action is not taken if after filtering the message
still contains content. In that case the message is always
forwarded on to the list membership.
<p>When messages are discarded, a log entry is written
containing the Message-ID of the discarded message. When
messages are rejected or forwarded to the list owner, a reason
for the rejection is included in the bounce message to the
original author. When messages are preserved, they are saved in
a special queue directory on disk for the site administrator to
view (and possibly rescue) but otherwise discarded. This last
option is only available if enabled by the site
administrator.""")),
]
def _setValue(self, mlist, property, val, doc):
if property in ('filter_mime_types', 'pass_mime_types'):
types = []
for spectype in [s.strip() for s in val.splitlines()]:
ok = 1
slashes = spectype.count('/')
if slashes == 0 and not spectype:
ok = 0
elif slashes == 1:
maintype, subtype = [s.strip().lower()
for s in spectype.split('/')]
if not maintype or not subtype:
ok = 0
elif slashes > 1:
ok = 0
if not ok:
doc.addError(_('Bad MIME type ignored: %(spectype)s'))
else:
types.append(spectype.strip().lower())
if property == 'filter_mime_types':
mlist.filter_mime_types = types
elif property == 'pass_mime_types':
mlist.pass_mime_types = types
elif property in ('filter_filename_extensions',
'pass_filename_extensions'):
fexts = []
for ext in [s.strip() for s in val.splitlines()]:
fexts.append(ext.lower())
if property == 'filter_filename_extensions':
mlist.filter_filename_extensions = fexts
elif property == 'pass_filename_extensions':
mlist.pass_filename_extensions = fexts
else:
GUIBase._setValue(self, mlist, property, val, doc)
def getValue(self, mlist, kind, property, params):
if property == 'filter_mime_types':
return NL.join(mlist.filter_mime_types)
if property == 'pass_mime_types':
return NL.join(mlist.pass_mime_types)
if property == 'filter_filename_extensions':
return NL.join(mlist.filter_filename_extensions)
if property == 'pass_filename_extensions':
return NL.join(mlist.pass_filename_extensions)
return None
| [
"[email protected]"
] | |
9f7cd33b09083b7ca32ab65d512beb3d76667dc4 | 525bdfe2c7d33c901598a501c145df94a3e162b0 | /subject6_graphs/text.py | e255a01bf2155d3b55fd0ba8180d1b331aa5b94d | [] | no_license | davendiy/ads_course2 | f0a52108f1cab8619b2e6e2c6c4383a1a4615c15 | e44bf2b535b34bc31fb323c20901a77b0b3072f2 | refs/heads/master | 2020-04-06T09:37:12.983564 | 2019-05-09T10:28:22 | 2019-05-09T10:28:22 | 157,349,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,980 | py | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
# created: 05.12.18
# by David Zashkolny
# 2 course, comp math
# Taras Shevchenko National University of Kyiv
# email: [email protected]
tasks = int(input())
for mini_task in range(tasks):
s = input().split()
n, m = int(s[0]), int(s[1])
l = []
x, y = -1, -1
xe, ye = -1, -1
    cost = list(map(int, input().split()))
for i in range(n):
buff = []
k = input()
if x == -1:
y, x = i, k.find('S')
if xe == -1:
ye, xe = i, k.find('E')
for j in k:
buff.append(j)
l.append(buff)
keys = [['', 0], ['R', cost[0]], ['G', cost[1]], ['B', cost[2]], ['Y', cost[3]], ['RG', cost[0] + cost[1]],
['RB', cost[0] + cost[2]], ['RY', cost[0] + cost[3]], ['GB', cost[1] + cost[2]], ['GY', cost[1] + cost[3]],
['BY', cost[2] + cost[3]], ['RGB', cost[0] + cost[1] + cost[2]], ['RGY', cost[0] + cost[1] + cost[3]],
['RBY', cost[0] + cost[2] + cost[3]], ['GBY', cost[1] + cost[2] + cost[3]], ['RGBY', sum(cost)]]
# keys = ['G']
keys.sort(key=lambda x: x[1])
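    # keys enumerates every subset of the door colours R/G/B/Y together with the
    # total price of those keys; sorting by price means the BFS below tries the
    # cheapest subsets first, so the first subset whose search reaches 'E' is optimal.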
way = [False, '']
for key in keys:
hl = [[-1 for i in range(m)] for j in range(n)]
hl[y][x] = 1
indexes = [[y, x]]
for i in range(n * m + 1):
buff = []
for j in indexes:
if 0 <= j[1] + 1 < m and (l[j[0]][j[1] + 1] == '.' or key[0].find(l[j[0]][j[1] + 1]) != -1 or
l[j[0]][j[1] + 1] == 'E') and hl[j[0]][j[1] + 1] == -1:
hl[j[0]][j[1] + 1] = hl[j[0]][j[1]] + 1
buff.append([j[0], j[1] + 1])
if 0 <= j[1] - 1 < m and (l[j[0]][j[1] - 1] == '.' or key[0].find(l[j[0]][j[1] - 1]) != -1 or
l[j[0]][j[1] - 1] == 'E') and hl[j[0]][
j[1] - 1] == -1:
hl[j[0]][j[1] - 1] = hl[j[0]][j[1]] + 1
buff.append([j[0], j[1] - 1])
if 0 <= j[0] + 1 < n and (l[j[0] + 1][j[1]] == '.' or key[0].find(l[j[0] + 1][j[1]]) != -1 or
l[j[0] + 1][j[1]] == 'E') and hl[j[0] + 1][
j[1]] == -1:
hl[j[0] + 1][j[1]] = hl[j[0]][j[1]] + 1
buff.append([j[0] + 1, j[1]])
if 0 <= j[0] - 1 < n and (l[j[0] - 1][j[1]] == '.' or key[0].find(l[j[0] - 1][j[1]]) != -1 or
l[j[0] - 1][j[1]] == 'E') and hl[j[0] - 1][j[1]] == -1:
hl[j[0] - 1][j[1]] = hl[j[0]][j[1]] + 1
buff.append([j[0] - 1, j[1]])
if j == [ye, xe]:
way = [True, key[1]]
break
indexes = buff
if way[0]:
break
if way[0]:
break
if way[0]:
print(way[1])
else:
print('Sleep') | [
"[email protected]"
] | |
8d5ab2ac2f9f3feed3bcb898040784ad256e54f9 | 8878700a71196cc33b7be00357b625cf9883043c | /store/tests/tests_viewset_OrderLine.py | e4296695c7e7c37c60581d5c5eff5512440e9821 | [
"MIT"
] | permissive | Jerome-Celle/Blitz-API | bc7db966cbbb45b29bbbe944adb954d6cb5a0040 | a0f870d6774abf302886ab70e169572a9d0225ef | refs/heads/master | 2021-06-10T06:05:03.753314 | 2018-11-30T15:40:38 | 2018-11-30T15:46:19 | 165,642,546 | 0 | 0 | MIT | 2019-01-14T10:32:29 | 2019-01-14T10:32:28 | null | UTF-8 | Python | false | false | 19,573 | py | import json
from datetime import timedelta
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from blitz_api.factories import UserFactory, AdminFactory
from blitz_api.models import AcademicLevel
from ..models import Membership, Order, OrderLine, Package
User = get_user_model()
class OrderLineTests(APITestCase):
@classmethod
def setUpClass(cls):
super(OrderLineTests, cls).setUpClass()
cls.client = APIClient()
cls.user = UserFactory()
cls.admin = AdminFactory()
cls.package_type = ContentType.objects.get_for_model(Package)
cls.academic_level = AcademicLevel.objects.create(
name="University"
)
cls.membership_with_academic_level = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
available=True,
price=50,
duration=timedelta(days=365),
)
cls.membership_with_academic_level.academic_levels.set([
cls.academic_level
])
cls.membership = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
available=True,
price=50,
duration=timedelta(days=365),
)
cls.package = Package.objects.create(
name="extreme_package",
details="100 reservations package",
available=True,
price=400,
reservations=100,
)
cls.package.exclusive_memberships.set([
cls.membership,
])
cls.order = Order.objects.create(
user=cls.user,
transaction_date=timezone.now(),
authorization_id=1,
settlement_id=1,
)
cls.order_admin = Order.objects.create(
user=cls.admin,
transaction_date=timezone.now(),
authorization_id=1,
settlement_id=1,
)
cls.order_line = OrderLine.objects.create(
order=cls.order,
quantity=1,
content_type=cls.package_type,
object_id=1,
)
cls.order_line_admin = OrderLine.objects.create(
order=cls.order_admin,
quantity=99,
content_type=cls.package_type,
object_id=1,
)
def test_create_package(self):
"""
Ensure we can create an order line if user has permission.
"""
self.client.force_authenticate(user=self.admin)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 2,
'content_type': "package",
'object_id': 1,
}
response = self.client.post(
reverse('orderline-list'),
data,
)
content = {
'content_type': 'package',
'id': 3,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 2,
'url': 'http://testserver/order_lines/3'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_without_membership(self):
"""
Ensure we can't create an order line if user does not have the required
membership.
"""
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 2,
'content_type': "package",
'object_id': 1,
}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
content = {
'object_id': [
'User does not have the required membership to order this '
'package.'
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_without_academic_level(self):
"""
Ensure we can't create an order line with a membership if the user does
not have the required academic level.
"""
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 1,
'content_type': "membership",
'object_id': self.membership_with_academic_level.id,
}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
content = {
'object_id': [
'User does not have the required academic_level to order this '
'membership.'
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_inexistent_object(self):
"""
Ensure we can't create an order line if the reference object does
not exist.
"""
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 2,
'content_type': "package",
'object_id': 999,
}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
content = {
'object_id': ['The referenced object does not exist.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_with_membership(self):
"""
Ensure we can create an order line if user has the required membership.
"""
self.user.membership = self.membership
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 2,
'content_type': "package",
'object_id': 1,
}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
self.user.membership = None
content = {
'content_type': 'package',
'id': 3,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 2,
'url': 'http://testserver/order_lines/3'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_membership(self):
"""
Ensure we can create an order line if user has permission.
"""
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 1,
'content_type': "membership",
'object_id': self.membership.id,
}
response = self.client.post(
reverse('orderline-list'),
data,
)
content = {
'content_type': 'membership',
'id': 3,
'object_id': self.membership.id,
'order': 'http://testserver/orders/1',
'quantity': 1,
'url': 'http://testserver/order_lines/3'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_missing_field(self):
"""
Ensure we can't create an order line when required field are missing.
"""
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
content = {
'content_type': ['This field is required.'],
'object_id': ['This field is required.'],
'order': ['This field is required.'],
'quantity': ['This field is required.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_null_field(self):
"""
Ensure we can't create an order line when required field are null.
"""
self.client.force_authenticate(user=self.admin)
data = {
'content_type': None,
'object_id': None,
'order': None,
'quantity': None,
}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
content = {
'content_type': ['This field may not be null.'],
'object_id': ['This field may not be null.'],
'order': ['This field may not be null.'],
'quantity': ['This field may not be null.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_field(self):
"""
        Ensure we can't create an order line when required fields are invalid.
"""
self.client.force_authenticate(user=self.admin)
data = {
'content_type': (1,),
'object_id': "invalid",
'order': "invalid",
'quantity': (1,),
}
response = self.client.post(
reverse('orderline-list'),
data,
format='json',
)
content = {
'content_type': ['Object with model=[1] does not exist.'],
'object_id': ['A valid integer is required.'],
'order': ['Invalid hyperlink - No URL match.'],
'quantity': ['A valid integer is required.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update(self):
"""
Ensure we can update an order line.
"""
self.client.force_authenticate(user=self.admin)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 99,
'content_type': "package",
'object_id': 1,
}
response = self.client.put(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
data,
format='json',
)
content = {
'content_type': 'package',
'id': 1,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 99,
'url': 'http://testserver/order_lines/1'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_partial(self):
"""
Ensure we can partially update an order line.
"""
self.user.membership = self.membership
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 9999,
}
response = self.client.patch(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
data,
format='json',
)
self.user.membership = None
content = {
'content_type': 'package',
'id': 1,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 9999,
'url': 'http://testserver/order_lines/1'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_partial_without_membership(self):
"""
Ensure we can't partially update an order line without required
membership for ordered package.
"""
self.client.force_authenticate(user=self.user)
data = {
'order': reverse('order-detail', args=[self.order.id]),
'quantity': 9999,
}
response = self.client.patch(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
data,
format='json',
)
content = {
'object_id': [
'User does not have the required membership to order this '
'package.'
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete(self):
"""
Ensure we can delete an order line.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_list(self):
"""
Ensure we can't list order lines as an unauthenticated user.
"""
response = self.client.get(
reverse('orderline-list'),
format='json',
)
data = json.loads(response.content)
content = {'detail': 'Authentication credentials were not provided.'}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_list_owner(self):
"""
Ensure we can list owned order lines as an authenticated user.
"""
self.client.force_authenticate(user=self.user)
response = self.client.get(
reverse('orderline-list'),
format='json',
)
data = json.loads(response.content)
content = {
'count': 1,
'next': None,
'previous': None,
'results': [{
'content_type': 'package',
'id': 1,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 1,
'url': 'http://testserver/order_lines/1'
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_admin(self):
"""
Ensure we can list all order lines as an admin.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse('orderline-list'),
format='json',
)
data = json.loads(response.content)
content = {
'count': 2,
'next': None,
'previous': None,
'results': [{
'content_type': 'package',
'id': 1,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 1,
'url': 'http://testserver/order_lines/1'
}, {
'content_type': 'package',
'id': 2,
'object_id': 1,
'order': 'http://testserver/orders/2',
'quantity': 99,
'url': 'http://testserver/order_lines/2'
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_unauthenticated(self):
"""
Ensure we can't read an order line as an unauthenticated user.
"""
response = self.client.get(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
)
content = {'detail': 'Authentication credentials were not provided.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_read_owner(self):
"""
Ensure we can read an order line owned by an authenticated user.
"""
self.client.force_authenticate(user=self.user)
response = self.client.get(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
)
content = {
'content_type': 'package',
'id': 1,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 1,
'url': 'http://testserver/order_lines/1'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_owner_not_owned(self):
"""
Ensure we can't read an order line not owned by an authenticated user.
"""
self.client.force_authenticate(user=self.user)
response = self.client.get(
reverse(
'orderline-detail',
kwargs={'pk': 2},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_read_admin(self):
"""
Ensure we can read any order line as an admin.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse(
'orderline-detail',
kwargs={'pk': 1},
),
)
content = {
'content_type': 'package',
'id': 1,
'object_id': 1,
'order': 'http://testserver/orders/1',
'quantity': 1,
'url': 'http://testserver/order_lines/1'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_non_existent(self):
"""
Ensure we get not found when asking for an order line that doesn't
exist.
"""
self.client.force_authenticate(user=self.user)
response = self.client.get(
reverse(
'orderline-detail',
kwargs={'pk': 999},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| [
"[email protected]"
] | |
cd6e8276b89ab78c02389cbb84c51b5ba844fbdb | 421d58c6b93b81e0724f8f4576119300eb344252 | /influencers/users/migrations/0005_auto_20181111_1505.py | 1d7c2ca75db84615820701ef82599ab6c705746f | [] | no_license | momen/influencers | 7728228c92a552bdff9ae62f85986ad03bce186e | f9c76cfc2970440112967f9579dc31f77063cb25 | refs/heads/master | 2020-06-03T22:20:03.881411 | 2019-06-15T07:48:43 | 2019-06-15T07:48:43 | 191,754,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 2.1.2 on 2018-11-11 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_user_is_removed'),
]
operations = [
migrations.AlterField(
model_name='user',
name='is_active',
field=models.BooleanField(default=False, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active'),
),
]
| [
"[email protected]"
] | |
e62fda6ebc68743af0af01a2bc34bc14ba0e666a | 3117852233ea6c2644e723587a7b28d6d6518d95 | /ttslang.py | 28c918e4b4594278eeae4e3af6f10d766e7db1e7 | [] | no_license | harishravi121/Pythoncodes | d70059a3b7785d668a4b03f3ec85b0777b33706f | 1d6d6ca0ed6348b6c5d07d27d24668fb567527ca | refs/heads/master | 2023-07-07T11:58:03.741814 | 2023-07-01T08:01:56 | 2023-07-01T08:01:56 | 211,642,477 | 3 | 0 | null | 2020-01-13T06:45:25 | 2019-09-29T10:00:32 | Python | UTF-8 | Python | false | false | 83 | py | import pyttsx3
engine = pyttsx3.init()
engine.say("Hello")  # say() only queues the utterance
engine.runAndWait()  # runAndWait() drains the queue and blocks until speech finishes
| [
"[email protected]"
] | |
792451ae4deffa9b1fcfbae36e7e0397b0f3d802 | cf4f3c181dc04c4e698b53c3bb5dd5373b0cc1f4 | /meridian/tst/acupoints/test_zusanli213.py | f53e9b7310099603a6d346caed38546241f59172 | [
"Apache-2.0"
] | permissive | sinotradition/meridian | da3bba6fe42d3f91397bdf54520b3085f7c3bf1d | 8c6c1762b204b72346be4bbfb74dedd792ae3024 | refs/heads/master | 2021-01-10T03:20:18.367965 | 2015-12-14T14:58:35 | 2015-12-14T14:58:35 | 46,456,260 | 5 | 3 | null | 2015-11-29T15:00:20 | 2015-11-19T00:21:00 | Python | UTF-8 | Python | false | false | 301 | py | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import zusanli213
class TestZusanli213Functions(unittest.TestCase):
def setUp(self):
pass
def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
78114eb9b8af0d0f91311f5efe983687ab814067 | 71f00ed87cd980bb2f92c08b085c5abe40a317fb | /Data/GoogleCloud/google-cloud-sdk/lib/surface/ai_platform/operations/describe.py | 06aa812cb986b22a941e19bab4929cc2330eda88 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | factoryofthesun/Rao-NLP | 2bd8269a8eed1cb352c14c8fde88e3111ccca088 | 87f9723f5ee51bd21310d58c3425a2a7271ec3c5 | refs/heads/master | 2023-04-18T08:54:08.370155 | 2020-06-09T23:24:07 | 2020-06-09T23:24:07 | 248,070,291 | 0 | 1 | null | 2021-04-30T21:13:04 | 2020-03-17T20:49:03 | Python | UTF-8 | Python | false | false | 1,528 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ai-platform jobs describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ml_engine import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import endpoint_util
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import operations_util
def _AddDescribeArgs(parser):
flags.OPERATION_NAME.AddToParser(parser)
flags.GetRegionArg('operation').AddToParser(parser)
class Describe(base.DescribeCommand):
"""Describe an AI Platform operation."""
@staticmethod
def Args(parser):
_AddDescribeArgs(parser)
def Run(self, args):
with endpoint_util.MlEndpointOverrides(region=args.region):
client = operations.OperationsClient()
return operations_util.Describe(client, args.operation)
| [
"[email protected]"
] | |
1b661917f1bbf1889691f540f74bfadc8996f42a | 244189d49a3967b4b002af73f40ca8e8064c4771 | /modules/auxiliary/scanner/http/octopusdeploy_login.rb | e95f4eb8c6b5e148702da83e0b31045c7dcf1249 | [
"MIT"
] | permissive | darkcode357/thg-framework | 7540609fb79619bdc12bd98664976d51c79816aa | c1c3bd748aac85a8c75e52486ae608981a69d93a | refs/heads/master | 2023-03-01T05:06:51.399919 | 2021-06-01T14:00:32 | 2021-06-01T14:00:32 | 262,925,227 | 11 | 6 | NOASSERTION | 2023-02-10T23:11:02 | 2020-05-11T03:04:05 | Python | UTF-8 | Python | false | false | 2,528 | rb | ##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'metasploit/framework/credential_collection'
require 'metasploit/framework/login_scanner/octopusdeploy'
class MetasploitModule < Msf::Auxiliary
include Msf::Exploit::Remote::HttpClient
include Msf::Auxiliary::Report
include Msf::Auxiliary::AuthBrute
include Msf::Auxiliary::Scanner
def initialize
super(
'Name' => 'Octopus Deploy Login Utility',
'Description' => %q{
This module simply attempts to login to an Octopus Deploy server using a specific
username and password. It has been confirmed to work on version 3.4.4
},
'Author' => [ 'James Otten <jamesotten1[at]gmail.com>' ],
'License' => MSF_LICENSE
)
register_options(
[
Opt::RPORT(80),
OptString.new('TARGETURI', [true, 'URI for login. Default is /api/users/login', '/api/users/login'])
])
deregister_options('PASSWORD_SPRAY')
end
def run_host(ip)
cred_collection = Metasploit::Framework::CredentialCollection.new(
blank_passwords: datastore['BLANK_PASSWORDS'],
pass_file: datastore['PASS_FILE'],
password: datastore['PASSWORD'],
user_file: datastore['USER_FILE'],
userpass_file: datastore['USERPASS_FILE'],
username: datastore['USERNAME'],
user_as_pass: datastore['USER_AS_PASS']
)
scanner = Metasploit::Framework::LoginScanner::OctopusDeploy.new(
configure_http_login_scanner(
cred_details: cred_collection,
stop_on_success: datastore['STOP_ON_SUCCESS'],
bruteforce_speed: datastore['BRUTEFORCE_SPEED'],
connection_timeout: 10,
http_username: datastore['HttpUsername'],
http_password: datastore['HttpPassword'],
uri: datastore['TARGETURI']
)
)
scanner.scan! do |result|
credential_data = result.to_h
credential_data.merge!(
module_fullname: fullname,
workspace_id: myworkspace_id
)
if result.success?
credential_core = create_credential(credential_data)
credential_data[:core] = credential_core
create_credential_login(credential_data)
print_good "#{ip}:#{rport} - Login Successful: #{result.credential}"
else
invalidate_login(credential_data)
vprint_error "#{ip}:#{rport} - LOGIN FAILED: #{result.credential} (#{result.status})"
end
end
end
end
| [
"[email protected]"
] | |
adb6427317dfc161210edab8159cb6ef4ec06f21 | 23b44edcd663eb60d4deee64ced5a5b27ee3b7d2 | /thermosteam/chemicals/phase_change.py | ef5a32237d3cf1e9db68f454ccd5c957c0c7a959 | [
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"MIT"
] | permissive | sarangbhagwat/thermosteam | 7b13c6c3146fe2fc378b453fe3c732dc7397ea0c | 710ec22b17c257a742300bf172fd3121852abf98 | refs/heads/master | 2022-12-13T03:37:49.251727 | 2020-09-14T17:24:30 | 2020-09-14T17:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,344 | py | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <[email protected]>
#
# This module extends the phase_change module from the chemicals's library:
# https://github.com/CalebBell/chemicals
# Copyright (C) 2020 Caleb Bell <[email protected]>
#
# This module is under a dual license:
# 1. The UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
#
# 2. The MIT open-source license. See
# https://github.com/CalebBell/chemicals/blob/master/LICENSE.txt for details.
from chemicals import phase_change as pc
import numpy as np
from ..base import InterpolatedTDependentModel, TDependentHandleBuilder, functor
from .. import functional as fn
from chemicals.dippr import EQ106
from .data import (phase_change_data_Perrys2_150,
phase_change_data_VDI_PPDS_4,
VDI_saturation_dict,
phase_change_data_Alibakhshi_Cs,
lookup_VDI_tabular_data,
Hvap_data_CRC,
Hvap_data_Gharagheizi,
)
### Enthalpy of Vaporization at T
Clapeyron = functor(pc.Clapeyron, 'Hvap')
Pitzer = functor(pc.Pitzer, 'Hvap')
SMK = functor(pc.SMK, 'Hvap')
MK = functor(pc.MK, 'Hvap')
Velasco = functor(pc.Velasco, 'Hvap')
Watson = functor(pc.Watson, 'Hvap')
Alibakhshi = functor(pc.Alibakhshi, 'Hvap')
PPDS12 = functor(pc.PPDS12, 'Hvap')
def Clapeyron_hook(self, T, kwargs):
kwargs = kwargs.copy()
Psat = kwargs['Psat']
if callable(Psat): kwargs['Psat'] = Psat = Psat(T)
if 'V' in kwargs:
# Use molar volume to compute dZ if possible
V = kwargs.pop('V')
kwargs['dZ'] = fn.Z(T, Psat, V.g(T, Psat) - V.l(T, Psat))
return self.function(T, **kwargs)
Clapeyron.functor.hook = Clapeyron_hook
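# The hook above lets the Clapeyron functor accept a saturation-pressure model
# (a callable of T) instead of a scalar Psat and, when a phase-volume handle V
# is supplied, it replaces dZ with the gas/liquid compressibility-factor
# difference evaluated at (T, Psat).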
@TDependentHandleBuilder('Hvap')
def heat_of_vaporization_handle(handle, CAS, Tb, Tc, Pc, omega,
similarity_variable, Psat, V):
# if has_CoolProp and self.CASRN in coolprop_dict:
# methods.append(COOLPROP)
# self.CP_f = coolprop_fluids[self.CASRN]
# Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)
add_model = handle.add_model
if CAS in phase_change_data_Perrys2_150:
Tc, C1, C2, C3, C4, Tmin, Tmax = phase_change_data_Perrys2_150[CAS]
data = (Tc, C1, C2, C3, C4)
add_model(EQ106.functor.from_args(data), Tmin, Tmax)
if CAS in phase_change_data_VDI_PPDS_4:
Tc, A, B, C, D, E = phase_change_data_VDI_PPDS_4[CAS]
add_model(PPDS12.functor.from_args(data=(Tc, A, B, C, D, E)), 0, Tc)
if all((Tc, Pc)):
model = Clapeyron.functor.from_args(data=(Tc, Pc, None, Psat))
model.V = V
add_model(model, 0, Tc)
data = (Tc, omega)
if all(data):
for f in (MK, SMK, Velasco, Pitzer):
add_model(f.functor.from_args(data), 0, Tc)
if CAS in VDI_saturation_dict:
Ts, Hvaps = lookup_VDI_tabular_data(CAS, 'Hvap')
add_model(InterpolatedTDependentModel(Ts, Hvaps, Ts[0], Ts[-1]))
if Tc:
if CAS in phase_change_data_Alibakhshi_Cs:
C = float(phase_change_data_Alibakhshi_Cs.get(CAS, 'C'))
add_model(Alibakhshi.functor.from_args(data=(Tc, C)), 0, Tc)
if CAS in Hvap_data_CRC:
Hvap = float(Hvap_data_CRC.get(CAS, 'HvapTb'))
if not np.isnan(Hvap):
Tb = float(Hvap_data_CRC.get(CAS, 'Tb'))
data = dict(Hvap_ref=Hvap, T_ref=Tb, Tc=Tc, exponent=0.38)
add_model(Watson.functor.from_kwargs(data), 0, Tc)
Hvap = float(Hvap_data_CRC.get(CAS, 'Hvap298'))
if not np.isnan(Hvap):
data = dict(Hvap_ref=Hvap, T_ref=298., Tc=Tc, exponent=0.38)
add_model(Watson.functor.from_kwargs(data), 0, Tc)
if CAS in Hvap_data_Gharagheizi:
Hvap = float(Hvap_data_Gharagheizi.get(CAS, 'Hvap298'))
data = dict(Hvap_ref=Hvap, T_ref=298., Tc=Tc, exponent=0.38)
add_model(Watson.functor.from_kwargs(data), 0, Tc)
data = (Tb, Tc, Pc)
if all(data):
for f in (pc.Riedel, pc.Chen, pc.Vetere, pc.Liu):
add_model(f(*data), 0, Tc)
pc.heat_of_vaporization_handle = heat_of_vaporization_handle | [
"[email protected]"
] | |
6b5240c09546ae48c6e11c2e580c271be45aea67 | b75ee1f07fcc50142da444e8ae9ba195bf49977a | /test/todo.py | fda75e5f8c9744005e5de098f819c7cadc1540c1 | [
"Apache-2.0"
] | permissive | FlorianLudwig/code-owl | 369bdb57a66c0f06e07853326be685c177e2802a | be6518c89fb49ae600ee004504f9485f328e1090 | refs/heads/master | 2016-08-04T02:26:07.445016 | 2014-05-25T19:19:13 | 2014-05-25T19:19:13 | 18,918,361 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | # this file contains tests for missing features
# this means the tests here are expected to FAIL.
import codeowl.search
def match(query, code):
query = codeowl.search.generate_query(query)
code = codeowl.code.parse(code)
return codeowl.search.tokens(query, code, '<test>')
def test_py_import():
assert match(
'import foo',
'from foo import bar'
)
assert match(
'import foo.bar',
'from foo import bar'
)
assert not match(
'import foo',
'import bar; print foo'
)
def test_py_block():
"""Tree based matching
do semantic matching of code blocks."""
assert match(
'for: print i',
'for i in xrange(10):\n'
' pass\n'
' print i\n'
)
# same as above just a few spaces less
# since there are less not-maching tokens
# this actually scores better than the
# example above. But it should not match
# at all.
assert not match(
'for: print i',
'for i in xrange(10):\n'
' pass\n'
'print i\n'
) | [
"[email protected]"
] | |
2bf16d2a0b120a587301917cafe6e3763746f348 | 52a3beeb07ad326115084a47a9e698efbaec054b | /horizon/.venv/bin/heat | da3eeefdf4e4f11003d3c6597a0c69aea163d0fd | [
"Apache-2.0"
] | permissive | bopopescu/sample_scripts | 3dade0710ecdc8f9251dc60164747830f8de6877 | f9edce63c0a4d636f672702153662bd77bfd400d | refs/heads/master | 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null | UTF-8 | Python | false | false | 240 | #!/home/horizon/horizon/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from heatclient.shell import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
62de38f216a4bb285bd5c78c9ae8517e3d1c44dc | 4ddf82eeb31d46fb67802a4375390eb42a8f23b8 | /tests/pyb/adc.py | 875d31d732cc3648fff5484fb1e95c617dda69ad | [
"MIT"
] | permissive | pulkin/micropython | 1437a507b9e90c8824e80c3553e6209d89e64565 | c274c947c611f510fd2b1c4ef6cbd9f4283794fc | refs/heads/master | 2023-03-08T02:35:28.208819 | 2022-04-19T12:38:47 | 2022-04-19T12:38:47 | 167,732,676 | 103 | 36 | MIT | 2023-02-25T03:02:36 | 2019-01-26T19:57:59 | C | UTF-8 | Python | false | false | 1,546 | py | from pyb import ADC, Timer
adct = ADC(16) # Temperature 930 -> 20C
print(str(adct)[:19])
adcv = ADC(17) # Voltage 1500 -> 3.3V
print(adcv)
# read single sample; 2.5V-5V is pass range
val = adcv.read()
assert val > 1000 and val < 2000
# timer for read_timed
tim = Timer(5, freq=500)
# read into bytearray
buf = bytearray(b"\xff" * 50)
adcv.read_timed(buf, tim)
print(len(buf))
for i in buf:
assert i > 50 and i < 150
# read into arrays with different element sizes
import array
arv = array.array("h", 25 * [0x7FFF])
adcv.read_timed(arv, tim)
print(len(arv))
for i in arv:
assert i > 1000 and i < 2000
arv = array.array("i", 30 * [-1])
adcv.read_timed(arv, tim)
print(len(arv))
for i in arv:
assert i > 1000 and i < 2000
# Test read_timed_multi
arv = bytearray(b"\xff" * 50)
art = bytearray(b"\xff" * 50)
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
assert i > 60 and i < 125
# Wide range: unsure of accuracy of temp sensor.
for i in art:
assert i > 15 and i < 200
arv = array.array("i", 25 * [-1])
art = array.array("i", 25 * [-1])
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
assert i > 1000 and i < 2000
# Wide range: unsure of accuracy of temp sensor.
for i in art:
assert i > 50 and i < 2000
arv = array.array("h", 25 * [0x7FFF])
art = array.array("h", 25 * [0x7FFF])
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
assert i > 1000 and i < 2000
# Wide range: unsure of accuracy of temp sensor.
for i in art:
assert i > 50 and i < 2000
| [
"[email protected]"
] | |
ba2dba9f3f8b6287d25ffbb5992a661635b0b81c | f1b9dc71b2dafc2b331de495ef4ceab938734fbe | /test.py | b80d38b36f71633c8ecb41f54b9fdf2f08e362bc | [] | no_license | philippjfr/FOSS4G-2017-Talk | 7deb6e48755b71658f930aa55c06d3a903f1abc6 | e3f035dc648cfc4642e774e536d6c07c847417b5 | refs/heads/master | 2022-11-07T07:19:37.053629 | 2017-08-23T17:16:38 | 2017-08-23T17:16:38 | 100,023,633 | 2 | 4 | null | 2022-11-01T10:51:44 | 2017-08-11T11:09:21 | JavaScript | UTF-8 | Python | false | false | 1,261 | py | import holoviews as hv
import param
import parambokeh
import numpy as np
from bokeh.io import curdoc
renderer = hv.renderer('bokeh').instance(mode='server')
class CurveExample(hv.streams.Stream):
color = param.Color(default='#000000', precedence=0)
element = param.ObjectSelector(default=hv.Curve,
objects=[hv.Curve, hv.Scatter, hv.Area],
precedence=0)
amplitude = param.Number(default=2, bounds=(2, 5))
frequency = param.Number(default=2, bounds=(1, 10))
output = parambokeh.view.Plot()
def view(self, *args, **kwargs):
return self.element(self.amplitude*np.sin(np.linspace(0, np.pi*self.frequency)),
vdims=[hv.Dimension('y', range=(-5, 5))])(style=dict(color=self.color))
def event(self, **kwargs):
if not self.output or any(k in kwargs for k in ['color', 'element']):
self.output = hv.DynamicMap(self.view, streams=[self])
else:
super(CurveExample, self).event(**kwargs)
example = CurveExample(name='HoloViews Example')
doc = parambokeh.Widgets(example, callback=example.event, on_init=True, mode='server',
view_position='right', doc=curdoc())
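# Usage note (assumption): since this builds against curdoc(), the script is
# meant to be launched as a Bokeh server app, e.g. `bokeh serve test.py`,
# rather than executed directly with the Python interpreter.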
| [
"[email protected]"
] | |
a8b75d380cc2dabf3b993334ea90a79166b071f7 | 729ee5bcb31708a82b08509775786597dac02263 | /coding-challenges/week06/AssignmentQ4.py | 40922ab75fb78db62f5805f72f03ea8d78695d15 | [] | no_license | pandey-ankur-au17/Python | 67c2478316df30c2ac8ceffa6704cf5701161c27 | 287007646a694a0dd6221d02b47923935a66fcf4 | refs/heads/master | 2023-08-30T05:29:24.440447 | 2021-09-25T16:07:23 | 2021-09-25T16:07:23 | 358,367,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Given an array of size n and a number k, find all elements that appear
# more than n/k times.
# Input: k = 4, n = 9, A = [3, 1, 2, 2, 2, 1, 4, 3, 3]
# Output: [3, 2]
list1=list(map(int,input("Enter the list=").split()))
n=len(list1)
k=int(input("Enter the value of k="))
frequency={}
for i in list1:
if i in frequency:
frequency[i]=frequency[i]+1
else:
frequency[i]=1
output=[]
for i, j in frequency.items():
    if j > n // k:
        output.append(i)
print(output)
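# A shorter equivalent sketch using the standard library's collections.Counter
# (same threshold logic as the loop above):
#     from collections import Counter
#     output = [value for value, cnt in Counter(list1).items() if cnt > n // k]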
| [
"[email protected]"
] | |
98a3bb666aa53326b5eaed0135122f7aa1ea659d | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002017.py | 3e62b15d60a4b9a7663df6952ee01bd0980a8d61 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher85189(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher85189._instance is None:
CommutativeMatcher85189._instance = CommutativeMatcher85189()
return CommutativeMatcher85189._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 85188
return
yield
from collections import deque | [
"[email protected]"
] | |
d04aaef21caf30a7b3162da917e0162e5972d2ce | 4e93e4275e82a08d3c114c9dd72deb0959d41a55 | /src/ch10/binary/__init__.py | 6f8ad9a2cdb18a4e50d6eeff4b4bd01581729622 | [] | no_license | wsjhk/wasm-python-book | fd38e5a278be32df49416616724f38a415614e8b | 872bc8fe754a6a3573436f534a8da696c0486c24 | refs/heads/master | 2023-03-16T02:18:48.869794 | 2020-09-17T11:31:49 | 2020-09-17T11:31:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: __init__.py
@time: 2020/8/19 1:55
@project: wasm-python-book
@desc:
"""
from ch10.binary import reader
decode_file = reader.decode_file
| [
"[email protected]"
] | |
9d9b777e9db5f14839481e9edb2dc062d203210a | 93b5da40708878016d953aeb4d9b908ff8af1e04 | /function/practice2.py | 3a807a52ef9fe652abe3614a37febbc4e2a91658 | [] | no_license | Isaccchoi/python-practice | e50e932d2a7bf13b54e5ca317a03a5d63b406c6b | 70e3e1f8590667cfe5ba4c094873eb39d555c44a | refs/heads/master | 2021-06-29T05:45:18.371743 | 2017-09-21T06:15:47 | 2017-09-21T06:15:47 | 103,229,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | def sequential_search(str, key):
count = 0
while count < len(str):
if str[count] == key:
return count
else:
count += 1
    return -1  # not found; 0 is a valid index, so it cannot signal "missing"
print(sequential_search("개구리고양이", "개"))
print(sequential_search("개구리고양이", "구"))
print(sequential_search("개구리고양이", "리"))
print(sequential_search("개구리고양이", "고"))
print(sequential_search("개구리고양이", "양"))
print(sequential_search("개구리고양이", "이"))
print(sequential_search("개구리고양이", "말"))
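# Note: with the -1 sentinel above, sequential_search behaves like Python's
# built-in str.find, which also returns -1 when the key is absent.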
| [
"[email protected]"
] | |
d85ff8da66e28d47df80755796b5b30c21127fba | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /able_way/long_way/take_eye.py | cfcd983c2042f55d5d6e7c99ddba0ad24bf58cf2 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#! /usr/bin/env python
def good_time(str_arg):
problem_and_work(str_arg)
print('high_thing')
def problem_and_work(str_arg):
print(str_arg)
if __name__ == '__main__':
good_time('able_fact_and_life')
| [
"[email protected]"
] | |
a4096b7f1c4116a6ffaf257384b64bd4bd388996 | 5d302c38acd02d5af4ad7c8cfe244200f8e8f877 | /String/6. ZigZag Conversion(Med).py | 1e80504fb03188e5ba70ed8751bed04c9c9c96c4 | [] | no_license | nerohuang/LeetCode | 2d5214a2938dc06600eb1afd21686044fe5b6db0 | f273c655f37da643a605cc5bebcda6660e702445 | refs/heads/master | 2023-06-05T00:08:41.312534 | 2021-06-21T01:03:40 | 2021-06-21T01:03:40 | 230,164,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | class Solution:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1:
return s
store = [["" for _ in range(len(s))]for _ in range(numRows)];
i = 0;
m, n = -1, 1;
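        # store is a numRows x len(s) grid filled in zigzag order: (m, n) first
        # walks straight down column n, then climbs diagonally (up one row,
        # right one column) until it reaches the top row again.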
while i < len(s):
if m == -1:
m += 1;
n -= 1;
while m < numRows:
if i < len(s) and store[m][n] == "":
store[m][n] = s[i];
i += 1;
m += 1;
if m == numRows:
m -= 2;
n += 1;
while m >= 0:
if i < len(s):
store[m][n] = s[i];
m -= 1;
n += 1;
i += 1;
else:
break;
ans = ""
for i in range(len(store)):
for c in store[i]:
if store[i] != "":
ans += c
return(ans)
#class Solution:
# def convert(self, s: str, numRows: int) -> str:
# if numRows == 1:
# return s
#
# lines = [''] * numRows
# line_count = 0
# adder = 1
# for c in s:
# lines[line_count] = lines[line_count] + c
#
# if line_count + adder > numRows-1:
# adder = -1
# elif line_count + adder < 0:
# adder = 1
#
# line_count = line_count + adder
# return ''.join(lines) | [
"[email protected]"
] | |
83216ab4814f0ddc0657688c7f97149e35a3bdbb | 142362be3c4f8b19bd118126baccab06d0514c5b | /xapian64/site-packages/djapian/utils/decorators.py | 1deb7553ccf6014639b53b8845f5a841e5fbcb2e | [] | no_license | dkramorov/astwobytes | 84afa4060ffed77d5fd1a6e8bf5c5c69b8115de6 | 55071537c5c84d0a27757f11ae42904745cc1c59 | refs/heads/master | 2023-08-27T07:10:51.883300 | 2023-08-02T16:52:17 | 2023-08-02T16:52:17 | 191,950,319 | 0 | 0 | null | 2022-11-22T09:15:42 | 2019-06-14T13:44:23 | HTML | UTF-8 | Python | false | false | 898 | py | import xapian
def retry_if_except(errors, num_retry=4, cleanup_callback=None):
def _wrap(func):
def _inner(*args, **kwargs):
for n in reversed(range(num_retry)):
try:
return func(*args, **kwargs)
except errors:
# propagate the exception if we have run out of tries
if not n:
raise
# perform a clean up action before the next attempt if required
if callable(cleanup_callback):
cleanup_callback()
return _inner
return _wrap
def reopen_if_modified(database, num_retry=3,
errors=xapian.DatabaseModifiedError):
return retry_if_except(errors,
num_retry=num_retry,
cleanup_callback=lambda: database.reopen())
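# Minimal usage sketch (assumption: `db` is an open xapian.Database and
# `run_query` searches it). The decorator retries the call, reopening the
# database whenever a writer modified it mid-read:
#
#     @reopen_if_modified(db)
#     def run_query():
#         ...  # query db here
#
#     result = run_query()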
| [
"[email protected]"
] | |
14f7ea5a0fd4e2ab4ffa08421ed6e486da33ccfc | 4692f28f86ee84a76abfac8cc8a0dd41fcd402e4 | /tasks/github_tasks.py | ddf0c9af7bae0fe3c10ef1e08285fae600084aa1 | [
"CC0-1.0",
"BSD-3-Clause",
"Apache-2.0",
"GPL-1.0-or-later",
"MIT",
"0BSD",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-2-Clause-Views",
"MPL-2.0"
] | permissive | DataDog/datadog-agent | cc4b89839d6031903bf23aa12eccc2a3f3c7f213 | d960cdb7de8fa5d1c7138cfe58e754af80cb796a | refs/heads/main | 2023-09-04T10:45:08.138748 | 2023-09-04T09:13:43 | 2023-09-04T09:13:43 | 49,970,739 | 2,388 | 1,288 | Apache-2.0 | 2023-09-14T20:06:34 | 2016-01-19T17:40:41 | Go | UTF-8 | Python | false | false | 3,694 | py | import os
from invoke import Exit, task
from .libs.github_actions_tools import (
download_artifacts_with_retry,
follow_workflow_run,
print_workflow_conclusion,
trigger_macos_workflow,
)
from .utils import DEFAULT_BRANCH, load_release_versions
@task
def trigger_macos_build(
ctx,
datadog_agent_ref=DEFAULT_BRANCH,
release_version="nightly-a7",
major_version="7",
python_runtimes="3",
destination=".",
version_cache=None,
retry_download=3,
retry_interval=10,
):
env = load_release_versions(ctx, release_version)
github_action_ref = env["MACOS_BUILD_VERSION"]
run = trigger_macos_workflow(
workflow_name="macos.yaml",
github_action_ref=github_action_ref,
datadog_agent_ref=datadog_agent_ref,
release_version=release_version,
major_version=major_version,
python_runtimes=python_runtimes,
# Send pipeline id and bucket branch so that the package version
# can be constructed properly for nightlies.
gitlab_pipeline_id=os.environ.get("CI_PIPELINE_ID", None),
bucket_branch=os.environ.get("BUCKET_BRANCH", None),
version_cache_file_content=version_cache,
)
workflow_conclusion = follow_workflow_run(run)
print_workflow_conclusion(workflow_conclusion)
download_artifacts_with_retry(run, destination, retry_download, retry_interval)
if workflow_conclusion != "success":
raise Exit(code=1)
@task
def trigger_macos_test(
ctx,
datadog_agent_ref=DEFAULT_BRANCH,
release_version="nightly-a7",
python_runtimes="3",
destination=".",
version_cache=None,
retry_download=3,
retry_interval=10,
):
env = load_release_versions(ctx, release_version)
github_action_ref = env["MACOS_BUILD_VERSION"]
run = trigger_macos_workflow(
workflow_name="test.yaml",
github_action_ref=github_action_ref,
datadog_agent_ref=datadog_agent_ref,
python_runtimes=python_runtimes,
version_cache_file_content=version_cache,
)
workflow_conclusion = follow_workflow_run(run)
print_workflow_conclusion(workflow_conclusion)
download_artifacts_with_retry(run, destination, retry_download, retry_interval)
if workflow_conclusion != "success":
raise Exit(code=1)
@task
def lint_codeowner(_):
"""
Check every package in `pkg` has an owner
"""
base = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.join(base, "..")
os.chdir(root_folder)
owners = _get_code_owners(root_folder)
# make sure each root package has an owner
pkgs_without_owner = _find_packages_without_owner(owners, "pkg")
if len(pkgs_without_owner) > 0:
raise Exit(
f'The following packages in `pkg` directory don\'t have an owner in CODEOWNERS: {pkgs_without_owner}',
code=1,
)
def _find_packages_without_owner(owners, folder):
pkg_without_owners = []
for x in os.listdir(folder):
path = os.path.join("/" + folder, x)
if path not in owners:
pkg_without_owners.append(path)
return pkg_without_owners
def _get_code_owners(root_folder):
code_owner_path = os.path.join(root_folder, ".github", "CODEOWNERS")
owners = {}
with open(code_owner_path) as f:
for line in f:
line = line.strip()
line = line.split("#")[0] # remove comment
if len(line) > 0:
parts = line.split()
path = os.path.normpath(parts[0])
# example /tools/retry_file_dump ['@DataDog/agent-metrics-logs']
owners[path] = parts[1:]
return owners
| [
"[email protected]"
] | |
380ff41b42f8149fb0f95f550c5bedddd25c9232 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02724/s435287686.py | 462d510537b496c3a2a42ca1c7ffce6125472883 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | x = int(input())
hap = x // 500              # each 500-yen coin yields 1000 happiness points
hap2 = (x - hap*500) // 5   # spend the remainder on 5-yen coins, 5 points each
ans = hap*1000 + hap2*5
print(ans) | [
"[email protected]"
] | |
019ab9a8348c516eab7132b6900f6f45b8172cdb | 243ce25168eea65144713a1100ca997a2d29f280 | /p68.py | aaea8a7b5cd711d48a2ecaed6cc2366716f5667f | [] | no_license | acadien/projecteuler | 6aa1efbb1141ecf36d6b23bb6b058070e5e881e0 | 2efb0b5577cee7f046ed4f67d0f01f438cbf3770 | refs/heads/master | 2020-04-28T21:33:49.631044 | 2013-12-06T19:25:20 | 2013-12-06T19:25:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/python
from math import *
from random import *
from itertools import chain,permutations
o_ind=range(5)
i_ind=[[5,6],[6,7],[7,8],[8,9],[9,5]]
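# Magic 5-gon ring layout: indices 0-4 are the outer nodes, 5-9 the inner ring;
# line i is the triple (A[o_ind[i]], A[i_ind[i][0]], A[i_ind[i][1]]), and
# consecutive lines share one inner node.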
def trysum(A):
if 10 in A[5:]:
return False
B=set([A[i]+A[i_ind[i][0]]+A[i_ind[i][1]] for i in range(5)])
if len(B)==1:
return True
return False
def flatten(listOfLists):
return chain.from_iterable(listOfLists)
def tochain(A):
start=A.index(min(A[:5]))
return int("".join(map(str,flatten([[A[o_ind[j]],A[i_ind[j][0]],A[i_ind[j][1]]] for j in map(lambda x:x%5,range(start,start+5))]))))
mx=0
for A in permutations(range(1,11)):
if trysum(A):
Aval=tochain(A)
if Aval>mx:
print Aval
mx=Aval
| [
"[email protected]"
] | |
e1deab83389f3a158a86076670d0bded1a976175 | 99e57f00fcaf4469c1c1b79f2d17176aaef9a790 | /hr_payslip_batch_confirm/models/__init__.py | 07e6a958e3299a35542efcae0c6a19e9d50b4dda | [] | no_license | detian08/mcl | d007ffd0e869f3bd9a8c74bc8473119901f0de2a | 32d61148326c931aca0107c3894061773f287e33 | refs/heads/master | 2022-03-23T19:36:29.608645 | 2019-12-11T10:15:50 | 2019-12-11T10:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | from . import hr_payroll
from . import hr_payroll_run | [
"[email protected]"
] | |
ae9e61d3ae9ee479adabb49c6e4d75d50cecfd7e | b1f7c8eecdfc1e54e868430d7b6192b162f5a530 | /insta/signals.py | 2177664422d6238343c5645260f46897273c08a5 | [] | no_license | Nyagah-Tech/instagramWebApp | 490c9d8874c082132e9a0d78eb849e2b1136656b | abf3421a408ac1daf5f5bf20b76073ad73894eba | refs/heads/master | 2022-12-13T04:33:26.104920 | 2020-01-06T21:18:17 | 2020-01-06T21:18:17 | 229,194,006 | 0 | 0 | null | 2022-11-22T05:13:48 | 2019-12-20T05:08:18 | Python | UTF-8 | Python | false | false | 565 | py | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def create_profile(sender,instance,created,**kwargs):
'''
    Create a Profile for the user right after registration.
'''
if created:
Profile.objects.create(user=instance)
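# Wiring sketch (assumption: the app is named "insta" with a default AppConfig):
# these receivers only fire if this module is imported, conventionally from the
# app config's ready() hook:
#
#     # insta/apps.py
#     from django.apps import AppConfig
#
#     class InstaConfig(AppConfig):
#         name = 'insta'
#         def ready(self):
#             from . import signals  # noqa: F401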
@receiver(post_save, sender=User)
def save_profile(sender,instance, **kwargs):
'''
    Save the user's profile whenever the User instance is saved.
'''
instance.profile.save() | [
"[email protected]"
] | |
62b7897f6f243bde43c73bd0addea96c61ff23d3 | e2d23d749779ed79472a961d2ab529eeffa0b5b0 | /gcloud/tests/core/models/test_user_default_project.py | a8944c2c0e485a689a536b1e3088ebda6139b172 | [
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | manlucas/atom | 9fa026b3f914e53cd2d34aecdae580bda09adda7 | 94963fc6fdfd0568473ee68e9d1631f421265359 | refs/heads/master | 2022-09-30T06:19:53.828308 | 2020-01-21T14:08:36 | 2020-01-21T14:08:36 | 235,356,376 | 0 | 0 | NOASSERTION | 2022-09-16T18:17:08 | 2020-01-21T14:04:51 | Python | UTF-8 | Python | false | false | 2,282 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import factory
from django.db.models import signals
from django.test import TestCase
from gcloud.core.models import Project, UserDefaultProject
class UserDefaultProjectTestCase(TestCase):
@factory.django.mute_signals(signals.post_save, signals.post_delete)
def tearDown(self):
Project.objects.all().delete()
UserDefaultProject.objects.all().delete()
@factory.django.mute_signals(signals.post_save, signals.post_delete)
def test_init_user_default_project__first_set(self):
project = Project.objects.create(name='name',
creator='creator',
desc='', )
dp = UserDefaultProject.objects.init_user_default_project('username', project)
self.assertEqual(dp.default_project.id, project.id)
@factory.django.mute_signals(signals.post_save, signals.post_delete)
def test_init_user_default_project__second_set(self):
project_1 = Project.objects.create(name='name',
creator='creator',
desc='', )
project_2 = Project.objects.create(name='name',
creator='creator',
desc='', )
UserDefaultProject.objects.init_user_default_project('username', project_1)
dp = UserDefaultProject.objects.init_user_default_project('username', project_2)
self.assertEqual(dp.default_project.id, project_1.id)
| [
"[email protected]"
] | |
02778531cc548dda2bfadf226376a93af1bcd11f | 746bf62ae3599f0d2dcd620ae37cd11370733cc3 | /leetcode/contains-duplicate.py | 768cd64e1cb979d349fc2bf6872d9d0a27bb7e6b | [] | no_license | wanglinjie/coding | ec0e614343b39dc02191455165eb1a5c9e6747ce | 350f28cad5ec384df476f6403cb7a7db419de329 | refs/heads/master | 2021-04-22T14:00:48.825959 | 2017-05-02T12:49:05 | 2017-05-02T12:49:05 | 48,011,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Author: Wanglj
Create Time : 20151223
Last Modified:
Check whether the list contains any duplicates
'''
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
nums_set = set(nums)
if len(nums_set) < len(nums):
return True
else:
return False | [
"[email protected]"
] | |
f9d7f2202c7c7b8cfb47887171023887d23fb306 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_192/ch50_2020_03_31_18_26_15_429003.py | 9cc22791965cdb59d8ef25da1a85490c844cd611 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def junta_nome_sobrenome(n, s):
n_s = []
espaco = [' ']*len(n)
i = 0
while i < len(n):
        n_s.append(n[i] + espaco[i] + s[i])
i += 1
return n_s
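# Example (hypothetical inputs): junta_nome_sobrenome(['Ana', 'Bruno'],
# ['Silva', 'Costa']) returns ['Ana Silva', 'Bruno Costa'].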
| [
"[email protected]"
] | |
38723203b79a0913486767469b468bcf4790caac | 795ba44e09add69a6c3859adf7e476908fcb234c | /backend/mod_training_1_27492/urls.py | 58881d4c5e4d89926432059cb0802a8f7062e3db | [] | no_license | crowdbotics-apps/mod-training-1-27492 | 0df7b863ef18e7ba4e3f1c34f1bac7554e184553 | 91815c10d4f8af1e18bb550db97373b4099a4ae9 | refs/heads/master | 2023-04-26T00:57:13.854464 | 2021-06-01T19:48:41 | 2021-06-01T19:48:41 | 371,498,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | """mod_training_1_27492 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "mod-training-1"
admin.site.site_title = "mod-training-1 Admin Portal"
admin.site.index_title = "mod-training-1 Admin"
# swagger
api_info = openapi.Info(
title="mod-training-1 API",
default_version="v1",
description="API documentation for mod-training-1 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] |