Dataset schema. Each row describes one source file: its content, repository metadata, and per-file statistics. Column types and observed value ranges are listed below (⌀ marks columns that may be null):

- hexsha: string (length 40)
- size: int64 (5 to 2.06M)
- ext: string (10 distinct values)
- lang: string (1 distinct value)
- max_stars_repo_path: string (3 to 248 chars)
- max_stars_repo_name: string (5 to 125 chars)
- max_stars_repo_head_hexsha: string (40 to 78 chars)
- max_stars_repo_licenses: list (1 to 10 items)
- max_stars_count: int64 (1 to 191k) ⌀
- max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
- max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
- max_issues_repo_path: string (3 to 248 chars)
- max_issues_repo_name: string (5 to 125 chars)
- max_issues_repo_head_hexsha: string (40 to 78 chars)
- max_issues_repo_licenses: list (1 to 10 items)
- max_issues_count: int64 (1 to 67k) ⌀
- max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
- max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
- max_forks_repo_path: string (3 to 248 chars)
- max_forks_repo_name: string (5 to 125 chars)
- max_forks_repo_head_hexsha: string (40 to 78 chars)
- max_forks_repo_licenses: list (1 to 10 items)
- max_forks_count: int64 (1 to 105k) ⌀
- max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
- max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
- content: string (5 to 2.06M chars)
- avg_line_length: float64 (1 to 1.02M)
- max_line_length: int64 (3 to 1.03M)
- alphanum_fraction: float64 (0 to 1)
- count_classes: int64 (0 to 1.6M)
- score_classes: float64 (0 to 1)
- count_generators: int64 (0 to 651k)
- score_generators: float64 (0 to 1)
- count_decorators: int64 (0 to 990k)
- score_decorators: float64 (0 to 1)
- count_async_functions: int64 (0 to 235k)
- score_async_functions: float64 (0 to 1)
- count_documentation: int64 (0 to 1.04M)
- score_documentation: float64 (0 to 1)
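For working with these records programmatically, here is a minimal sketch, assuming the rows have been exported to a Parquet file with exactly the column names listed above; the file name `code_corpus.parquet` and the filter thresholds are illustrative assumptions, not part of the dataset.

```python
# Minimal sketch: load rows with this schema and keep small, well-documented
# Python files. The file name "code_corpus.parquet" and the thresholds below
# are assumptions for illustration only.
import pandas as pd

df = pd.read_parquet("code_corpus.parquet")

subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 10_000)              # file size in bytes
    & (df["score_documentation"] > 0.2)  # documentation score in [0, 1]
]

for _, row in subset.iterrows():
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
```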

---

hexsha: 7398e8292797a50bf6c42c368fc2eb59c7ca47ec | size: 5,612 | ext: py | lang: Python
max_stars: feeds.py @ yoursantu/indiannewsplus (head 252f0367b43ec2edea636157bcf2d8a92dda6f3f, licenses ["MIT"]) | stars: null | events: null
max_issues: feeds.py @ yoursantu/indiannewsplus (head 252f0367b43ec2edea636157bcf2d8a92dda6f3f, licenses ["MIT"]) | issues: null | events: null
max_forks: feeds.py @ yoursantu/indiannewsplus (head 252f0367b43ec2edea636157bcf2d8a92dda6f3f, licenses ["MIT"]) | forks: null | events: null
content:
"""RSS feeds for the `multilingual_news` app."""
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.utils import get_language_from_request
from multilingual_tags.models import Tag, TaggedItem
from people.models import Person
from .models import NewsEntry
def is_multilingual():
return 'django.middleware.locale.LocaleMiddleware' in \
settings.MIDDLEWARE_CLASSES
def get_lang_name(lang):
return _(dict(settings.LANGUAGES)[lang])
class NewsEntriesFeed(Feed):
"""A news feed, that shows all entries."""
title_template = 'multilingual_news/feed/entries_title.html'
description_template = 'multilingual_news/feed/entries_description.html'
def get_object(self, request, **kwargs):
self.language_code = get_language_from_request(request)
self.site = get_current_site(request)
self.any_language = kwargs.get('any_language', None)
def feed_url(self, item):
if is_multilingual() or self.any_language:
return reverse('news_rss_any', kwargs={'any_language': True})
return reverse('news_rss')
def title(self, item):
if self.any_language or not is_multilingual():
return _(u"{0} blog entries".format(self.site.name))
return _(u"{0} blog entries in {1}".format(self.site.name,
get_lang_name(self.language_code)))
def link(self, item):
return reverse('news_list')
def item_link(self, item):
return item.get_absolute_url()
def description(self, item):
if self.any_language or not is_multilingual():
return _(u"{0} blog entries".format(self.site.name))
return _(u"{0} blog entries in {1}".format(self.site.name,
get_lang_name(self.language_code)))
def get_queryset(self, item):
if not is_multilingual() or self.any_language:
check_language = False
else:
check_language = True
return NewsEntry.objects.recent(limit=10,
check_language=check_language)
def items(self, item):
return self.get_queryset(item)
def item_pubdate(self, item):
return item.pub_date
class AuthorFeed(NewsEntriesFeed):
"""A news feed, that shows only entries from a certain author."""
title_template = 'multilingual_news/feed/author_title.html'
description_template = 'multilingual_news/feed/author_description.html'
def get_object(self, request, **kwargs):
super(AuthorFeed, self).get_object(request, **kwargs)
# Needs no try. If the author does not exist, we automatically get a
# 404 response.
self.author = Person.objects.get(pk=kwargs.get('author'))
def title(self, obj):
title = super(AuthorFeed, self).title(obj)
return _(u'{0} by {1}'.format(title, self.author))
def feed_url(self, obj):
if is_multilingual() or self.any_language:
return reverse('news_rss_any_author', kwargs={
'author': self.author.id, 'any_language': True})
return reverse('news_rss_author', kwargs={'author': self.author.id})
def link(self, obj):
# TODO Author specific archive
return reverse('news_list')
def description(self, obj):
description = super(AuthorFeed, self).description(obj)
return _(u'{0} by {1}'.format(description, self.author))
def get_queryset(self, obj):
if not is_multilingual() or self.any_language:
check_language = False
else:
check_language = True
return NewsEntry.objects.recent(limit=10,
check_language=check_language,
kwargs={'author': self.author})
class TaggedFeed(NewsEntriesFeed):
"""A news feed, that shows only entries with a special tag."""
title_template = 'multilingual_news/feed/author_title.html'
description_template = 'multilingual_news/feed/author_description.html'
def get_object(self, request, **kwargs):
super(TaggedFeed, self).get_object(request, **kwargs)
# Needs no try. If the tag does not exist, we automatically get a
# 404 response.
self.tag = Tag.objects.get(slug=kwargs.get('tag'))
def title(self, obj):
title = super(TaggedFeed, self).title(obj)
return _(u'{0} by {1}'.format(title, self.tag.name))
def feed_url(self, obj):
if is_multilingual() or self.any_language:
return reverse('news_rss_any_tagged', kwargs={
'tag': self.tag.slug, 'any_language': True})
return reverse('news_rss_tagged', kwargs={'tag': self.tag.slug})
def link(self, obj):
return reverse('news_archive_tagged', kwargs={'tag': self.tag.slug})
def description(self, obj):
description = super(TaggedFeed, self).description(obj)
return _(u'{0} by {1}'.format(description, self.tag.name))
def get_queryset(self, obj):
content_type = ContentType.objects.get_for_model(NewsEntry)
tagged_items = TaggedItem.objects.filter(
content_type=content_type, tag=self.tag)
entries = []
for tagged_item in tagged_items:
if tagged_item.object and tagged_item.object.is_public():
entries.append(tagged_item.object)
return entries[:10]
stats: avg_line_length 37.66443 | max_line_length 76 | alphanum_fraction 0.661083 | count_classes 4,892 | score_classes 0.871703 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,118 | score_documentation 0.199216

---

hexsha: 73991f48e7be2da65079b1e532a4f69842cc8cd4 | size: 15,814 | ext: py | lang: Python
max_stars: config/settings/base.py @ kingsdigitallab/field-django (head 6ceba79866d6971a6891f0b81ca9ed2a2d5a32db, licenses ["MIT"]) | stars: null | events: null
max_issues: config/settings/base.py @ kingsdigitallab/field-django (head 6ceba79866d6971a6891f0b81ca9ed2a2d5a32db, licenses ["MIT"]) | issues: 2 | events: 2020-08-12T23:53:01.000Z to 2022-02-10T09:41:09.000Z
max_forks: config/settings/base.py @ kingsdigitallab/field-django (head 6ceba79866d6971a6891f0b81ca9ed2a2d5a32db, licenses ["MIT"]) | forks: null | events: null
content:
"""
Base settings to build other settings files upon.
"""
import os
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# field/
APPS_DIR = ROOT_DIR / "field"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-gb"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
# 'django_extensions', # legacy
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_elasticsearch_dsl",
# wagtail
"wagtail.contrib.forms",
"wagtail.contrib.redirects",
"wagtail.contrib.settings",
"wagtail.embeds",
"wagtail.sites",
"wagtail.users",
"wagtail.snippets",
"wagtail.documents",
"wagtail.images",
"wagtail.admin",
"wagtail.core",
'wagtail.search', # legacy
'wagtail.contrib.modeladmin', # legacy
"wagtail.contrib.sitemaps", # puput
'wagtail.contrib.routable_page', # legacy
'wagtail.contrib.table_block', # legacy
"modelcluster",
"django_social_share", # for puput
"django_comments", # for puput
"taggit", # for puput
'puput', # legacy
'colorful', # for puput
'wagtailmenus', # legacy
'captcha', # legacy, what for?
# KDL
'kdl_wagtail_page', # legacy, still used?
'controlled_vocabulary',
'dublincore_resource',
"kdl_wagtail.core",
'kdl_wagtail.people',
'django_kdl_timeline',
]
LOCAL_APPS = [
# "field.users.apps.UsersConfig", # ?
'field_timeline',
'field_wagtail',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "field.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
if 0:
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
LOGIN_URL = '/wagtail/login/'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation"
".UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(ROOT_DIR / "assets"),
str(APPS_DIR / "static"),
str(ROOT_DIR / "node_modules"),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
if not os.path.exists(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(ROOT_DIR / "templates"), str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"field.utils.context_processors.settings_context",
'field_wagtail.context_processor.project_settings',
'field_wagtail.context_processor.mailing_list_footer',
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
# ADMINS = [("""King's Digital Lab""", "[email protected]")]
ADMINS = [("Geoffroy", "[email protected]")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "field.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "field.users.adapters.SocialAccountAdapter"
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
COMPRESS_CSS_FILTERS = [
# CSS minimizer
'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Elasticsearch
# ------------------------------------------------------------------------------
# https://github.com/django-es/django-elasticsearch-dsl
ELASTICSEARCH_DSL = {"default": {"hosts": "elasticsearch:9200"}}
# Wagtail
# ------------------------------------------------------------------------------
# https://docs.wagtail.io/en/v2.7.1/getting_started/integrating_into_django.html
WAGTAIL_SITE_NAME = "FIELD"
PROJECT_TITLE = 'FIELD'
# PUPUT
# ------------------------------------------------------------------------------
PUPUT_AS_PLUGIN = True
# https://github.com/APSL/puput/issues/222
PUPUT_COMMENTS_PROVIDER = 'puput.comments.DjangoCommentsCommentsProvider'
# Your stuff...
# ------------------------------------------------------------------------------
USE_BULMA = True
# 1: root, 2: site home page, 3: top level page
# default is 3, we change to 2 because our default main menu
# is just the home page, nothing else.
WAGTAILMENUS_SECTION_ROOT_DEPTH = 2
# Note that KCL was (still is?) the research grant recipient.
# Please make sure logo removal is agreed first with Wellcome & KCL.
HIDE_KCL_LOGO = True
# those settings vars will be available in template contexts
SETTINGS_VARS_IN_CONTEXT = [
'PROJECT_TITLE',
'GA_ID',
'USE_BULMA',
'MAILING_LIST_FORM_WEB_PATH',
'HIDE_KCL_LOGO',
]
# slug of the page which is the parent of the specific communities
FIELD_COMMUNITIES_ROOT_SLUG = 'groups'
if 1:
FABRIC_DEV_PACKAGES = [
{
'git': 'https://github.com/kingsdigitallab/django-kdl-wagtail.git',
'folder_git': 'django-kdl-wagtail',
'folder_package': 'kdl_wagtail',
'branch': 'develop',
'servers': ['lcl', 'dev', 'stg', 'liv'],
}
]
KDL_WAGTAIL_HIDDEN_PAGE_TYPES = [
('kdl_wagtail_page.richpage'),
('kdl_wagtail_core.streampage'),
('kdl_wagtail_core.indexpage'),
('kdl_wagtail_people.peopleindexpage'),
('kdl_wagtail_people.personpage'),
]
MAILING_LIST_FORM_WEB_PATH = '/mailing-list/'
# -----------------------------------------------------------------------------
# Django Simple Captcha
# -----------------------------------------------------------------------------
CAPTCHA_FONT_SIZE = 36
# Timeline settings
TIMELINE_IMAGE_FOLDER = '/images/'
TIMELINE_IMAGE_FORMAT = 'jpg'
# dublin core settings
# Set to True to disable the DublinCoreResource model and define your own
DUBLINCORE_RESOURCE_ABSTRACT_ONLY = False
# The path where resource file are uploaded, relative to your MEDIA path
DUBLINCORE_RESOURCE_UPLOAD_PATH = 'uploads/dublin_core/'
# ----------------------------------------------------------------------------
# Wagtail extra settings
# ----------------------------------------------------------------------------
WAGTAILIMAGES_IMAGE_MODEL = "field_wagtail.FieldImage"
# Google Analytics ID
GA_ID = 'UA-67707155-9'
# Field Mailchimp settings (May 2019)
MAILCHIMP_LIST_ID = env('MAILCHIMP_LIST_ID', default='')
MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')
stats: avg_line_length 36.437788 | max_line_length 93 | alphanum_fraction 0.622992 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 11,977 | score_documentation 0.757367

---

hexsha: 73997218b858bff90d72a13225aff826e20a867f | size: 5,464 | ext: py | lang: Python
max_stars: tests/test_subtyping_processing.py @ phac-nml/biohansel (head 1f4da7081ed248fc0c2c52e36e0a4cf4adbb1c8d, licenses ["Apache-2.0"]) | stars: 25 | events: 2018-09-24T16:14:06.000Z to 2021-10-06T00:47:26.000Z
max_issues: tests/test_subtyping_processing.py @ phac-nml/biohansel (head 1f4da7081ed248fc0c2c52e36e0a4cf4adbb1c8d, licenses ["Apache-2.0"]) | issues: 53 | events: 2018-07-13T16:13:43.000Z to 2021-03-04T19:58:41.000Z
max_forks: tests/test_subtyping_processing.py @ phac-nml/bio_hansel (head 1f4da7081ed248fc0c2c52e36e0a4cf4adbb1c8d, licenses ["Apache-2.0"]) | forks: 11 | events: 2018-09-24T16:14:11.000Z to 2020-11-05T17:17:15.000Z
content:
# -*- coding: utf-8 -*-
import pandas as pd
import pytest
from bio_hansel.qc import QC
from bio_hansel.subtype import Subtype
from bio_hansel.subtype_stats import SubtypeCounts
from bio_hansel.subtyper import absent_downstream_subtypes, sorted_subtype_ints, empty_results, \
get_missing_internal_subtypes
from bio_hansel.utils import find_inconsistent_subtypes, expand_degenerate_bases
def test_absent_downstream_subtypes():
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1.1', '1.2', '1.3', '1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) is None
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1.1', '1.2', '1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) == ['1.3']
assert absent_downstream_subtypes(subtype='1',
subtypes=pd.Series(['1']),
scheme_subtypes=['1.1', '1.2', '1', '1.3']) == ['1.1', '1.2', '1.3']
def test_sorted_subtype_ints():
assert sorted_subtype_ints(pd.Series([], dtype=object)) == []
exp_subtype_ints = [
[1],
[1, 1],
[1, 1, 1],
[1, 1, 1, 99]
]
assert sorted_subtype_ints(pd.Series(['1', '1.1', '1.1.1', '1.1.1.99'])) == exp_subtype_ints
series = pd.Series(['1', '1.1', '1.1.1', '1.1.1.99', '1.1', '1.1.1'])
assert sorted_subtype_ints(series) == exp_subtype_ints
def test_empty_results():
st = Subtype(sample='test',
file_path='tests/data/Retro1000data/10-1358.fastq',
scheme='enteritidis',
scheme_version='1.0.5',
subtype=None,
non_present_subtypes=None,
all_subtypes=None,
qc_status=QC.FAIL,
qc_message=QC.NO_TARGETS_FOUND)
df_empty = empty_results(st)
df_expected_empty = pd.DataFrame(
{
0: dict(
sample='test',
file_path='tests/data/Retro1000data/10-1358.fastq',
subtype=None,
refposition=None,
is_pos_kmer=None,
scheme='enteritidis',
scheme_version='1.0.5',
qc_status=QC.FAIL,
qc_message=QC.NO_TARGETS_FOUND)}).transpose()
assert ((df_empty == df_expected_empty) | (df_empty.isnull() == df_expected_empty.isnull())).values.all(), \
f'Empty result DataFrame should equal df_expected_empty: {df_expected_empty}'
def test_find_inconsistent_subtypes():
subtype_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1', ]
consistent_subtypes = sorted_subtype_ints(pd.Series(subtype_list))
assert find_inconsistent_subtypes(consistent_subtypes) == [], \
'Expecting all subtypes to be consistent with each other'
subtype_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1',
'1.1.1.2',
'1.1.1.3', ]
inconsistent_subtypes = sorted_subtype_ints(pd.Series(subtype_list))
exp_incon_subtypes = ['1.1.1.1',
'1.1.1.2',
'1.1.1.3', ]
assert find_inconsistent_subtypes(inconsistent_subtypes) == exp_incon_subtypes, \
f'Expecting subtypes {exp_incon_subtypes} to be inconsistent with each other'
subtypes_list = ['1',
'1.1',
'1.1.1',
'1.1.1.1',
'1.1.1.2',
'1.1.1.3',
'1.1.2',
'2', ]
inconsistent_subtypes = sorted_subtype_ints(pd.Series(subtypes_list))
assert set(find_inconsistent_subtypes(inconsistent_subtypes)) == set(subtypes_list), \
f'All subtypes should be inconsistent with each other in {subtypes_list}'
def test_subtype_regex():
good_values = ['1.1.1.1', '10', '77.10.1.9', '17.1.1.1.1.12.4', ]
for good_value in good_values:
assert SubtypeCounts._check_subtype(None, None, good_value) == good_value
bad_values = [
'1..',
'1..1',
'1.1..1.1',
'1....',
'100.',
'',
' ',
'a1.1.1',
'1.11.1a',
'a',
'not.a.valid.subtype',
'B.1.1.7'
]
for bad_value in bad_values:
with pytest.raises(ValueError):
assert SubtypeCounts._check_subtype(None, None, bad_value) == ''
def test_get_missing_internal_subtypes():
st_vals = ['1', '1', '1', '1']
pos_subtypes_set = {
'1',
'1.1',
'1.1.1',
'1.1.1.1'
}
exp_missing_internal_subtypes = set()
assert get_missing_internal_subtypes(st_vals, pos_subtypes_set) == exp_missing_internal_subtypes
st_vals = ['2', '22', '222', '2222', '22222']
pos_subtypes_set = {'2', '2.22.222.2222.22222'}
exp_missing_internal_subtypes = {
'2.22',
'2.22.222',
'2.22.222.2222'
}
assert get_missing_internal_subtypes(st_vals, pos_subtypes_set) == exp_missing_internal_subtypes
def test_expand_degenerate_bases():
assert len(expand_degenerate_bases('NNNNN')) == 1024
with open('tests/data/expand_degenerate_bases_DARTHVADR.txt') as f:
assert expand_degenerate_bases('DARTHVADR') == f.read().split('\n')
stats: avg_line_length 35.947368 | max_line_length 112 | alphanum_fraction 0.548133 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,083 | score_documentation 0.198206

---

hexsha: 7399721b18f0c510e440d6fd414b7fdd42d11e8d | size: 8,869 | ext: py | lang: Python
max_stars: capreolus/benchmark/codesearchnet.py @ seanmacavaney/capreolus (head 8695a471f9d8e911ad12778a82327e3973f92af0, licenses ["Apache-2.0"]) | stars: null | events: null
max_issues: capreolus/benchmark/codesearchnet.py @ seanmacavaney/capreolus (head 8695a471f9d8e911ad12778a82327e3973f92af0, licenses ["Apache-2.0"]) | issues: null | events: null
max_forks: capreolus/benchmark/codesearchnet.py @ seanmacavaney/capreolus (head 8695a471f9d8e911ad12778a82327e3973f92af0, licenses ["Apache-2.0"]) | forks: null | events: null
content:
import gzip
import json
import pickle
from collections import defaultdict
from pathlib import Path
from zipfile import ZipFile
from tqdm import tqdm
from capreolus import ConfigOption, Dependency, constants
from capreolus.utils.common import download_file, remove_newline
from capreolus.utils.loginit import get_logger
from capreolus.utils.trec import topic_to_trectxt
from . import Benchmark
logger = get_logger(__name__)
PACKAGE_PATH = constants["PACKAGE_PATH"]
@Benchmark.register
class CodeSearchNetCorpus(Benchmark):
"""CodeSearchNet Corpus. [1]
[1] Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. 2019. CodeSearchNet Challenge: Evaluating the State of Semantic Code Search. arXiv 2019.
"""
module_name = "codesearchnet_corpus"
dependencies = [Dependency(key="collection", module="collection", name="codesearchnet")]
url = "https://s3.amazonaws.com/code-search-net/CodeSearchNet/v2"
query_type = "title"
file_fn = PACKAGE_PATH / "data" / "csn_corpus"
qrel_dir = file_fn / "qrels"
topic_dir = file_fn / "topics"
fold_dir = file_fn / "folds"
qidmap_dir = file_fn / "qidmap"
docidmap_dir = file_fn / "docidmap"
config_spec = [ConfigOption("lang", "ruby", "CSN language dataset to use")]
def build(self):
lang = self.config["lang"]
self.qid_map_file = self.qidmap_dir / f"{lang}.json"
self.docid_map_file = self.docidmap_dir / f"{lang}.json"
self.qrel_file = self.qrel_dir / f"{lang}.txt"
self.topic_file = self.topic_dir / f"{lang}.txt"
self.fold_file = self.fold_dir / f"{lang}.json"
for file in [var for var in vars(self) if var.endswith("file")]:
getattr(self, file).parent.mkdir(exist_ok=True, parents=True)
self.download_if_missing()
@property
def qid_map(self):
if not hasattr(self, "_qid_map"):
if not self.qid_map_file.exists():
self.download_if_missing()
self._qid_map = json.load(open(self.qid_map_file, "r"))
return self._qid_map
@property
def docid_map(self):
if not hasattr(self, "_docid_map"):
if not self.docid_map_file.exists():
self.download_if_missing()
self._docid_map = json.load(open(self.docid_map_file, "r"))
return self._docid_map
def download_if_missing(self):
files = [self.qid_map_file, self.docid_map_file, self.qrel_file, self.topic_file, self.fold_file]
if all([f.exists() for f in files]):
return
lang = self.config["lang"]
tmp_dir = Path("/tmp")
zip_fn = tmp_dir / f"{lang}.zip"
if not zip_fn.exists():
download_file(f"{self.url}/{lang}.zip", zip_fn)
with ZipFile(zip_fn, "r") as zipobj:
zipobj.extractall(tmp_dir)
# prepare docid-url mapping from dedup.pkl
pkl_fn = tmp_dir / f"{lang}_dedupe_definitions_v2.pkl"
doc_objs = pickle.load(open(pkl_fn, "rb"))
self._docid_map = self._prep_docid_map(doc_objs)
assert self._get_n_docid() == len(doc_objs)
# prepare folds, qrels, topics, docstring2qid # TODO: shall we add negative samples?
qrels, self._qid_map = defaultdict(dict), {}
qids = {s: [] for s in ["train", "valid", "test"]}
topic_file = open(self.topic_file, "w", encoding="utf-8")
qrel_file = open(self.qrel_file, "w", encoding="utf-8")
def gen_doc_from_gzdir(dir):
""" generate parsed dict-format doc from all jsonl.gz files under given directory """
for fn in sorted(dir.glob("*.jsonl.gz")):
f = gzip.open(fn, "rb")
for doc in f:
yield json.loads(doc)
for set_name in qids:
set_path = tmp_dir / lang / "final" / "jsonl" / set_name
for doc in gen_doc_from_gzdir(set_path):
code = remove_newline(" ".join(doc["code_tokens"]))
docstring = remove_newline(" ".join(doc["docstring_tokens"]))
n_words_in_docstring = len(docstring.split())
if n_words_in_docstring >= 1024:
logger.warning(
f"chunk query to first 1000 words otherwise TooManyClause would be triggered "
f"at lucene at search stage, "
)
docstring = " ".join(docstring.split()[:1020]) # for TooManyClause
docid = self.get_docid(doc["url"], code)
qid = self._qid_map.get(docstring, str(len(self._qid_map)))
qrel_file.write(f"{qid} Q0 {docid} 1\n")
if docstring not in self._qid_map:
self._qid_map[docstring] = qid
qids[set_name].append(qid)
topic_file.write(topic_to_trectxt(qid, docstring))
topic_file.close()
qrel_file.close()
# write to qid_map.json, docid_map, fold.json
json.dump(self._qid_map, open(self.qid_map_file, "w"))
json.dump(self._docid_map, open(self.docid_map_file, "w"))
json.dump(
{"s1": {"train_qids": qids["train"], "predict": {"dev": qids["valid"], "test": qids["test"]}}},
open(self.fold_file, "w"),
)
def _prep_docid_map(self, doc_objs):
"""
construct a nested dict to map each doc into a unique docid
which follows the structure: {url: {" ".join(code_tokens): docid, ...}}
For all the lanugage datasets the url uniquely maps to a code_tokens yet it's not the case for but js and php
which requires a second-level mapping from raw_doc to docid
:param doc_objs: a list of dict having keys ["nwo", "url", "sha", "identifier", "arguments"
"function", "function_tokens", "docstring", "doctring_tokens",],
:return:
"""
# TODO: any way to avoid the twice traversal of all url and make the return dict structure consistent
lang = self.config["lang"]
url2docid = defaultdict(dict)
for i, doc in tqdm(enumerate(doc_objs), desc=f"Preparing the {lang} docid_map"):
url, code_tokens = doc["url"], remove_newline(" ".join(doc["function_tokens"]))
url2docid[url][code_tokens] = f"{lang}-FUNCTION-{i}"
# remove the code_tokens for the unique url-docid mapping
for url, docids in tqdm(url2docid.items(), desc=f"Compressing the {lang} docid_map"):
url2docid[url] = list(docids.values()) if len(docids) == 1 else docids # {code_tokens: docid} -> [docid]
return url2docid
def _get_n_docid(self):
""" calculate the number of document ids contained in the nested docid map """
lens = [len(docs) for url, docs in self._docid_map.items()]
return sum(lens)
def get_docid(self, url, code_tokens):
""" retrieve the doc id according to the doc dict """
docids = self.docid_map[url]
return docids[0] if len(docids) == 1 else docids[code_tokens]
@Benchmark.register
class CodeSearchNetChallenge(Benchmark):
"""CodeSearchNet Challenge. [1]
This benchmark can only be used for training (and challenge submissions) because no qrels are provided.
[1] Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. 2019. CodeSearchNet Challenge: Evaluating the State of Semantic Code Search. arXiv 2019.
"""
module_name = "codesearchnet_challenge"
dependencies = [Dependency(key="collection", module="collection", name="codesearchnet")]
config_spec = [ConfigOption("lang", "ruby", "CSN language dataset to use")]
url = "https://raw.githubusercontent.com/github/CodeSearchNet/master/resources/queries.csv"
query_type = "title"
file_fn = PACKAGE_PATH / "data" / "csn_challenge"
topic_file = file_fn / "topics.txt"
qid_map_file = file_fn / "qidmap.json"
def download_if_missing(self):
""" download query.csv and prepare queryid - query mapping file """
if self.topic_file.exists() and self.qid_map_file.exists():
return
tmp_dir = Path("/tmp")
tmp_dir.mkdir(exist_ok=True, parents=True)
self.file_fn.mkdir(exist_ok=True, parents=True)
query_fn = tmp_dir / f"query.csv"
if not query_fn.exists():
download_file(self.url, query_fn)
# prepare qid - query
qid_map = {}
topic_file = open(self.topic_file, "w", encoding="utf-8")
query_file = open(query_fn)
for qid, line in enumerate(query_file):
if qid != 0: # ignore the first line "query"
topic_file.write(topic_to_trectxt(qid, line.strip()))
qid_map[qid] = line
topic_file.close()
json.dump(qid_map, open(self.qid_map_file, "w"))
stats: avg_line_length 39.95045 | max_line_length 183 | alphanum_fraction 0.628481 | count_classes 8,355 | score_classes 0.942045 | count_generators 2,942 | score_generators 0.331717 | count_decorators 8,395 | score_decorators 0.946555 | count_async_functions 0 | score_async_functions 0 | count_documentation 2,960 | score_documentation 0.333747

---

hexsha: 739b66623c870e2641dd70a59dd1c2539187536e | size: 1,161 | ext: py | lang: Python
max_stars: tests/cli.py @ chriswmackey/honeybee-radiance-folder (head 5576df94d781fd131c683c8b05aa04ac42df34b8, licenses ["MIT"]) | stars: null | events: null
max_issues: tests/cli.py @ chriswmackey/honeybee-radiance-folder (head 5576df94d781fd131c683c8b05aa04ac42df34b8, licenses ["MIT"]) | issues: 113 | events: 2019-07-18T03:38:26.000Z to 2022-03-26T03:26:06.000Z
max_forks: tests/cli.py @ chriswmackey/honeybee-radiance-folder (head 5576df94d781fd131c683c8b05aa04ac42df34b8, licenses ["MIT"]) | forks: 6 | events: 2019-07-18T00:05:26.000Z to 2021-10-04T08:50:26.000Z
content:
from click.testing import CliRunner
from honeybee_radiance_folder.cli import filter_json_file
import json
import os
def test_filter_file():
runner = CliRunner()
input_file = './tests/assets/project_folder/grid_info.json'
output_file = './tests/assets/temp/grid_filtered_0.json'
result = runner.invoke(
filter_json_file, [
input_file, 'group:daylight_grids', '--output-file', output_file
]
)
assert result.exit_code == 0
# check the file is created
with open(output_file) as inf:
data = json.load(inf)
assert len(data) == 1
os.unlink(output_file)
def test_filter_file_remove():
runner = CliRunner()
input_file = './tests/assets/project_folder/grid_info.json'
output_file = './tests/assets/project_folder/grid_filtered_1.json'
result = runner.invoke(
filter_json_file, [
input_file, 'group:daylight_grids', '--output-file', output_file, '--remove'
]
)
assert result.exit_code == 0
# check the file is created
with open(output_file) as inf:
data = json.load(inf)
assert len(data) == 8
os.unlink(output_file)
stats: avg_line_length 29.769231 | max_line_length 88 | alphanum_fraction 0.669251 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 324 | score_documentation 0.27907

---

hexsha: 739ba1a424b3444916622cc94f3e8ea065012ebc | size: 13,648 | ext: py | lang: Python
max_stars: perma_web/perma/forms.py @ leppert/perma (head adb0cec29679c3d161d72330e19114f89f8c42ac, licenses ["MIT", "Unlicense"]) | stars: null | events: null
max_issues: perma_web/perma/forms.py @ leppert/perma (head adb0cec29679c3d161d72330e19114f89f8c42ac, licenses ["MIT", "Unlicense"]) | issues: null | events: null
max_forks: perma_web/perma/forms.py @ leppert/perma (head adb0cec29679c3d161d72330e19114f89f8c42ac, licenses ["MIT", "Unlicense"]) | forks: null | events: null
content:
import logging
from django import forms
from django.forms import ModelForm
from django.forms.widgets import flatatt
from django.utils.html import mark_safe
from perma.models import Registrar, Organization, LinkUser
logger = logging.getLogger(__name__)
class RegistrarForm(ModelForm):
class Meta:
model = Registrar
fields = ['name', 'email', 'website']
class OrganizationWithRegistrarForm(ModelForm):
registrar = forms.ModelChoiceField(queryset=Registrar.objects.all().order_by('name'), empty_label=None)
class Meta:
model = Organization
fields = ['name', 'registrar']
class OrganizationForm(ModelForm):
class Meta:
model = Organization
fields = ['name']
class CreateUserForm(forms.ModelForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
"""
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email"]
error_messages = {
'duplicate_email': "A user with that email address already exists.",
}
email = forms.EmailField()
def clean_email(self):
# Since User.email is unique, this check is redundant,
# but it sets a nicer error message than the ORM.
email = self.cleaned_data["email"]
try:
LinkUser.objects.get(email=email)
except LinkUser.DoesNotExist:
return email
raise forms.ValidationError(self.error_messages['duplicate_email'])
class CreateUserFormWithRegistrar(CreateUserForm):
"""
add registrar to the create user form
"""
registrar = forms.ModelChoiceField(queryset=Registrar.objects.all().order_by('name'), empty_label=None)
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email", "registrar"]
def clean_registrar(self):
registrar = self.cleaned_data["registrar"]
return registrar
class CreateUserFormWithCourt(CreateUserForm):
"""
add court to the create user form
"""
requested_account_note = forms.CharField(required=True)
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email", "requested_account_note"]
def __init__(self, *args, **kwargs):
super(CreateUserFormWithCourt, self).__init__(*args, **kwargs)
self.fields['requested_account_note'].label = "Your court"
self.fields['first_name'].label = "Your first name"
self.fields['last_name'].label = "Your last name"
self.fields['email'].label = "Your email"
class CreateUserFormWithUniversity(CreateUserForm):
"""
add court to the create user form
"""
requested_account_note = forms.CharField(required=True)
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email", "requested_account_note"]
def __init__(self, *args, **kwargs):
super(CreateUserFormWithUniversity, self).__init__(*args, **kwargs)
self.fields['requested_account_note'].label = "Your university"
class CustomSelectSingleAsList(forms.SelectMultiple):
# Thank you, http://stackoverflow.com/a/14971139
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<select %s>' % flatatt(final_attrs)] # NOTE removed the multiple attribute
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe(u'\n'.join(output))
class CreateUserFormWithOrganization(CreateUserForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
"""
def __init__(self, *args, **kwargs):
registrar_id = False
org_member_id = False
if 'registrar_id' in kwargs:
registrar_id = kwargs.pop('registrar_id')
if 'org_member_id' in kwargs:
org_member_id = kwargs.pop('org_member_id')
super(CreateUserFormWithOrganization, self).__init__(*args, **kwargs)
if registrar_id:
self.fields['organizations'].queryset = Organization.objects.filter(registrar_id=registrar_id).order_by('name')
elif org_member_id:
user = LinkUser.objects.get(id=org_member_id)
self.fields['organizations'].queryset = user.organizations.all()
else:
self.fields['organizations'].queryset = Organization.objects.all().order_by('name')
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email", "organizations"]
organizations = forms.ModelMultipleChoiceField(queryset=Organization.objects.all().order_by('name'),label="Organization", widget=CustomSelectSingleAsList)
def clean_organization(self):
organizations = self.cleaned_data["organizations"]
return organizations
class UserFormEdit(forms.ModelForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
This is the edit form, so we strip it down even more
"""
error_messages = {
}
email = forms.EmailField()
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email"]
class RegistrarMemberFormEdit(UserFormEdit):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
This is the edit form, so we strip it down even more
"""
registrar = forms.ModelChoiceField(queryset=Registrar.objects.all().order_by('name'), empty_label=None)
class Meta:
model = LinkUser
fields = ["first_name", "last_name", "email", "registrar"]
class OrganizationMemberWithOrganizationFormEdit(forms.ModelForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
This is stripped down even further to match out editing needs
"""
def __init__(self, *args, **kwargs):
registrar_id = False
if 'registrar_id' in kwargs:
registrar_id = kwargs.pop('registrar_id')
super(OrganizationMemberWithOrganizationFormEdit, self).__init__(*args, **kwargs)
if registrar_id:
self.fields['organizations'].queryset = Organization.objects.filter(registrar_id=registrar_id).order_by('name')
class Meta:
model = LinkUser
fields = ["organizations"]
org = forms.ModelMultipleChoiceField(queryset=Organization.objects.all().order_by('name'),label="Organization", required=False,)
class OrganizationMemberWithOrganizationOrgAsOrganizationMemberFormEdit(forms.ModelForm):
"""
TODO: this form has a gross name. rename it.
"""
def __init__(self, *args, **kwargs):
user_id = False
if 'organization_user_id' in kwargs:
organization_user_id = kwargs.pop('organization_user_id')
super(OrganizationMemberWithOrganizationOrgAsOrganizationMemberFormEdit, self).__init__(*args, **kwargs)
if organization_user_id:
editing_user = LinkUser.objects.get(pk=organization_user_id)
self.fields['organizations'].queryset = editing_user.organizations.all().order_by('name')
class Meta:
model = LinkUser
fields = ["organizations"]
org = forms.ModelMultipleChoiceField(queryset=Organization.objects.all().order_by('name'),label="Organization", required=False,)
class OrganizationMemberWithGroupFormEdit(UserFormEdit):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
This is stripped down even further to match out editing needs
"""
def __init__(self, *args, **kwargs):
registrar_id = False
if 'registrar_id' in kwargs:
registrar_id = kwargs.pop('registrar_id')
super(OrganizationMemberWithGroupFormEdit, self).__init__(*args, **kwargs)
if registrar_id:
self.fields['organizations'].queryset = Organization.objects.filter(registrar_id=registrar_id).order_by('name')
class Meta:
model = LinkUser
fields = ("first_name", "last_name", "email", "organizations",)
org = forms.ModelChoiceField(queryset=Organization.objects.all().order_by('name'), empty_label=None, label="Organization", required=False,)
class UserAddRegistrarForm(forms.ModelForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
This is stripped down even further to match out editing needs
"""
class Meta:
model = LinkUser
fields = ("registrar",)
registrar = forms.ModelChoiceField(queryset=Registrar.objects.all().order_by('name'), empty_label=None)
class UserAddOrganizationForm(forms.ModelForm):
"""
add an org when a regular user is promoted to an org user
"""
def __init__(self, *args, **kwargs):
registrar_id = False
org_member_id = False
target_user_id = False
if 'registrar_id' in kwargs:
registrar_id = kwargs.pop('registrar_id')
if 'org_member_id' in kwargs:
org_member_id = kwargs.pop('org_member_id')
if 'target_user_id' in kwargs:
target_user_id = kwargs.pop('target_user_id')
super(UserAddOrganizationForm, self).__init__(*args, **kwargs)
target_user = LinkUser.objects.get(pk=target_user_id)
# Registrars can only edit their own organization members
if registrar_id:
# Get the orgs the logged in user admins. Exclude the ones
# the target user is already in
orgs = Organization.objects.filter(registrar_id=registrar_id).exclude(pk__in=target_user.organizations.all())
elif org_member_id:
# Get the orgs the logged in user admins. Exclude the ones
# the target user is already in
org_member = LinkUser.objects.get(pk=org_member_id)
orgs = org_member.organizations.all().exclude(pk__in=target_user.organizations.all())
else:
# Must be registry member.
orgs = Organization.objects.all().exclude(pk__in=target_user.organizations.all())
self.fields['organizations'] = forms.ModelMultipleChoiceField(queryset=orgs.order_by('name'), label="Organization", widget=CustomSelectSingleAsList)
class Meta:
model = LinkUser
fields = ("organizations",)
def save(self, commit=True):
user = super(UserAddOrganizationForm, self).save(commit=False)
if commit:
user.save()
return user
class UserRegForm(forms.ModelForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
"""
error_messages = {
'duplicate_email': "A user with that email address already exists.",
}
email = forms.EmailField()
#password = forms.CharField(label="Password", widget=forms.PasswordInput)
class Meta:
model = LinkUser
fields = ("email", "first_name", "last_name")
def clean_email(self):
# Since User.email is unique, this check is redundant,
# but it sets a nicer error message than the ORM.
email = self.cleaned_data["email"]
try:
LinkUser.objects.get(email=email)
except LinkUser.DoesNotExist:
return email
raise forms.ValidationError(self.error_messages['duplicate_email'])
class UserFormSelfEdit(forms.ModelForm):
"""
stripped down user reg form
This is mostly a django.contrib.auth.forms.UserCreationForm
This is stripped down even further to match our editing needs
"""
class Meta:
model = LinkUser
fields = ("first_name", "last_name", "email")
email = forms.EmailField()
class SetPasswordForm(forms.Form):
"""
A form that lets a user change set his/her password without entering the
old password
"""
error_messages = {
'password_mismatch': "The two password fields didn't match.",
}
new_password1 = forms.CharField(label="New password",
widget=forms.PasswordInput)
new_password2 = forms.CharField(label="New password confirmation",
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
logger.debug('mismatch')
raise forms.ValidationError(self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class UploadFileForm(forms.Form):
title = forms.CharField(required=True)
url = forms.URLField(required=True)
file = forms.FileField(required=True)
class ContactForm(forms.Form):
"""
The form we use on the contact page. Just an email (optional)
and a message
"""
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea)
stats: avg_line_length 31.81352 | max_line_length 158 | alphanum_fraction 0.656726 | count_classes 13,203 | score_classes 0.967394 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 4,011 | score_documentation 0.293889

---

hexsha: 739baac2ff5ef50ecd5e6693fbb6afb0bb494d6a | size: 5,403 | ext: py | lang: Python
max_stars: samples/sample-2.py @ shoriwe/LVaED (head 68ca38eed2b4c2b1b7a6a8304c8effbcf2f977f7, licenses ["MIT"]) | stars: null | events: null
max_issues: samples/sample-2.py @ shoriwe/LVaED (head 68ca38eed2b4c2b1b7a6a8304c8effbcf2f977f7, licenses ["MIT"]) | issues: 19 | events: 2021-02-08T22:14:16.000Z to 2021-03-03T15:13:07.000Z
max_forks: samples/sample-2.py @ shoriwe/LVaED (head 68ca38eed2b4c2b1b7a6a8304c8effbcf2f977f7, licenses ["MIT"]) | forks: 3 | events: 2021-08-30T01:06:32.000Z to 2022-02-21T03:22:28.000Z
content:
import io
import os
import re
import zipfile
import flask
import markdown
import blueprints.example
import blueprints.home
import blueprints.presentation
import blueprints.transformations
class Zipper(object):
def __init__(self):
self._content = None
self._content_handler = io.BytesIO()
def append(self, filename: str, content: bytes):
zip_file = zipfile.ZipFile(self._content_handler, "a", zipfile.ZIP_DEFLATED, False)
zip_file.writestr(filename, content)
for file in zip_file.filelist:
file.create_system = 0
zip_file.close()
self._content_handler.seek(0)
self._content = self._content_handler.read()
def append_directory(self, path: str):
for directory_path, directories, files in os.walk(path):
for file in files:
file_path = os.path.join(directory_path, file)
with open(file_path, "rb") as file_object:
self.append(file_path, file_object.read())
self._content_handler.seek(0)
self._content = self._content_handler.read()
def content(self) -> bytes:
return self._content
def pygmentize(raw_markdown: str) -> str:
languages = re.findall(re.compile("(?<=^```)\\w+$", re.M), raw_markdown)
last_index = 0
for language in languages:
list_markdown = raw_markdown.split("\n")
code_block_start_index = list_markdown.index(f"```{language}", last_index)
code_block_end_index = list_markdown.index("```", code_block_start_index)
for index in range(code_block_start_index + 1, code_block_end_index):
list_markdown[index] = f"\t{list_markdown[index]}"
list_markdown[code_block_start_index] = "\t" + list_markdown[code_block_start_index].replace("```", ":::")
list_markdown[code_block_end_index] = "\n"
raw_markdown = "\n".join(list_markdown)
last_index = code_block_end_index
return raw_markdown
def render_article(article_path: str) -> str:
with open(article_path) as file:
content = file.read()
html = markdown.markdown(pygmentize(content), extensions=["codehilite"])
return html
def zip_library(library_directory: str) -> Zipper:
z = Zipper()
z.append_directory(library_directory)
return z
def load_articles(app: flask.Flask):
app.config["articles"] = {
"list": render_article("markdown/articles/list.md"),
"stack": render_article("markdown/articles/stack.md"),
"queue": render_article("markdown/articles/queue.md")
}
def load_libraries(app: flask.Flask):
app.config["libraries"] = {
"all": zip_library("DataTypes").content(),
"c": zip_library("DataTypes/C").content(),
"java": zip_library("DataTypes/Java").content(),
"python": zip_library("DataTypes/Python").content()
}
def load_examples(app: flask.Flask):
app.config["examples"] = {}
app.config["examples"]["c"] = {
"simple_list": render_article("markdown/examples/c/simple_list.md"),
"double_list": render_article("markdown/examples/c/double_list.md"),
"circular_simple_list": render_article("markdown/examples/c/circular_simple_list.md"),
"circular_double_list": render_article("markdown/examples/c/circular_double_list.md"),
"array_stack": render_article("markdown/examples/c/array_stack.md"),
"list_stack": render_article("markdown/examples/c/list_stack.md"),
"array_queue": render_article("markdown/examples/c/array_queue.md"),
"list_queue": render_article("markdown/examples/c/list_queue.md"),
"priority_queue": render_article("markdown/examples/c/priority_queue.md")
}
app.config["examples"]["java"] = {
"simple_list": render_article("markdown/examples/java/simple_list.md"),
"double_list": render_article("markdown/examples/java/double_list.md"),
"circular_simple_list": render_article("markdown/examples/java/circular_simple_list.md"),
"circular_double_list": render_article("markdown/examples/java/circular_double_list.md"),
"array_stack": render_article("markdown/examples/java/array_stack.md"),
"list_stack": render_article("markdown/examples/java/list_stack.md"),
"array_queue": render_article("markdown/examples/java/array_queue.md"),
"list_queue": render_article("markdown/examples/java/list_queue.md"),
"priority_queue": render_article("markdown/examples/java/priority_queue.md")
}
app.config["examples"]["python"] = {
"simple_list": render_article("markdown/examples/python/simple_list.md"),
"double_list": render_article("markdown/examples/python/double_list.md"),
"circular_simple_list": render_article("markdown/examples/python/circular_simple_list.md"),
"circular_double_list": render_article("markdown/examples/python/circular_double_list.md"),
"array_stack": render_article("markdown/examples/python/array_stack.md"),
"list_stack": render_article("markdown/examples/python/list_stack.md"),
"array_queue": render_article("markdown/examples/python/array_queue.md"),
"list_queue": render_article("markdown/examples/python/list_queue.md"),
"priority_queue": render_article("markdown/examples/python/priority_queue.md")
}
def setup() -> flask.Flask:
app = flask.Flask(__name__, template_folder="templates")
app.register_blueprint(blueprints.home.home_blueprint)
app.register_blueprint(blueprints.presentation.presentation_blueprint)
app.register_blueprint(blueprints.example.example_blueprint)
app.register_blueprint(blueprints.transformations.transformations_blueprint)
load_articles(app)
load_libraries(app)
load_examples(app)
return app
def main():
app = setup()
app.run("127.0.0.1", 5000, debug=False)
if __name__ == '__main__':
main()
stats: avg_line_length 37.006849 | max_line_length 108 | alphanum_fraction 0.760689 | count_classes 837 | score_classes 0.154914 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 1,911 | score_documentation 0.353692

---

hexsha: 739bd82ee95264fe3d722473cc7aa6319a24720f | size: 4,420 | ext: py | lang: Python
max_stars: yexinyang/scripts/main.py @ TheSignPainter/MLproject-docknet (head 5d5647356f116d34ef57267524851e44595e5e93, licenses ["MIT"]) | stars: null | events: null
max_issues: yexinyang/scripts/main.py @ TheSignPainter/MLproject-docknet (head 5d5647356f116d34ef57267524851e44595e5e93, licenses ["MIT"]) | issues: null | events: null
max_forks: yexinyang/scripts/main.py @ TheSignPainter/MLproject-docknet (head 5d5647356f116d34ef57267524851e44595e5e93, licenses ["MIT"]) | forks: 4 | events: 2019-05-29T12:31:51.000Z to 2019-05-30T12:00:12.000Z
content:
import os, time
import numpy as np
import logging
import fire
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from model import *
from dataset import *
def train(dataloader, model, optimizer, criterion, device):
epoch_loss = 0.0
total_num = 0
for data, target in dataloader:
data, target = data.to(device), target.to(device).squeeze()
total_num += len(data)
optimizer.zero_grad()
# out = model(data)
out = model(data, target)
loss = criterion(out, target)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
return epoch_loss / total_num
def evaluate(dataloader, model, device):
c = 0
total_num = 0
with torch.no_grad():
for data, target in dataloader:
data, target = data.to(device), target.to(device).squeeze()
total_num += len(data)
out = model(data)
predicted = torch.max(out, 1)[1]
c += (predicted == target).sum().item()
return c * 100.0 / total_num
def main(**kwargs):
data_dir = kwargs.get('data_dir', '../../dataset_docknet/data')
model_dir = kwargs.get('model_dir', 'models')
log_file = kwargs.get('log_file', 'LOG')
epoch = kwargs.get('epoch', 10)
batch_size = kwargs.get('batch_size', 32)
lr = kwargs.get('lr', 1e-2)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
formatter = logging.Formatter(
"[ %(levelname)s: %(asctime)s ] - %(message)s"
)
logging.basicConfig(level=logging.DEBUG,
format="[ %(levelname)s: %(asctime)s ] - %(message)s")
logger = logging.getLogger("Pytorch")
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info(kwargs)
train_dataset = DockDataset(featdir=os.path.join(data_dir, 'train'), is_train=True)
cv_dataset = DockDataset(featdir=os.path.join(data_dir, 'valid'), is_train=False, shuffle=False)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=True,
drop_last=True,
)
cv_loader = DataLoader(
cv_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=False,
drop_last=True,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# model = resnet18(pretrained=True, progress=True).to(device)
model = resnet18_lsoftmax(pretrained=True, progress=True, device=device).to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
logger.info(model)
best_acc = 0.0
for e in range(epoch):
model.train()
train_loss = train(train_loader, model, optimizer, criterion, device)
model.eval()
cv_acc = evaluate(cv_loader, model, device)
        message = (f"[*] Epoch: [{e+1:3d}/{epoch:3d}] - "
                   f"Training Loss: {train_loss:.5f}, "
                   f"CV Acc: {cv_acc:.2f}%")
logger.info(message)
torch.save(model.state_dict(), os.path.join(model_dir, f"checkpoint_{e+1}.pth"))
if cv_acc >= best_acc:
torch.save(model.state_dict(), os.path.join(model_dir, f"model_best.pth"))
best_acc = cv_acc
def score(**kwargs):
data_dir = kwargs.get('data_dir', '../../dataset_docknet/data')
model_dir = kwargs.get('model_dir', 'models')
batch_size = kwargs.get('batch_size', 32)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
eval_dataset = DockDataset(featdir=os.path.join(data_dir, 'test'), is_train=False, shuffle=False)
eval_loader = DataLoader(
eval_dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
)
# model = resnet18()
model = resnet18_lsoftmax(device=device)
model.load_state_dict(torch.load(os.path.join(model_dir, "model_best.pth")))
model.to(device)
model.eval()
eval_acc = evaluate(eval_loader, model, device)
print(f"Test Accuracy is: {eval_acc:.2f}%")
if __name__ == '__main__':
fire.Fire({
'train': main,
'test': score,
})
| 30.694444 | 102 | 0.601357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 627 | 0.141855 |
739c941ac4971ed7f222b2a59535b53c9bba54d7
| 1,018 |
py
|
Python
|
myconnectome/utils/download_file.py
|
poldrack/myconnectome
|
201f414b3165894d6fe0be0677c8a58f6d161948
|
[
"MIT"
] | 28 |
2015-04-02T16:43:14.000Z
|
2020-06-17T20:04:26.000Z
|
myconnectome/utils/download_file.py
|
poldrack/myconnectome
|
201f414b3165894d6fe0be0677c8a58f6d161948
|
[
"MIT"
] | 11 |
2015-05-19T02:57:22.000Z
|
2017-03-17T17:36:16.000Z
|
myconnectome/utils/download_file.py
|
poldrack/myconnectome
|
201f414b3165894d6fe0be0677c8a58f6d161948
|
[
"MIT"
] | 10 |
2015-05-21T17:01:26.000Z
|
2020-11-11T04:28:08.000Z
|
# -*- coding: utf-8 -*-
"""
download file using requests
Created on Fri Jul 3 09:13:04 2015
@author: poldrack
"""
import requests
import os
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from requests import Session, exceptions
# from http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
def DownloadFile(url,local_filename):
if not os.path.exists(os.path.dirname(local_filename)):
os.makedirs(os.path.dirname(local_filename))
s=requests.Session()
s.mount('http://',HTTPAdapter(max_retries=Retry(total=10,status_forcelist=[500])))
connect_timeout = 10.0
r = s.get(url=url,timeout=(connect_timeout, 10.0))
#except requests.exceptions.ConnectTimeout:
# print "Too slow Mojo!"
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return
| 30.848485 | 104 | 0.698428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.330059 |
739ceade8d1851b8f8c7cabe7fe9035c80fe7143
| 9,388 |
py
|
Python
|
django-openstack/django_openstack/syspanel/views/instances.py
|
tylesmit/openstack-dashboard
|
8199011a98aa8bc5672e977db014f61eccc4668c
|
[
"Apache-2.0"
] | 2 |
2015-05-18T13:50:23.000Z
|
2015-05-18T14:47:08.000Z
|
django-openstack/django_openstack/syspanel/views/instances.py
|
tylesmit/openstack-dashboard
|
8199011a98aa8bc5672e977db014f61eccc4668c
|
[
"Apache-2.0"
] | null | null | null |
django-openstack/django_openstack/syspanel/views/instances.py
|
tylesmit/openstack-dashboard
|
8199011a98aa8bc5672e977db014f61eccc4668c
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django import http
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
import datetime
import logging
from django.contrib import messages
from django_openstack import api
from django_openstack import forms
from django_openstack.dash.views import instances as dash_instances
from openstackx.api import exceptions as api_exceptions
TerminateInstance = dash_instances.TerminateInstance
RebootInstance = dash_instances.RebootInstance
LOG = logging.getLogger('django_openstack.syspanel.views.instances')
def _next_month(date_start):
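    # integer division rolls month 12 over into January of the following year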
y = date_start.year + (date_start.month + 1)/13
m = ((date_start.month + 1)%13)
if m == 0:
m = 1
return datetime.date(y, m, 1)
def _current_month():
today = datetime.date.today()
return datetime.date(today.year, today.month,1)
def _get_start_and_end_date(request):
try:
date_start = datetime.date(int(request.GET['date_year']), int(request.GET['date_month']), 1)
except:
today = datetime.date.today()
date_start = datetime.date(today.year, today.month,1)
date_end = _next_month(date_start)
datetime_start = datetime.datetime.combine(date_start, datetime.time())
datetime_end = datetime.datetime.combine(date_end, datetime.time())
if date_end > datetime.date.today():
datetime_end = datetime.datetime.utcnow()
return (date_start, date_end, datetime_start, datetime_end)
@login_required
def usage(request):
(date_start, date_end, datetime_start, datetime_end) = _get_start_and_end_date(request)
service_list = []
usage_list = []
max_vcpus = max_gigabytes = 0
total_ram = 0
if date_start > _current_month():
messages.error(request, 'No data for the selected period')
date_end = date_start
datetime_end = datetime_start
else:
try:
service_list = api.service_list(request)
except api_exceptions.ApiException, e:
LOG.error('ApiException fetching service list in instance usage',
exc_info=True)
messages.error(request,
'Unable to get service info: %s' % e.message)
for service in service_list:
if service.type == 'nova-compute':
max_vcpus += service.stats['max_vcpus']
max_gigabytes += service.stats['max_gigabytes']
total_ram += settings.COMPUTE_HOST_RAM_GB
try:
usage_list = api.usage_list(request, datetime_start, datetime_end)
except api_exceptions.ApiException, e:
LOG.error('ApiException fetching usage list in instance usage'
' on date range "%s to %s"' % (datetime_start,
datetime_end),
exc_info=True)
messages.error(request, 'Unable to get usage info: %s' % e.message)
dateform = forms.DateForm()
dateform['date'].field.initial = date_start
global_summary = {'max_vcpus': max_vcpus, 'max_gigabytes': max_gigabytes,
'total_active_disk_size': 0, 'total_active_vcpus': 0,
'total_active_ram_size': 0}
for usage in usage_list:
# FIXME: api needs a simpler dict interface (with iteration) - anthony
# NOTE(mgius): Changed this on the api end. Not too much neater, but
# at least its not going into private member data of an external
# class anymore
#usage = usage._info
for k in usage._attrs:
v = usage.__getattr__(k)
if type(v) in [float, int]:
if not k in global_summary:
global_summary[k] = 0
global_summary[k] += v
max_disk_tb = used_disk_tb = available_disk_tb = 0
max_disk_tb = global_summary['max_gigabytes'] / float(1000)
used_disk_tb = global_summary['total_active_disk_size'] / float(1000)
available_disk_tb = (global_summary['max_gigabytes'] / float(1000) - \
global_summary['total_active_disk_size'] / float(1000))
used_ram = global_summary['total_active_ram_size'] / float(1024)
avail_ram = total_ram - used_ram
ram_unit = "GB"
if total_ram > 999:
ram_unit = "TB"
total_ram /= float(1024)
used_ram /= float(1024)
avail_ram /= float(1024)
return render_to_response(
'syspanel_usage.html',{
'dateform': dateform,
'usage_list': usage_list,
'global_summary': global_summary,
'available_cores': global_summary['max_vcpus'] - global_summary['total_active_vcpus'],
'available_disk': global_summary['max_gigabytes'] - global_summary['total_active_disk_size'],
'max_disk_tb': max_disk_tb,
'used_disk_tb': used_disk_tb,
'available_disk_tb': available_disk_tb,
'total_ram': total_ram,
'used_ram': used_ram,
'avail_ram': avail_ram,
'ram_unit': ram_unit,
'external_links': settings.EXTERNAL_MONITORING,
}, context_instance = template.RequestContext(request))
@login_required
def tenant_usage(request, tenant_id):
(date_start, date_end, datetime_start, datetime_end) = _get_start_and_end_date(request)
if date_start > _current_month():
messages.error(request, 'No data for the selected period')
date_end = date_start
datetime_end = datetime_start
dateform = forms.DateForm()
dateform['date'].field.initial = date_start
usage = {}
try:
usage = api.usage_get(request, tenant_id, datetime_start, datetime_end)
except api_exceptions.ApiException, e:
LOG.error('ApiException getting usage info for tenant "%s"'
' on date range "%s to %s"' % (tenant_id,
datetime_start,
datetime_end))
messages.error(request, 'Unable to get usage info: %s' % e.message)
running_instances = []
terminated_instances = []
if hasattr(usage, 'instances'):
now = datetime.datetime.now()
for i in usage.instances:
# this is just a way to phrase uptime in a way that is compatible
# with the 'timesince' filter. Use of local time intentional
i['uptime_at'] = now - datetime.timedelta(seconds=i['uptime'])
if i['ended_at']:
terminated_instances.append(i)
else:
running_instances.append(i)
return render_to_response('syspanel_tenant_usage.html', {
'dateform': dateform,
'usage': usage,
'instances': running_instances + terminated_instances,
'tenant_id': tenant_id,
}, context_instance = template.RequestContext(request))
@login_required
def index(request):
for f in (TerminateInstance, RebootInstance):
_, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except Exception as e:
LOG.error('Unspecified error in instance index', exc_info=True)
messages.error(request, 'Unable to get instance list: %s' % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return render_to_response('syspanel_instances.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
@login_required
def refresh(request):
for f in (TerminateInstance, RebootInstance):
_, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except Exception as e:
messages.error(request, 'Unable to get instance list: %s' % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return render_to_response('_syspanel_instance_list.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
| 36.96063 | 101 | 0.659139 | 0 | 0 | 0 | 0 | 6,973 | 0.742757 | 0 | 0 | 2,684 | 0.285897 |
739e11e44ead5664c57ce1862ebd696671d1bb6a
| 612 |
py
|
Python
|
image_png.py
|
tomasdisk/tommGL-py
|
63876cc7211610908f388c2fd9b2b5f4dbd4411c
|
[
"MIT"
] | 1 |
2018-06-19T21:19:20.000Z
|
2018-06-19T21:19:20.000Z
|
image_png.py
|
tomasdisk/tommGL-py
|
63876cc7211610908f388c2fd9b2b5f4dbd4411c
|
[
"MIT"
] | null | null | null |
image_png.py
|
tomasdisk/tommGL-py
|
63876cc7211610908f388c2fd9b2b5f4dbd4411c
|
[
"MIT"
] | null | null | null |
from datetime import datetime as dt
from bitmap import Bitmap, PilBitmap
h = 500
w = 500
image = Bitmap(w, h, alpha=True)
pil_image = PilBitmap(w, h, alpha=True)
color_red = 0
for i in range(h):
for j in range(w):
image.set_rgba_pixel(j, i, color_red, 0, 0, 150)
pil_image.set_rgba_pixel(j, i, color_red, 0, 0, 150)
color_red += 1
path = "images/im1_" + dt.now().strftime("%Y-%m-%d_%H:%M:%S") + ".png"
print("Image saved: " + path)
image.save_as_png(path)
path = "images/im2_" + dt.now().strftime("%Y-%m-%d_%H:%M:%S") + ".png"
print("Image saved: " + path)
pil_image.save_as_png(path)
| 27.818182 | 70 | 0.643791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.173203 |
739e6d0875de7997feffc9f90decf0de25b225f9
| 9,157 |
py
|
Python
|
src/memberdef.py
|
alljoyn/devtools-codegen
|
388cac15e584dce3040d5090e8f627e5360e5c0f
|
[
"0BSD"
] | null | null | null |
src/memberdef.py
|
alljoyn/devtools-codegen
|
388cac15e584dce3040d5090e8f627e5360e5c0f
|
[
"0BSD"
] | null | null | null |
src/memberdef.py
|
alljoyn/devtools-codegen
|
388cac15e584dce3040d5090e8f627e5360e5c0f
|
[
"0BSD"
] | null | null | null |
# Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import validate
import common
class MemberDef:
"""Common base class for field and argument definitions."""
def __init__(self, name = None, arg_type = None):
"""Initialize an instance of the MemberDef class."""
self.name = name
self.arg_type = arg_type
# This is initialized in Interface.parse()
self.interface = None
return
def get_flattened_signature(self):
"""Flatten the signature by replacing all [NamedTypes] with their expanded signature."""
basesig = self.get_base_signature()
if basesig[0] != '[':
return self.arg_type
prefix = 'a'*(len(self.arg_type)-len(basesig))
basetype = self.get_named_type().get_flattened_signature()
return prefix+basetype
def get_flattened_base_signature(self):
"""Return the flattened base signature."""
return get_base_signature(self.get_flattened_signature())
def get_base_signature(self):
"""Return the base signature i.e. 'i', 'ai', and 'aai' all return 'i'."""
return get_base_signature(self.arg_type)
def get_named_type(self):
"""Returns the named type definition this argument refers to, or None."""
if self.interface is None:
return None
basesig = self.get_base_signature()
if basesig[0] == '[':
return self.interface.get_named_type(basesig[1:-1])
return None
def references_named_type(self):
"""Returns true if arg_type contains a [NamedType] reference."""
basesig = self.get_base_signature()
return basesig[0] == '['
def is_basic_type(self):
"""Return True if this argument is a basic type."""
return (not self.references_named_type()) and is_basic_type(self.arg_type)
def is_array(self):
"""Return True if this argument is an array. A dictionary is considered an array."""
return is_array(self.get_flattened_signature())
def is_structure(self):
"""Return True if the base argument type is a structure."""
return is_structure(self.get_flattened_signature())
def is_dictionary(self):
"""Return True if the base argument type is a dictionary."""
return is_dictionary(self.get_flattened_signature())
def is_dictionary_array(self):
"""Return True if the base argument type is an array of dictionaries."""
return is_dictionary_array(self.get_flattened_signature())
def get_indirection_level(self):
"""Get the number of dimensions in the array or 0 if not an array."""
return get_indirection_level(self.get_flattened_signature())
def get_max_array_dimension(self):
"""Gets the number of array dimensions in this signature."""
return get_max_array_dimension(self.get_flattened_signature())
def get_max_structure_depth(self):
"""Return the maximum depth of structures in this type.
Examples:
"bud" returns 0
"(bud)" returns 1
"(bud)(did)" returns 1
"(bud(did))" returns 2
"(q(bud)(did))" returns 2
"(i((bud(did))i))" returns 4
"""
return get_max_structure_depth(self.get_flattened_signature())
def get_max_dictionary_depth(self):
"""Return the maximum depth of dictionaries in this type.
Examples:
"bud" returns 0
"a{bud}" returns 1
"a{bud}a{did}" returns 1
"a{buda{did}}" returns 2
"a{qa{bud}a{did})" returns 2
"a{ia{a{buda{did}}i}}" returns 4
"""
return get_max_dictionary_depth(self.get_flattened_signature())
def __str__(self):
return "{0} : {1}".format(self.name, self.arg_type)
def __eq__(self, other):
"""Compares this member definition to another and returns true if equal."""
return self.name == other.name and self.arg_type == other.arg_type
def __ne__(self, other):
"""Implements the '!=' operator."""
if self == other:
return False
return True
def get_indirection_level(signature):
"""Get the number of dimensions in the array or 0 if not an array."""
return len(signature) - len(signature.lstrip('a'))
def get_base_signature(signature, index = 0):
"""Return the base signature i.e. 'i', 'ai', and 'aai' all return 'i'."""
return signature[index:len(signature)].lstrip('a')
def is_array(signature):
"""Return True if this argument is an array. A dictionary is considered an array."""
return signature[0] == "a"
def is_structure(signature):
"""Return True if the base argument type is a structure."""
sig = get_base_signature(signature)
return sig[0] == '('
def is_dictionary(signature):
"""Return True if the base argument type is a dictionary."""
sig = get_base_signature(signature)
return signature[0] == 'a' and sig[0] == '{'
def is_dictionary_array(signature):
"""Return True if the base argument type is an array of dictionaries."""
return is_dictionary(signature) and get_indirection_level(signature) > 1
def __find_end_of_type(signature, index = 0):
"""Returns the index of the start of the next type starting at 'index'.
If there are no more types then return the end of the type signature.
For example:
("ab", 0) returns 1
("ab", 1) returns 2
("aab", 0) returns 1
("aab", 1) returns 1
("aab", 2) returns 3
("abb", 1) returns 2
("abb", 2) returns 3
("bqd", 0) returns 1
("bqd", 1) returns 2
("bqd", 2) returns 3
("(bqd)", 0) returns 4
("(bqd)", 1) returns 2
("(bqd)", 2) returns 3
("(bqd)", 3) returns 4
("(bqd)", 4) returns 5
("(bqd(bad))", 0) returns 9
("(bqd(bad))", 1) returns 2
("(bqd(bad))", 2) returns 3
("(bqd(bad))", 3) returns 4
("(bqd(bad))", 4) returns 8
("(bqd(bad))", 5) returns 6"""
assert(index < len(signature))
c = signature[index]
if c == '(':
end_index = __find_container_end(signature, index, ')')
elif c == '{':
end_index = __find_container_end(signature, index, '}')
elif c == 'a':
base = get_base_signature(signature, index)
end_index = __find_end_of_type(base)
end_index += index + get_indirection_level(signature, index)
else:
end_index = index + 1
return end_index
def is_basic_type(signature):
"""Returns True if the signature is a basic type
'a', '(', '{', and 'v' are not considered basic types because they usually
cannot be handled the same as other types."""
basic_types = ('b','d', 'g', 'i','n','o','q','s','t','u','x','y')
return signature in basic_types
def get_max_array_dimension(signature):
"""Gets the number of array dimensions in this signature."""
return_value = 0
while signature.find((return_value + 1) * 'a') != -1:
return_value += 1
return return_value
def get_max_structure_depth(signature):
return get_max_container_depth(signature, '(', ')')
def get_max_dictionary_depth(signature):
return get_max_container_depth(signature, '{', '}')
def get_max_container_depth(signature, start, stop):
return_value = 0
count = 0
for c in signature:
if c == start:
count += 1
elif c == stop:
count -= 1
if count > return_value:
return_value += 1
return return_value
def split_signature(sig):
"""splits a container signature into individual fields."""
components = []
index = 1
while index < len(sig)-1:
part = sig[index:]
startindex = get_indirection_level(part)
endindex = __find_end_of_type(part, startindex)
components.append(part[:endindex])
index = index + endindex
return components
def make_clean_name(signature):
clean_name = signature.replace("(", "_")
clean_name = clean_name.replace(")", "")
clean_name = clean_name.replace("{", "_")
clean_name = clean_name.replace("}", "")
clean_name = clean_name.replace("[", "_")
clean_name = clean_name.replace("]", "")
return clean_name
def __find_container_end(signature, index, end):
start = signature[index]
count = 0
while index < len(signature):
c = signature[index]
if c == start:
count += 1
elif c == end:
count -= 1
if count == 0:
index += 1
break
index += 1
return index
| 33.177536 | 96 | 0.644207 | 3,937 | 0.429944 | 0 | 0 | 0 | 0 | 0 | 0 | 3,875 | 0.423174 |
739eb239f78d72920cbdfea243f1d357367bd4a8
| 2,187 |
py
|
Python
|
ddcz/migrations/0010_creativepage_creativepageconcept_creativepagesection.py
|
Nathaka/graveyard
|
dcc5ba2fa1679318e65c0078f734cbfeeb287c32
|
[
"MIT"
] | 6 |
2018-06-10T09:47:50.000Z
|
2022-02-13T12:22:07.000Z
|
ddcz/migrations/0010_creativepage_creativepageconcept_creativepagesection.py
|
Nathaka/graveyard
|
dcc5ba2fa1679318e65c0078f734cbfeeb287c32
|
[
"MIT"
] | 268 |
2018-05-30T21:54:50.000Z
|
2022-01-08T21:00:03.000Z
|
ddcz/migrations/0010_creativepage_creativepageconcept_creativepagesection.py
|
jimmeak/graveyard
|
4c0f9d5e8b6c965171d9dc228c765b662f5b7ab4
|
[
"MIT"
] | 4 |
2018-09-14T03:50:08.000Z
|
2021-04-19T19:36:23.000Z
|
# Generated by Django 2.0.2 on 2018-06-13 22:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("ddcz", "0009_auto_20180610_2246"),
]
operations = [
migrations.CreateModel(
name="CreativePage",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=30)),
("slug", models.SlugField(max_length=30)),
("model_class", models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name="CreativePageConcept",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
(
"page",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="ddcz.CreativePage",
),
),
],
),
migrations.CreateModel(
name="CreativePageSection",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=30)),
("slug", models.SlugField(max_length=30)),
],
),
]
| 30.375 | 68 | 0.40695 | 2,028 | 0.927298 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.103338 |
739f4a4af64c366326ef39984c42e5d44fc7cab0
| 8,145 |
py
|
Python
|
libml/preprocess.py
|
isabella232/l2p
|
4379849b009edd9d5fde71d625cbb9aa1166aa17
|
[
"Apache-2.0"
] | 45 |
2021-12-20T19:14:30.000Z
|
2022-03-31T14:08:44.000Z
|
libml/preprocess.py
|
google-research/l2p
|
98b10eaf07d3dd899a324fe4149bf6f01e26c589
|
[
"Apache-2.0"
] | 3 |
2021-12-29T03:53:22.000Z
|
2022-03-18T01:08:25.000Z
|
libml/preprocess.py
|
isabella232/l2p
|
4379849b009edd9d5fde71d625cbb9aa1166aa17
|
[
"Apache-2.0"
] | 5 |
2021-12-22T01:37:18.000Z
|
2022-02-14T23:17:38.000Z
|
# coding=utf-8
# Copyright 2020 The Learning-to-Prompt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocesses."""
from typing import Any, Callable, Dict, Optional
import ml_collections
from augment import augment_utils
import tensorflow as tf
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4867, 0.4408)
CIFAR100_STD = (0.2675, 0.2565, 0.2761)
# Constants for configuring config.<name>
RANDOM_ERASING = "randerasing"
AUGMENT = "augment"
MIX = "mix"
COLORJITTER = "colorjitter"
create_mix_augment = augment_utils.create_mix_augment
def resize_small(image: tf.Tensor,
size: int,
*,
antialias: bool = False) -> tf.Tensor:
"""Resizes the smaller side to `size` keeping aspect ratio.
Args:
image: Single image as a float32 tensor.
size: an integer, that represents a new size of the smaller side of an input
image.
antialias: Whether to use an anti-aliasing filter when downsampling an
image.
Returns:
A function, that resizes an image and preserves its aspect ratio.
"""
h, w = tf.shape(image)[0], tf.shape(image)[1]
# Figure out the necessary h/w.
ratio = (tf.cast(size, tf.float32) / tf.cast(tf.minimum(h, w), tf.float32))
h = tf.cast(tf.round(tf.cast(h, tf.float32) * ratio), tf.int32)
w = tf.cast(tf.round(tf.cast(w, tf.float32) * ratio), tf.int32)
image = tf.image.resize(image, [h, w], antialias=antialias)
return image
def central_crop(image: tf.Tensor, size: int) -> tf.Tensor:
"""Makes central crop of a given size."""
h, w = size, size
top = (tf.shape(image)[0] - h) // 2
left = (tf.shape(image)[1] - w) // 2
image = tf.image.crop_to_bounding_box(image, top, left, h, w)
return image
def decode_and_random_resized_crop(image: tf.Tensor, rng,
resize_size: int) -> tf.Tensor:
"""Decodes the images and extracts a random crop."""
shape = tf.io.extract_jpeg_shape(image)
begin, size, _ = tf.image.stateless_sample_distorted_bounding_box(
shape,
tf.zeros([0, 0, 4], tf.float32),
seed=rng,
area_range=(0.05, 1.0),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
top, left, _ = tf.unstack(begin)
h, w, _ = tf.unstack(size)
image = tf.image.decode_and_crop_jpeg(image, [top, left, h, w], channels=3)
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, (resize_size, resize_size))
return image
def train_preprocess(features: Dict[str, tf.Tensor],
crop_size: int = 224) -> Dict[str, tf.Tensor]:
"""Processes a single example for training."""
image = features["image"]
# This PRNGKey is unique to this example. We can use it with the stateless
# random ops in TF.
rng = features.pop("rng")
rng, rng_crop, rng_flip = tf.unstack(
tf.random.experimental.stateless_split(rng, 3))
image = decode_and_random_resized_crop(image, rng_crop, resize_size=crop_size)
image = tf.image.stateless_random_flip_left_right(image, rng_flip)
return {"image": image, "label": features["label"]}
def train_cifar_preprocess(features: Dict[str, tf.Tensor]):
"""Augmentation function for cifar dataset."""
image = tf.io.decode_jpeg(features["image"])
image = tf.image.resize_with_crop_or_pad(image, 32 + 4, 32 + 4)
rng = features.pop("rng")
rng, rng_crop, rng_flip = tf.unstack(
tf.random.experimental.stateless_split(rng, 3))
# Randomly crop a [HEIGHT, WIDTH] section of the image.
image = tf.image.stateless_random_crop(image, [32, 32, 3], rng_crop)
# Randomly flip the image horizontally
image = tf.image.stateless_random_flip_left_right(image, rng_flip)
image = tf.cast(image, tf.float32) / 255.0
return {"image": image, "label": features["label"]}
def _check_valid_mean_std(mean, std):
expected_shape = (1, 1, 3)
message = "%s shape invalid."
assert all([a == b for a, b in zip(expected_shape, mean.shape)
]), message % "mean"
assert all([a == b for a, b in zip(expected_shape, std.shape)
]), message % "std"
def get_augment_preprocess(
augment_params: ml_collections.ConfigDict,
*,
colorjitter_params: Optional[ml_collections.ConfigDict] = None,
randerasing_params: Optional[ml_collections.ConfigDict] = None,
mean: Optional[tf.Tensor] = None,
std: Optional[tf.Tensor] = None,
basic_process: Callable[[Dict[str, tf.Tensor]],
Dict[str, tf.Tensor]] = train_preprocess,
) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:
"""Creates a custom augmented image preprocess."""
augmentor = None
# If augment_params.type is noop/default, we skip.
if augment_params and augment_params.get(
"type") and augment_params.type not in ("default", "noop"):
augmentor = augment_utils.create_augmenter(**augment_params.to_dict())
jitter = None
if colorjitter_params and colorjitter_params.type not in ("default", "noop"):
jitter = augment_utils.create_augmenter(**colorjitter_params.to_dict())
def train_custom_augment_preprocess(features):
rng = features.pop("rng")
rng, rng_aa, rng_re, rng_jt = tf.unstack(
tf.random.experimental.stateless_split(rng, 4))
features["rng"] = rng
outputs = basic_process(features)
image = outputs["image"]
# image after basic_process has been normalized to [0,1]
image = tf.saturate_cast(image * 255.0, tf.uint8)
if augmentor is not None:
image = augmentor(rng_aa, image)["image"]
if jitter is not None:
image = jitter(rng_jt, image)["image"]
image = tf.cast(image, tf.float32) / 255.0
if mean is not None:
_check_valid_mean_std(mean, std)
image = (image - mean) / std
if randerasing_params:
assert mean is not None, "Random erasing requires normalized images"
# Perform random erasing after mean/std normalization
image = augment_utils.create_random_erasing(
**randerasing_params.to_dict())(rng_re, image)
outputs["image"] = image
return outputs
return train_custom_augment_preprocess
def eval_preprocess(features: Dict[str, tf.Tensor],
mean: Optional[tf.Tensor] = None,
std: Optional[tf.Tensor] = None,
input_size: int = 256,
crop_size: int = 224) -> Dict[str, tf.Tensor]:
"""Process a single example for evaluation."""
image = features["image"]
assert image.dtype == tf.uint8
image = tf.cast(image, tf.float32) / 255.0
# image = resize_small(image, size=int(256 / 224 * input_size))
# image = central_crop(image, size=input_size)
image = resize_small(image, size=input_size) # e.g. 256, 448
image = central_crop(image, size=crop_size) # e.g. 224, 384
if mean is not None:
_check_valid_mean_std(mean, std)
image = (image - mean) / std
return {"image": image, "label": features["label"]}
def cifar_eval_preprocess(
features: Dict[str, tf.Tensor],
mean: Optional[tf.Tensor] = None,
std: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
"""Processes a single example for evaluation for cifar."""
image = features["image"]
assert image.dtype == tf.uint8
image = tf.cast(image, tf.float32) / 255.0
if mean is not None:
_check_valid_mean_std(mean, std)
image = (image - mean) / std
return {"image": image, "label": features["label"]}
| 38.060748 | 80 | 0.672437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,329 | 0.285942 |
73a022545603af3f26c0bf2eec8dadb8c4ffd178
| 2,693 |
py
|
Python
|
glue/viewers/matplotlib/qt/toolbar.py
|
tiagopereira/glue
|
85bf7ce2d252d7bc405e8160b56fc83d46b9cbe4
|
[
"BSD-3-Clause"
] | 1 |
2019-12-17T07:58:35.000Z
|
2019-12-17T07:58:35.000Z
|
glue/viewers/matplotlib/qt/toolbar.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | null | null | null |
glue/viewers/matplotlib/qt/toolbar.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | 1 |
2019-08-04T14:10:12.000Z
|
2019-08-04T14:10:12.000Z
|
from __future__ import absolute_import, division, print_function
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from glue.config import viewer_tool
from glue.viewers.common.tool import CheckableTool, Tool
__all__ = ['MatplotlibTool', 'MatplotlibCheckableTool', 'HomeTool', 'SaveTool',
'PanTool', 'ZoomTool']
def _ensure_mpl_nav(viewer):
# Set up virtual Matplotlib navigation toolbar (don't show it)
if not hasattr(viewer, '_mpl_nav'):
viewer._mpl_nav = NavigationToolbar2QT(viewer.central_widget.canvas, viewer)
viewer._mpl_nav.hide()
def _cleanup_mpl_nav(viewer):
if getattr(viewer, '_mpl_nav', None) is not None:
viewer._mpl_nav.setParent(None)
viewer._mpl_nav.parent = None
class MatplotlibTool(Tool):
def __init__(self, viewer=None):
super(MatplotlibTool, self).__init__(viewer=viewer)
_ensure_mpl_nav(viewer)
def close(self):
_cleanup_mpl_nav(self.viewer)
super(MatplotlibTool, self).close()
class MatplotlibCheckableTool(CheckableTool):
def __init__(self, viewer=None):
super(MatplotlibCheckableTool, self).__init__(viewer=viewer)
_ensure_mpl_nav(viewer)
def close(self):
_cleanup_mpl_nav(self.viewer)
super(MatplotlibCheckableTool, self).close()
@viewer_tool
class HomeTool(MatplotlibTool):
tool_id = 'mpl:home'
icon = 'glue_home'
action_text = 'Home'
tool_tip = 'Reset original zoom'
shortcut = 'H'
def activate(self):
if hasattr(self.viewer, 'state') and hasattr(self.viewer.state, 'reset_limits'):
self.viewer.state.reset_limits()
else:
self.viewer._mpl_nav.home()
@viewer_tool
class SaveTool(MatplotlibTool):
tool_id = 'mpl:save'
icon = 'glue_filesave'
action_text = 'Save plot to file'
tool_tip = 'Save the figure'
def activate(self):
self.viewer._mpl_nav.save_figure()
@viewer_tool
class PanTool(MatplotlibCheckableTool):
tool_id = 'mpl:pan'
icon = 'glue_move'
action_text = 'Pan'
tool_tip = 'Pan axes with left mouse, zoom with right'
shortcut = 'M'
def activate(self):
self.viewer._mpl_nav.pan()
def deactivate(self):
if hasattr(self.viewer, '_mpl_nav'):
self.viewer._mpl_nav.pan()
@viewer_tool
class ZoomTool(MatplotlibCheckableTool):
tool_id = 'mpl:zoom'
icon = 'glue_zoom_to_rect'
action_text = 'Zoom'
tool_tip = 'Zoom to rectangle'
shortcut = 'Z'
def activate(self):
self.viewer._mpl_nav.zoom()
def deactivate(self):
if hasattr(self.viewer, '_mpl_nav'):
self.viewer._mpl_nav.zoom()
| 24.935185 | 88 | 0.678797 | 1,864 | 0.692165 | 0 | 0 | 1,358 | 0.50427 | 0 | 0 | 443 | 0.164501 |
73a0ab5a7274a4ae6d6cb3e1e3d9e17024ee3ea6
| 1,003 |
py
|
Python
|
2_4_overfitting_underfitting/utils_overfitting.py
|
layerwise/training
|
21ad2a5684a3712192fb13f8214bc3bb4c975f3e
|
[
"MIT"
] | null | null | null |
2_4_overfitting_underfitting/utils_overfitting.py
|
layerwise/training
|
21ad2a5684a3712192fb13f8214bc3bb4c975f3e
|
[
"MIT"
] | null | null | null |
2_4_overfitting_underfitting/utils_overfitting.py
|
layerwise/training
|
21ad2a5684a3712192fb13f8214bc3bb4c975f3e
|
[
"MIT"
] | 1 |
2021-07-20T11:38:47.000Z
|
2021-07-20T11:38:47.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interactive, interactive_output, fixed, HBox, VBox
import ipywidgets as widgets
def true_function_old(x):
x_copy = -1 * x
f = 2 * x_copy * np.sin(0.8*x_copy) + 0.5 * x_copy**2 - 5
return f
def sigmoid(x, L=10, k=2, x_0=20):
return L / (1 + np.exp(-k * (x - x_0)))
def true_function(x):
const = 17
lin = -0.25 * x
quad = 0.2*(x-20)**2
sig = sigmoid(x, L=-20, k=0.6, x_0=30)
# quad_sig = - sigmoid(xx, L=1, k=0.6, x_0=30) * (0.1 * (x-40)**2)
sig2 = sigmoid(x, L=-50, k=0.8, x_0=37)
f = const + lin + quad + sig + sig2
return f
def generate_data(n_samples=20, random_state=None):
rng = np.random.RandomState(random_state)
    # Observations
x_sample = 40 * rng.rand(n_samples)
    # Labels
f_sample = true_function(x_sample)
noise = 7 * rng.randn(n_samples)
y_sample = f_sample + noise
return x_sample[:, np.newaxis], y_sample
| 24.463415 | 73 | 0.62014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.104686 |
73a4124d5d48a030e18fb459f88816554d8ff126
| 1,036 |
py
|
Python
|
analyze.py
|
sveitser/mandarin
|
474617971e5eb9120d5ea5454cc2c49bb40b4977
|
[
"MIT"
] | null | null | null |
analyze.py
|
sveitser/mandarin
|
474617971e5eb9120d5ea5454cc2c49bb40b4977
|
[
"MIT"
] | null | null | null |
analyze.py
|
sveitser/mandarin
|
474617971e5eb9120d5ea5454cc2c49bb40b4977
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import jieba
import numpy as np
jieba.setLogLevel(60) # quiet
fname = sys.argv[1]
with open(fname) as f:
text = f.read()
tokenizer = jieba.Tokenizer()
tokens = list(tokenizer.cut(text))
occurences = np.array([tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ])
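# invert frequency to get a per-token difficulty score; +1 guards against zero-frequency entries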
difficulties = 1 / (occurences + 1)
max_occurence = np.max(list(tokenizer.FREQ.values()))
min_score = 1 / (max_occurence + 1)
max_score = 1
perc = 75
mean = np.mean(difficulties)
median = np.percentile(difficulties, perc)
def norm(x):
return (x - min_score) / (max_score - min_score)
normalized_mean = norm(mean)
normalized_median = norm(median)
print(
f"{os.path.basename(fname)}: "
f"mean: {normalized_mean:.6f}, {perc}th percentile: {normalized_median:.6f} "
f"in [0: trivial, 1: hardest]"
)
import matplotlib.pyplot as plt
clipped = difficulties[(difficulties <= 0.01) & (difficulties >= 0.0001)]
plt.hist(clipped, bins=20, density=True)
ax = plt.gca()
ax.set_title(fname)
plt.show()
| 20.313725 | 81 | 0.697876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.159266 |
73a548fe78fa2339c064396148e3d2072e173b7a
| 2,836 |
py
|
Python
|
brown_clustering/data.py
|
helpmefindaname/BrownClustering
|
1b9d3e424a58813dec13ef619ca18e3671d75819
|
[
"MIT"
] | 7 |
2021-11-30T13:35:46.000Z
|
2022-03-31T14:01:04.000Z
|
brown_clustering/data.py
|
helpmefindaname/BrownClustering
|
1b9d3e424a58813dec13ef619ca18e3671d75819
|
[
"MIT"
] | null | null | null |
brown_clustering/data.py
|
helpmefindaname/BrownClustering
|
1b9d3e424a58813dec13ef619ca18e3671d75819
|
[
"MIT"
] | null | null | null |
from itertools import tee
from typing import Dict, Iterator, List, Sequence, Tuple
from brown_clustering.defaultvaluedict import DefaultValueDict
Corpus = Sequence[Sequence[str]]
class BigramCorpus:
def __init__(
self,
corpus: Corpus,
alpha: float = 1,
start_symbol: str = '<s>',
end_symbol: str = '</s>',
min_count: int = 0
):
self.vocabulary: Dict[str, int] = DefaultValueDict(0)
self.gather_vocab(corpus, min_count)
word_count = len(self.vocabulary) + 2
self.alpha = alpha
self.n = alpha * word_count * word_count
self.unigrams: Dict[str, float] = DefaultValueDict(alpha * word_count)
self.bigrams: Dict[Tuple[str, str], float] = DefaultValueDict(alpha)
self.gather_statistics(corpus, start_symbol, end_symbol)
def gather_vocab(self, corpus: Corpus, min_count: int):
for sentence in corpus:
for word in sentence:
self.vocabulary[word] += 1
self.vocabulary = dict(filter(
lambda x: x[1] >= min_count,
self.vocabulary.items()
))
def gather_statistics(
self,
corpus: Corpus,
start_symbol: str = '<s>',
end_symbol: str = '</s>',
):
for sentence in corpus:
act_sentence = [start_symbol] + [
w for w in sentence if w in self.vocabulary
] + [end_symbol]
for word in act_sentence:
self.unigrams[word] += 1
grams = two_grams(act_sentence)
for w1, w2 in grams:
self.n += 1
self.bigrams[(w1, w2)] += 1
def bigram_propa(
self,
cluster1: Sequence[str],
cluster2: Sequence[str]
) -> float:
return sum(
self.bigrams[(w1, w2)]
for w1 in cluster1
for w2 in cluster2
) / self.n
def unigram_propa(self, cluster: Sequence[str]) -> float:
return sum(
self.unigrams[w]
for w in cluster
) / self.n
def ranks(self) -> List[Tuple[str, int]]:
return sorted(self.vocabulary.items(), key=lambda x: (-x[1], x[0]))
def print_stats(self):
extended_vocab = len(self.vocabulary) + 2
alpha_bonus = self.alpha * extended_vocab * extended_vocab
print(f"Vocab count: {len(self.vocabulary)}")
print(f"Token count: {sum(self.vocabulary.values())}")
print(f"unique 2gram count: {len(self.bigrams)}")
print(f"2gram count: {self.n - alpha_bonus}")
print(f"Laplace smoothing: {self.alpha}")
def two_grams(sequence: Sequence) -> Iterator[Tuple]:
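    """Return an iterator over consecutive (w1, w2) pairs of the sequence."""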
iterables = tee(sequence, 2)
next(iterables[1], None)
return zip(*iterables)
| 30.494624 | 78 | 0.565233 | 2,507 | 0.883992 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.077927 |
73a58a2a727d6573f018385b2dad3ec0e4b46b5e
| 3,299 |
py
|
Python
|
xs/layers/ops.py
|
eLeVeNnN/xshinnosuke
|
69da91e0ea5042437edfc31c0e6ff9ef394c6cc9
|
[
"MIT"
] | 290 |
2020-07-06T02:13:12.000Z
|
2021-01-04T14:23:39.000Z
|
xs/layers/ops.py
|
E1eveNn/xshinnosuke
|
69da91e0ea5042437edfc31c0e6ff9ef394c6cc9
|
[
"MIT"
] | 1 |
2020-12-03T11:11:48.000Z
|
2020-12-03T11:11:48.000Z
|
xs/layers/ops.py
|
E1eveNn/xshinnosuke
|
69da91e0ea5042437edfc31c0e6ff9ef394c6cc9
|
[
"MIT"
] | 49 |
2020-07-16T00:27:47.000Z
|
2020-11-26T03:03:14.000Z
|
from .base import *
class Input(Layer):
def __init__(self, input_shape: Union[List, Tuple], **kwargs):
super(Input, self).__init__(input_shape=input_shape, **kwargs)
self._shape = input_shape
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = x
return self._data
class Reshape(Layer):
def __init__(self, shape: Tuple, **kwargs):
super().__init__(shape=shape, **kwargs)
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = F.view(x, (-1, ) + self._shape, self._data)
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
return self._shape
class ZeroPadding2D(Layer):
def __init__(self, padding, **kwargs):
self.padding = padding
super(ZeroPadding2D, self).__init__(**kwargs)
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = F.pad2d(x, self.padding, self._data)
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
self._shape = (input_shape[0], input_shape[1] + 2 * self.padding[0], input_shape[2] + 2 * self.padding[1])
return self._shape
class Add(Layer):
def __call__(self, inbounds: List[Layer], *args, **kwargs):
for inbound in inbounds:
self._in_bounds.append(inbound)
inbound.add_out_bounds(self)
self._shape = inbound.shape
return self
def init_layer_out_tensor(self, x : F.Tensor = None):
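        # reuse the cached static output tensor when possible; reallocate it if the incoming batch outgrows its capacity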
x = self._in_bounds[0].data if x is None else x
if self._data is None or x.shape[0] > self._data.shape_capacity[0]:
self._data = Zeros()((x.shape[0],) + self.shape, requires_grad=self.trainable)
self._data.to('static')
for in_bound in self._in_bounds:
self._data.add_in_bounds(in_bound.data)
elif x.shape[0] < self._data.shape_capacity[0]:
if GLOBAL.TRAINING:
self._data.slices(slice(None, x.shape[0], None))
else:
self._data = Zeros()((x.shape[0],) + self.shape, requires_grad=self.trainable)
self._data.to('static')
for in_bound in self._in_bounds:
self._data.add_in_bounds(in_bound.data)
else:
self._data.slices(slice(None, None, None))
def forward(self, x: F.Tensor = None, *args, **kwargs) -> F.Tensor:
self._data.zero_()
for in_bound in self._in_bounds:
GLOBAL.np.add(self._data.eval, in_bound.data.eval, out=self._data.eval)
if GLOBAL.TRAINING and in_bound.data.requires_grad:
initialize_ops_grad(in_bound.data)
self._data.requires_grad = self._data.requires_grad or in_bound.data.requires_grad
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
return self._shape
def backward(self, gradients: F.Tensor = None):
for in_bound in self._in_bounds:
if in_bound.data.requires_grad:
GLOBAL.np.add(in_bound.data.grad.eval, self._data.grad.eval, out=in_bound.data.grad.eval)
self._data.zero_grad()
| 39.746988 | 114 | 0.620794 | 3,267 | 0.9903 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.00485 |
73a60122798b5b44ac1b77285ac69b9d5cb78587
| 2,888 |
py
|
Python
|
fcore/util.py
|
superwhyun/farmos
|
9292f3ba24b7d07002af0549ae510ce4edf09ce5
|
[
"BSD-3-Clause"
] | null | null | null |
fcore/util.py
|
superwhyun/farmos
|
9292f3ba24b7d07002af0549ae510ce4edf09ce5
|
[
"BSD-3-Clause"
] | null | null | null |
fcore/util.py
|
superwhyun/farmos
|
9292f3ba24b7d07002af0549ae510ce4edf09ce5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 JiNong, Inc.
# All right reserved.
#
"""
Defines utility functions.
"""
import time
import math
import logging
import logging.handlers
def getdefaultlogger():
_logger = logging.getLogger('mate')
_logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
streamHandler.setFormatter(formatter)
_logger.addHandler(streamHandler)
return _logger
class SunTime:
def __init__(self, longitude, latitude):
self._longitude = longitude * -1
self._latitude = latitude
self._today = time.localtime(time.time())
def settoday(self):
self._today = time.localtime(time.time())
def getgamma(self):
return (2.0 * math.pi / 365.0) * self._today.tm_yday
def getgamma2(self, hour):
return (2.0 * math.pi / 365.0) * self._today.tm_yday + (hour/24.0)
def getequationtime(self, gamma):
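        # equation of time in minutes, from a standard trigonometric approximation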
return 229.18 * (0.000075 + 0.001868 * math.cos(gamma) - 0.032077 * math.sin(gamma) - 0.014615 * math.cos(2 * gamma) - 0.040849 * math.sin(2 * gamma))
def getsolardeclination(self, gamma):
return 0.006918 - 0.399912 * math.cos(gamma) + 0.070257 * math.sin(gamma) - 0.006758 * math.cos(2 * gamma) + 0.000907 * math.sin(2 * gamma)
def degtorad(self, deg):
return math.pi * deg / 180.0
def radtodeg(self, rad):
return 180 * rad / math.pi
def gethourangle(self, latitude, declination, tm):
latrad = self.degtorad(latitude)
hourangle = math.acos(math.cos(self.degtorad (90.833)) / (math.cos(latrad) * math.cos(declination))
- math.tan(latrad) * math.tan(declination))
if tm == 1:
return hourangle
elif tm == 0:
return -1 * hourangle
return 0
def gettime(self, gamma, isrise):
eqtime = self.getequationtime(gamma)
declination = self.getsolardeclination(gamma)
hourangle = self.gethourangle(self._latitude, declination, 1 if isrise == True else 0)
delta = self._longitude - self.radtodeg(hourangle)
return 720.0 + 4.0 * delta - eqtime
def getsunrise(self):
tm = self.gettime(self.getgamma (), True)
#return self.gettime(self.getgamma2(int(tm / 60.0)), True) + 540
m = self.gettime(self.getgamma2(int(tm / 60.0)), True) + 540
return int(m * 60)
def getsunset(self):
tm = self.gettime(self.getgamma (), False)
#return self.gettime(self.getgamma2(int(tm / 60.0)), False) + 540
m = self.gettime(self.getgamma2(int(tm / 60.0)), False) + 540
return int(m * 60)
if __name__ == '__main__':
st = SunTime(128.856632, 37.798953)
print("rise", st.getsunrise(), "set", st.getsunset())
| 33.976471 | 158 | 0.621191 | 2,206 | 0.76174 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.126381 |
73a657e874819eb1f55d87b508eba3c94d916b59
| 144 |
py
|
Python
|
src/lib/__init__.py
|
gfjiangly/RCNet
|
ef6860f23943eb8e21fdec565019f2f8eda17673
|
[
"MIT"
] | null | null | null |
src/lib/__init__.py
|
gfjiangly/RCNet
|
ef6860f23943eb8e21fdec565019f2f8eda17673
|
[
"MIT"
] | null | null | null |
src/lib/__init__.py
|
gfjiangly/RCNet
|
ef6860f23943eb8e21fdec565019f2f8eda17673
|
[
"MIT"
] | null | null | null |
# -*- encoding:utf-8 -*-
# @Time : 2019/10/23 15:45
# @Author : gfjiang
# @Site :
# @File : __init__.py
# @Software: PyCharm
| 18 | 30 | 0.513889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.944444 |
73a7196bbf0eb253a97a49fbb8e7cb7ec93df591
| 611 |
py
|
Python
|
tests/manual/i3wmcommands.py
|
diegoperezm/screencast-script
|
ac477c6f44a151cafa88ebfd981d2bbe34f792bd
|
[
"MIT"
] | null | null | null |
tests/manual/i3wmcommands.py
|
diegoperezm/screencast-script
|
ac477c6f44a151cafa88ebfd981d2bbe34f792bd
|
[
"MIT"
] | null | null | null |
tests/manual/i3wmcommands.py
|
diegoperezm/screencast-script
|
ac477c6f44a151cafa88ebfd981d2bbe34f792bd
|
[
"MIT"
] | null | null | null |
import sys
# for development
sys.path.append('../../src')
from screencastscript import ScreencastScript # noqa: E402
screencast = ScreencastScript()
screencast.sleep(1)
screencast.i3wm_focus_left()
screencast.sleep(1)
screencast.i3wm_zoom_in()
screencast.sleep(1)
screencast.i3wm_zoom_out()
screencast.sleep(1)
screencast.i3wm_focus_right()
screencast.sleep(1)
screencast.i3wm_focus_up()
screencast.sleep(1)
screencast.i3wm_focus_down()
screencast.sleep(1)
screencast.i3wm_toggle_fullscreen()
screencast.sleep(1)
screencast.i3wm_ws_2()
screencast.sleep(1)
screencast.i3wm_ws_1()
screencast.sleep(1)
| 16.972222 | 59 | 0.800327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.065466 |
73a7a553c3b396a8049a5ddf4e1a0e97e5a14ea4
| 1,003 |
py
|
Python
|
hippocampus/scripts/s04_hipp_cortex_fc_mean.py
|
CNG-LAB/cng-open
|
b775a8fd554a39ad3b4033e545bd4bf68f7ed46b
|
[
"MIT"
] | null | null | null |
hippocampus/scripts/s04_hipp_cortex_fc_mean.py
|
CNG-LAB/cng-open
|
b775a8fd554a39ad3b4033e545bd4bf68f7ed46b
|
[
"MIT"
] | null | null | null |
hippocampus/scripts/s04_hipp_cortex_fc_mean.py
|
CNG-LAB/cng-open
|
b775a8fd554a39ad3b4033e545bd4bf68f7ed46b
|
[
"MIT"
] | null | null | null |
"""
computes the mean hippocampal-cortical functional connectivity (fc) matrix,
for the left hemisphere subfields
"""
import os
import h5py
import numpy as np
# data dirs
ddir = '../data/'
conndir = '../data/tout_hippoc/'
odir = '../data/tout_group/'
# get HCP - S900 subject list
subjlist = '../data/subjectListS900_QC_gr.txt'
f = open(subjlist); mylist = f.read().split("\n"); f.close()
subjlist = joinedlist = mylist[:-1]
print('We have now %i subjects... ' % (len(subjlist))) # 709
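# running sum of per-subject hippocampus-to-cortex FC matrices (4096 hippocampal points x 360 cortical parcels)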
fc_left = np.zeros((4096, 360))
j = 0
for subjID in subjlist:
fname = os.path.join(conndir, 'HCP_' + subjID + '_left.h5')
f = h5py.File(fname, 'r')
f = np.array(f['HCP_' + subjID])
fc_left = fc_left + f
j += 1
fc_left = fc_left / j
h = h5py.File('../data/tout_group/Hmean709_FC_left.h5', 'w')
h.create_dataset('data', data = fc_left)
h.close()
print(fc_left.min(), fc_left.max(), fc_left.shape, j)
# -0.005300521852874321, 0.39153784016161197, (4096, 360), 709
| 25.075 | 75 | 0.645065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.425723 |
73a85bf483c1c47a0091ad63bb16957bd6c8d4f4
| 3,907 |
py
|
Python
|
setup.py
|
sgp79/reptools
|
3290b8daab58a0c5f2965fb221f7b480c380966b
|
[
"MIT"
] | null | null | null |
setup.py
|
sgp79/reptools
|
3290b8daab58a0c5f2965fb221f7b480c380966b
|
[
"MIT"
] | 1 |
2021-12-10T13:09:54.000Z
|
2021-12-10T13:09:54.000Z
|
setup.py
|
sgp79/reptools
|
3290b8daab58a0c5f2965fb221f7b480c380966b
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
#To install:
# py -3 setup.py sdist
# pip3 install .
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
from io import open
import re
#from reptools import __version__
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
#Get the version
def find_version(*file_paths):
    with open(path.join(here, *file_paths), encoding='utf-8') as f:
        version_file = f.read()
    version_match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]",
        version_file,
        re.M,
    )
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='reptools',
version=open("reptools/version.py").readlines()[-1].split()[-1].strip("\"'"),
    # https://packaging.python.org/specifications/core-metadata/#summary
description='Tools for processing Rep-seq data',
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description,
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown',
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
#url='', # Optional
author='Stephen Preston',
author_email='[email protected]',
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Immunologists',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
# Note that this is a string of words separated by whitespace, not a list.
#keywords='sample setuptools development', # Optional
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy','numba'],
python_requires='>=3.7',
#extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
#package_data={ # Optional
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# The following provides a command called `reptools` which
# executes the function `main` from the reptools.cli package when invoked:
entry_points={
'console_scripts': [
'reptools=reptools.cli:main',
],
},
# List additional URLs that are relevant to your project as a dict.
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
#},
)
| 32.831933 | 98 | 0.653187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,728 | 0.698234 |
73a9012563f8e544e446267b12c23f24456df159
| 1,563 |
py
|
Python
|
peeldb/migrations/0033_auto_20171018_1423.py
|
ashwin31/opensource-job-portal
|
2885ea52f8660e893fe0531c986e3bee33d986a2
|
[
"MIT"
] | 1 |
2021-09-27T05:01:39.000Z
|
2021-09-27T05:01:39.000Z
|
peeldb/migrations/0033_auto_20171018_1423.py
|
kiran1415/opensource-job-portal
|
2885ea52f8660e893fe0531c986e3bee33d986a2
|
[
"MIT"
] | null | null | null |
peeldb/migrations/0033_auto_20171018_1423.py
|
kiran1415/opensource-job-portal
|
2885ea52f8660e893fe0531c986e3bee33d986a2
|
[
"MIT"
] | 1 |
2022-01-05T09:02:32.000Z
|
2022-01-05T09:02:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-18 14:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('peeldb', '0032_skill_skill_type'),
]
operations = [
migrations.AlterField(
model_name='jobpost',
name='job_type',
field=models.CharField(choices=[('full-time', 'Full Time'),
('internship', 'Internship'),
('walk-in', 'Walk-in'),
('government', 'Government'),
('Fresher', 'Fresher')], max_length=50),
),
migrations.AlterField(
model_name='searchresult',
name='job_type',
field=models.CharField(blank=True, choices=[('full-time', 'Full Time'),
('internship', 'Internship'),
('walk-in', 'Walk-in'),
('government', 'Government'),
('Fresher', 'Fresher')], max_length=20, null=True),
),
migrations.AlterField(
model_name='skill',
name='skill_type',
field=models.CharField(choices=[('it', 'IT'), ('non-it', 'Non-IT'), ('other', 'Other')], default='it', max_length=20),
),
]
| 40.076923 | 130 | 0.435061 | 1,405 | 0.898912 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.267434 |
73a9cda8e0d2bd2c5fe35622d180c1e9b443a525
| 1,905 |
py
|
Python
|
application/modules/post/windows-priv-check/wpc/report/issues.py
|
cys3c/viper-shell
|
e05a07362b7d1e6d73c302a24d2506846e43502c
|
[
"PSF-2.0",
"BSD-2-Clause"
] | 2 |
2018-06-30T03:21:30.000Z
|
2020-03-22T02:31:02.000Z
|
application/modules/post/windows-priv-check/wpc/report/issues.py
|
cys3c/viper-shell
|
e05a07362b7d1e6d73c302a24d2506846e43502c
|
[
"PSF-2.0",
"BSD-2-Clause"
] | null | null | null |
application/modules/post/windows-priv-check/wpc/report/issues.py
|
cys3c/viper-shell
|
e05a07362b7d1e6d73c302a24d2506846e43502c
|
[
"PSF-2.0",
"BSD-2-Clause"
] | 3 |
2017-11-15T11:08:20.000Z
|
2020-03-22T02:31:03.000Z
|
from wpc.report.issue import issue
import xml.etree.cElementTree as etree
from lxml import etree as letree
from operator import itemgetter, attrgetter, methodcaller
# TODO should this class contain info about the scan? or define a new class called report?
# Version of script
# Date, time of audit
# Who the audit ran as (username, groups, privs)
# ...
class issues:
def __init__(self):
self.issues = []
def get_by_id(self, identifier):
# search for issue
for i in self.issues:
if i.get_id() == identifier:
return i
# create new issue
i = issue(identifier)
self.add_issue(i)
return i
def add_issue(self, i):
self.issues.append(i)
def add_supporting_data(self, identifier, k, v):
self.get_by_id(identifier).add_supporting_data(k, v)
def get_all(self):
s = sorted(self.issues, key=methodcaller('get_confidence'), reverse=True)
return sorted(s, key=methodcaller('get_severity'), reverse=True)
def as_xml_string(self):
return etree.tostring(self.as_xml())
def as_xml(self):
r = etree.Element('issues')
for i in self.get_all():
r.append(i.as_xml())
return r
def as_text(self):
xslt_fh = open('xsl/text.xsl', 'r') # TODO need to be able to run from other dirs too!
xslt_str = xslt_fh.read()
xslt_fh.close()
xslt_root = letree.XML(xslt_str)
transform = letree.XSLT(xslt_root)
return str(transform(letree.XML(self.as_xml_string())))
def as_html(self):
xslt_fh = open('xsl/html.xsl', 'r') # TODO need to be able to run from other dirs too!
xslt_str = xslt_fh.read()
xslt_fh.close()
xslt_root = letree.XML(xslt_str)
transform = letree.XSLT(xslt_root)
return str(transform(letree.XML(self.as_xml_string())))
| 31.229508 | 95 | 0.632546 | 1,548 | 0.812598 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.205774 |
73aa3640a120523b4d2b177f875511cc1784ef46
| 1,456 |
py
|
Python
|
util/doxify.py
|
lanfangping/ravel
|
7be759f219828b09696faf0b3eb52e83243998f9
|
[
"Apache-2.0"
] | 9 |
2016-03-14T19:19:21.000Z
|
2020-03-24T07:04:39.000Z
|
util/doxify.py
|
lanfangping/ravel
|
7be759f219828b09696faf0b3eb52e83243998f9
|
[
"Apache-2.0"
] | null | null | null |
util/doxify.py
|
lanfangping/ravel
|
7be759f219828b09696faf0b3eb52e83243998f9
|
[
"Apache-2.0"
] | 10 |
2016-05-10T14:47:56.000Z
|
2021-11-08T05:47:47.000Z
|
#!/usr/bin/python
"""
From Mininet 2.2.1: convert simple documentation to epydoc/pydoctor-compatible markup
"""
from sys import stdin, stdout, argv
import os
from tempfile import mkstemp
from subprocess import call
import re
spaces = re.compile(r'\s+')
singleLineExp = re.compile(r'\s+"([^"]+)"')
commentStartExp = re.compile(r'\s+"""')
commentEndExp = re.compile(r'"""$')
returnExp = re.compile(r'\s+(returns:.*)')
lastindent = ''
comment = False
def fixParam(line):
"Change foo: bar to @foo bar"
result = re.sub(r'(\w+):', r'@param \1', line)
result = re.sub(r' @', r'@', result)
return result
def fixReturns(line):
"Change returns: foo to @return foo"
return re.sub('returns:', r'@returns', line)
def fixLine(line):
global comment
match = spaces.match(line)
if not match:
return line
else:
indent = match.group(0)
if singleLineExp.match(line):
return re.sub('"', '"""', line)
if commentStartExp.match(line):
comment = True
if comment:
line = fixReturns(line)
line = fixParam(line)
if commentEndExp.search(line):
comment = False
return line
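# Illustrative behaviour of fixLine (hypothetical example, added for clarity):
#   fixLine('    "Return the sum"\n')  ->  '    """Return the sum"""\n'
# i.e. single-line docstrings gain triple quotes, and lines inside multi-line
# docstrings get their 'foo:' / 'returns:' markers rewritten by fixParam/fixReturns.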
def fixLines(lines, fid):
for line in lines:
os.write(fid, fixLine(line))
if __name__ == '__main__':
infile = open(argv[1])
outfid, outname = mkstemp()
fixLines(infile.readlines(), outfid)
infile.close()
os.close(outfid)
call([ 'doxypy', outname ])
| 23.483871 | 85 | 0.625687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.213599 |
73aa48515ec8f415bcd5c491e96baf51080aa39d
| 3,924 |
py
|
Python
|
mysite/stock/views.py
|
flohh-py/django-tutorial
|
feecb2b25d88abe0cdccdae4cef87658fa5d8ea7
|
[
"MIT"
] | null | null | null |
mysite/stock/views.py
|
flohh-py/django-tutorial
|
feecb2b25d88abe0cdccdae4cef87658fa5d8ea7
|
[
"MIT"
] | null | null | null |
mysite/stock/views.py
|
flohh-py/django-tutorial
|
feecb2b25d88abe0cdccdae4cef87658fa5d8ea7
|
[
"MIT"
] | null | null | null |
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.shortcuts import redirect
from .models import StockEntry, StockEntryLine
from .forms import StockEntryForm, StockEntryLineForm, StockEntryLineIF
from main.views import BaseView
class StockEntryList(BaseView, ListView):
model = StockEntry
template_name = 'stock/list.html'
paginate_by = 8
permission_required = 'stockentry.view_stockentry'
class StockEntryDetail(BaseView, DetailView):
model = StockEntry
form_class = StockEntryForm
template_name = 'stock/detail.html'
fields = "__all__"
pk_url_kwarg = 'pk'
permission_required = 'stockentry.view_stockentry'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
lines = StockEntryLine.objects.all().filter(parent=self.kwargs['pk'])
new_line = StockEntryLineForm(initial={'parent':self.object})
context['new_line'] = new_line
context['lines'] = lines
return context
class StockEntryCreate(BaseView, CreateView):
model = StockEntry
form_class = StockEntryForm
template_name = 'stock/create.html'
permission_required = 'stockentry.add_stockentry'
def get_success_url(self):
return reverse('stock:detail', kwargs={'pk':self.object.id})
class StockEntryUpdate(BaseView, UpdateView):
model = StockEntry
form_class = StockEntryForm
formset_class = StockEntryLineIF
template_name = 'stock/detail.html'
pk_url_kwarg = 'pk'
success_url = reverse_lazy('stock:detail')
permission_required = 'stockentry.change_stockentry'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# lines = StockEntryLine.objects.all().filter(parent=self.kwargs['pk'])
# new_line = StockEntryLineForm(initial={'parent':self.object})
# context['new_line'] = new_line
# context['lines'] = lines
# return context
# def get_success_url(self):
# pk = self.kwargs['pk']
# return reverse('stock:detail', kwargs={'pk':pk})
def post(self, request, *args, **kwargs):
obj = self.get_object()
if kwargs.get('process') == 'submit':
obj.submit_stock_entry(obj.id)
if kwargs.get('process') == 'cancel':
obj.cancel_stock_entry(obj.id)
return redirect('stock:detail', pk=obj.id)
class StockEntryLineCreate(BaseView, CreateView):
model = StockEntryLine
form_class = StockEntryLineForm
template_name = 'stock/add_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.add_stockentryline'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['parent'] = self.kwargs['pk']
# return context
def get_success_url(self):
# pk = self.kwargs['pk']
# parent = StockEntry.objects.get(pk=self.kwargs['pk'])
parent_id = self.request.POST['parent']
return reverse('stock:detail', kwargs={'pk':parent_id})
class StockEntryLineEdit(BaseView, UpdateView):
model = StockEntryLine
form_class = StockEntryLineForm
template_name = 'stock/edit_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.change_stockentryline'
def get_success_url(self):
line = StockEntryLine.objects.get(pk=self.kwargs['pk'])
return reverse('stock:detail', kwargs={'pk':line.parent.id})
class StockEntryLineDelete(BaseView, DeleteView):
model = StockEntryLine
template_name = 'stock/delete_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.delete_stockentryline'
def get_success_url(self):
return reverse('stock:detail', kwargs={'pk':self.object.parent.id})
| 34.421053 | 79 | 0.690367 | 3,534 | 0.900612 | 0 | 0 | 0 | 0 | 0 | 0 | 1,227 | 0.312691 |
73aaccfbd257c25514479c0a480ba43ed3380e07
| 2,589 |
py
|
Python
|
src/sentry/web/frontend/generic.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/web/frontend/generic.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/web/frontend/generic.py
|
erhuabushuo/sentry
|
8b3bad10155aaacfdff80910e5972e64304e880c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.web.frontend.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from sentry.models import Team
from sentry.permissions import can_create_teams
from sentry.plugins import plugins
from sentry.plugins.base import Response
from sentry.web.decorators import login_required
from sentry.web.helpers import render_to_response
@login_required
def dashboard(request, template='dashboard.html'):
team_list = Team.objects.get_for_user(request.user, with_projects=True)
if not team_list:
if can_create_teams(request.user):
return HttpResponseRedirect(reverse('sentry-new-team'))
return render_to_response('sentry/generic_error.html', {
'title': _('No Membership'),
'message': _('You are not a member of any teams in Sentry and you do not have access to create a new team.'),
}, request)
return render_to_response('sentry/select_team.html', {
'team_list': team_list.values(),
'can_create_teams': can_create_teams(request.user),
}, request)
def static_media(request, **kwargs):
"""
Serve static files below a given point in the directory structure.
"""
from django.contrib.staticfiles.views import serve
module = kwargs.get('module')
path = kwargs.get('path', '')
if module:
path = '%s/%s' % (module, path)
return serve(request, path, insecure=True)
def missing_perm(request, perm, **kwargs):
"""
Returns a generic response if you're missing permission to perform an
action.
Plugins may overwrite this with the ``missing_perm_response`` hook.
"""
response = plugins.first('missing_perm_response', request, perm, **kwargs)
if response:
if isinstance(response, HttpResponseRedirect):
return response
if not isinstance(response, Response):
raise NotImplementedError('Use self.render() when returning responses.')
return response.respond(request, {
'perm': perm,
})
if perm.label:
return render_to_response('sentry/generic_error.html', {
'title': _('Missing Permission'),
'message': _('You do not have the required permissions to %s.') % (perm.label,)
}, request)
return HttpResponseRedirect(reverse('sentry'))
| 31.573171 | 121 | 0.683275 | 0 | 0 | 0 | 0 | 701 | 0.270761 | 0 | 0 | 893 | 0.344921 |
73aaee020a07b3d8d2a092fd658dc4eb59eaed84
| 878 |
py
|
Python
|
setup.py
|
harsh020/synthetic_metric
|
acecba0150a37c58613a477918ad407373c4cd5c
|
[
"MIT"
] | 1 |
2021-11-08T09:19:02.000Z
|
2021-11-08T09:19:02.000Z
|
setup.py
|
harsh020/synthetic_metric
|
acecba0150a37c58613a477918ad407373c4cd5c
|
[
"MIT"
] | 2 |
2021-10-14T11:30:21.000Z
|
2021-10-14T11:55:50.000Z
|
setup.py
|
harsh020/synthetic_metric
|
acecba0150a37c58613a477918ad407373c4cd5c
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name="synmetric",
version="0.2.dev1",
license='MIT',
author="Harsh Soni",
author_email="[email protected]",
description="Metric to evaluate data quality for synthetic data.",
url="https://github.com/harsh020/synthetic_metric",
download_url = 'https://github.com/harsh020/synthetic_metric/archive/v_02dev1.tar.gz',
project_urls={
"Bug Tracker": "https://github.com/harsh020/synthetic_metric/issues",
},
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires = [
'numpy',
'pandas',
'scikit-learn',
'scipy'
]
)
| 28.322581 | 90 | 0.624146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.548975 |
73ac2455924ff0001809acc001de20f6e6bc1656
| 813 |
py
|
Python
|
neurokit2/microstates/__init__.py
|
danibene/NeuroKit
|
df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a
|
[
"MIT"
] | 1 |
2020-12-31T17:48:11.000Z
|
2020-12-31T17:48:11.000Z
|
neurokit2/microstates/__init__.py
|
danibene/NeuroKit
|
df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a
|
[
"MIT"
] | null | null | null |
neurokit2/microstates/__init__.py
|
danibene/NeuroKit
|
df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a
|
[
"MIT"
] | 2 |
2021-12-25T15:39:49.000Z
|
2021-12-25T15:44:16.000Z
|
"""Submodule for NeuroKit."""
from .microstates_clean import microstates_clean
from .microstates_peaks import microstates_peaks
from .microstates_static import microstates_static
from .microstates_dynamic import microstates_dynamic
from .microstates_complexity import microstates_complexity
from .microstates_segment import microstates_segment
from .microstates_classify import microstates_classify
from .microstates_plot import microstates_plot
from .microstates_findnumber import microstates_findnumber
__all__ = ["microstates_clean",
"microstates_peaks",
"microstates_static",
"microstates_dynamic",
"microstates_complexity",
"microstates_segment",
"microstates_classify",
"microstates_plot",
"microstates_findnumber"]
| 35.347826 | 58 | 0.771218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.266913 |
73ac5bc20db43b168b228169be2bbfd420f16a64
| 2,184 |
py
|
Python
|
notario/tests/validators/test_hybrid.py
|
alfredodeza/notario
|
036bdc8435778c6f20f059d3789c8eb8242cff92
|
[
"MIT"
] | 4 |
2015-08-20T20:14:55.000Z
|
2018-06-01T14:39:29.000Z
|
notario/tests/validators/test_hybrid.py
|
alfredodeza/notario
|
036bdc8435778c6f20f059d3789c8eb8242cff92
|
[
"MIT"
] | 9 |
2016-02-04T21:46:12.000Z
|
2018-11-14T04:43:10.000Z
|
notario/tests/validators/test_hybrid.py
|
alfredodeza/notario
|
036bdc8435778c6f20f059d3789c8eb8242cff92
|
[
"MIT"
] | 4 |
2015-04-29T20:40:12.000Z
|
2018-11-14T04:08:20.000Z
|
from pytest import raises
from notario.validators import Hybrid
from notario.exceptions import Invalid
from notario.decorators import optional
from notario import validate
def validator(x):
assert x, 'fail'
class TestHybrid(object):
def test_use_validator_passes(self):
schema = ()
hybrid = Hybrid(validator, schema)
assert hybrid(1) is None
def test_use_validator_fails(self):
schema = ()
hybrid = Hybrid(validator, schema)
with raises(Invalid) as exc:
hybrid(0)
error = exc.value.args[0]
assert '0 did not pass validation against callable' in error
def test_use_schema_passes(self):
schema = ('a', 1)
hybrid = Hybrid(validator, schema)
hybrid({0: ('a', 1)})
def test_use_schema_fails(self):
schema = ('a', 2)
hybrid = Hybrid(validator, schema)
with raises(Invalid) as exc:
hybrid({0: ('a', 1)})
error = exc.value.args[0]
assert 'a -> 1 did not match 2' in error
class TestFunctional(object):
def test_passes_single_value(self):
sschema = (1, 2)
schema = ('a', Hybrid(validator, sschema))
data = {'a': 2}
assert validate(data, schema) is None
def test_passes_object(self):
sschema = (1, 2)
schema = ('a', Hybrid(validator, sschema))
data = {'a': {1: 2}}
assert validate(data, schema) is None
def test_fail_object(self):
sschema = (1, 1)
schema = ('a', Hybrid(validator, sschema))
data = {'a': {1: 2}}
with raises(Invalid) as exc:
validate(data, schema)
error = exc.value.args[0]
assert '1 -> 2 did not match 1' in error
assert error.startswith('-> a -> 1')
def test_extra_unexpected_items(self):
optional_schema = (optional(1), 1)
schema = ('a', Hybrid(validator, optional_schema))
data = {'a': {'foo': 'bar'}}
with raises(Invalid) as exc:
validate(data, schema)
error = exc.value.args[0]
assert '-> a did not match {}' in error
assert exc.value.reason == 'unexpected extra items'
| 29.513514 | 68 | 0.588828 | 1,965 | 0.899725 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.092491 |
73ac608fd669eeeca5d58b623c5bbec41cd2e0ea
| 346 |
py
|
Python
|
players/urls.py
|
OnerInce/nfl-rest_api
|
8d66d68ae7f04476a1b9f509e69a9d0dc83bfcca
|
[
"Apache-2.0"
] | 2 |
2021-06-14T18:14:10.000Z
|
2022-01-29T18:45:28.000Z
|
players/urls.py
|
OnerInce/nfl-rest_api
|
8d66d68ae7f04476a1b9f509e69a9d0dc83bfcca
|
[
"Apache-2.0"
] | null | null | null |
players/urls.py
|
OnerInce/nfl-rest_api
|
8d66d68ae7f04476a1b9f509e69a9d0dc83bfcca
|
[
"Apache-2.0"
] | 1 |
2022-02-09T14:14:20.000Z
|
2022-02-09T14:14:20.000Z
|
from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.SimpleRouter()
router.register(r'players', views.PlayerView, basename='players')
router.register(r'teams', views.TeamView, basename='teams')
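# Note (added for clarity): SimpleRouter generates the usual list/detail routes here,
# e.g. /players/ and /players/<pk>/ (and likewise for /teams/), which are pulled in
# via include(router.urls) below alongside the explicit welcome view.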
urlpatterns = [
path('', views.APIWelcomeView),
path('', include((router.urls))),
]
| 28.833333 | 66 | 0.736994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.109827 |
73ad356948f61ca0a0905878d21b428c799f6aa2
| 380 |
py
|
Python
|
watch/migrations/0014_auto_20201101_2304.py
|
msyoki/Neighborhood
|
d7eb55ba7772388850d8bcf04a867aba3fa81665
|
[
"Unlicense"
] | null | null | null |
watch/migrations/0014_auto_20201101_2304.py
|
msyoki/Neighborhood
|
d7eb55ba7772388850d8bcf04a867aba3fa81665
|
[
"Unlicense"
] | null | null | null |
watch/migrations/0014_auto_20201101_2304.py
|
msyoki/Neighborhood
|
d7eb55ba7772388850d8bcf04a867aba3fa81665
|
[
"Unlicense"
] | 1 |
2021-02-08T10:27:06.000Z
|
2021-02-08T10:27:06.000Z
|
# Generated by Django 2.0.2 on 2020-11-01 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watch', '0013_alert'),
]
operations = [
migrations.AlterField(
model_name='alert',
name='news',
field=models.CharField(max_length=300, null=True),
),
]
| 20 | 62 | 0.584211 | 287 | 0.755263 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.207895 |
73aed6f56861e4609809462a9a1cf35c41cc4da9
| 612 |
py
|
Python
|
torchx/examples/apps/lightning_classy_vision/test/component_test.py
|
LaudateCorpus1/torchx
|
9ee0fdbf63882ba836c00d7522f6850c0c6dc418
|
[
"BSD-3-Clause"
] | 101 |
2021-06-12T20:00:09.000Z
|
2022-03-31T11:14:35.000Z
|
torchx/examples/apps/lightning_classy_vision/test/component_test.py
|
LaudateCorpus1/torchx
|
9ee0fdbf63882ba836c00d7522f6850c0c6dc418
|
[
"BSD-3-Clause"
] | 340 |
2021-06-14T18:16:12.000Z
|
2022-03-31T21:10:28.000Z
|
torchx/examples/apps/lightning_classy_vision/test/component_test.py
|
LaudateCorpus1/torchx
|
9ee0fdbf63882ba836c00d7522f6850c0c6dc418
|
[
"BSD-3-Clause"
] | 19 |
2021-06-13T06:17:21.000Z
|
2022-03-28T19:28:00.000Z
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torchx.examples.apps.lightning_classy_vision.component as lightning_classy_vision
from torchx.components.component_test_base import ComponentTestCase
class DistributedComponentTest(ComponentTestCase):
def test_trainer(self) -> None:
self.validate(lightning_classy_vision, "trainer")
def test_interpret(self) -> None:
self.validate(lightning_classy_vision, "interpret")
| 36 | 88 | 0.785948 | 243 | 0.397059 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.364379 |
73aff3784e37e6b27b43b9c61f5212221ec2b0ef
| 1,270 |
py
|
Python
|
app.py
|
cykreet/getV
|
429833b94fe9c40c594290c9d4b163e8559a4033
|
[
"MIT"
] | null | null | null |
app.py
|
cykreet/getV
|
429833b94fe9c40c594290c9d4b163e8559a4033
|
[
"MIT"
] | null | null | null |
app.py
|
cykreet/getV
|
429833b94fe9c40c594290c9d4b163e8559a4033
|
[
"MIT"
] | null | null | null |
import requests
from sanic import Sanic
from sanic.response import json
from sanic_limiter import Limiter, get_remote_address
from bs4 import BeautifulSoup
async def ratelimit_handler(request, exception):
return json({"error": f"Ratelimit exceeded {exception}."}, status=429)
app = Sanic()
app.error_handler.add(Exception, ratelimit_handler)
limiter = Limiter(app, global_limits=["1 per 3 seconds", "50 per hour"], key_func=get_remote_address)
@app.route("/")
async def main(request):
if not (bot := request.args.get("bot")):
return json({"error": "Bot query is required. Example: ?bot=atlas"})
soup = BeautifulSoup((response := requests.get(f"https://top.gg/bot/{bot}")).content, "html.parser")
if (status := response.status_code) not in [200, 204]:
return json({"status": status, "error": f"Failed to get info on \"{bot}\"."})
try:
votes = int(soup.find(id="points").string.strip())
except:
return json({"status": status, "error": "Was unable to parse bot votes."})
return json({"status": status, "name": soup.find("span", {"class": "bot-name"}).string.strip() if soup.find("span", {"class": "bot-name"}) else bot, "votes": votes})
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9500)
| 36.285714 | 169 | 0.670866 | 0 | 0 | 0 | 0 | 749 | 0.589764 | 856 | 0.674016 | 361 | 0.284252 |
73b067acf9b9f460405ab89ad75c34fdcfb06605
| 8,373 |
py
|
Python
|
third_party/xiuminglib/xiuminglib/vis/video.py
|
leehsiu/nerfactor
|
87f7d3ffa56bdbca925958a4b89e249d35006c80
|
[
"Apache-2.0"
] | 183 |
2021-06-04T01:22:57.000Z
|
2022-03-31T06:18:20.000Z
|
third_party/xiuminglib/xiuminglib/vis/video.py
|
leehsiu/nerfactor
|
87f7d3ffa56bdbca925958a4b89e249d35006c80
|
[
"Apache-2.0"
] | 40 |
2019-05-05T17:04:10.000Z
|
2021-09-06T18:11:19.000Z
|
third_party/xiuminglib/xiuminglib/vis/video.py
|
leehsiu/nerfactor
|
87f7d3ffa56bdbca925958a4b89e249d35006c80
|
[
"Apache-2.0"
] | 26 |
2021-06-04T18:28:11.000Z
|
2022-03-22T13:44:19.000Z
|
from os.path import join, dirname
import numpy as np
from .text import put_text
from .. import const
from ..os import makedirs
from ..imprt import preset_import
from ..log import get_logger
logger = get_logger()
def make_video(
imgs, fps=24, outpath=None, method='matplotlib', dpi=96, bitrate=-1):
"""Writes a list of images into a grayscale or color video.
Args:
imgs (list(numpy.ndarray)): Each image should be of type ``uint8`` or
``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB).
fps (int, optional): Frame rate.
outpath (str, optional): Where to write the video to (a .mp4 file).
``None`` means
``os.path.join(const.Dir.tmp, 'make_video.mp4')``.
method (str, optional): Method to use: ``'matplotlib'``, ``'opencv'``,
``'video_api'``.
dpi (int, optional): Dots per inch when using ``matplotlib``.
bitrate (int, optional): Bit rate in kilobits per second when using
``matplotlib``; reasonable values include 7200.
Writes
- A video of the images.
"""
if outpath is None:
outpath = join(const.Dir.tmp, 'make_video.mp4')
makedirs(dirname(outpath))
assert imgs, "Frame list is empty"
for frame in imgs:
assert np.issubdtype(frame.dtype, np.unsignedinteger), \
"Image type must be unsigned integer"
h, w = imgs[0].shape[:2]
for frame in imgs[1:]:
assert frame.shape[:2] == (h, w), \
"All frames must have the same shape"
if method == 'matplotlib':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import animation
w_in, h_in = w / dpi, h / dpi
fig = plt.figure(figsize=(w_in, h_in))
Writer = animation.writers['ffmpeg'] # may require you to specify path
writer = Writer(fps=fps, bitrate=bitrate)
def img_plt(arr):
img_plt_ = plt.imshow(arr)
ax = plt.gca()
ax.set_position([0, 0, 1, 1])
ax.set_axis_off()
return img_plt_
anim = animation.ArtistAnimation(fig, [(img_plt(x),) for x in imgs])
anim.save(outpath, writer=writer)
# If obscure error like "ValueError: Invalid file object: <_io.Buff..."
# occurs, consider upgrading matplotlib so that it prints out the real,
# underlying ffmpeg error
plt.close('all')
elif method == 'opencv':
cv2 = preset_import('cv2', assert_success=True)
# TODO: debug codecs (see http://www.fourcc.org/codecs.php)
if outpath.endswith('.mp4'):
# fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# fourcc = cv2.VideoWriter_fourcc(*'X264')
fourcc = cv2.VideoWriter_fourcc(*'H264')
# fourcc = 0x00000021
elif outpath.endswith('.avi'):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
else:
raise NotImplementedError("Video type of\n\t%s" % outpath)
vw = cv2.VideoWriter(outpath, fourcc, fps, (w, h))
for frame in imgs:
if frame.ndim == 3:
frame = frame[:, :, ::-1] # cv2 uses BGR
vw.write(frame)
vw.release()
elif method == 'video_api':
video_api = preset_import('video_api', assert_success=True)
assert outpath.endswith('.webm'), "`video_api` requires .webm"
with video_api.write(outpath, fps=fps) as h:
for frame in imgs:
if frame.ndim == 3 and frame.shape[2] == 4:
frame = frame[:, :, :3]
#frame = frame.astype(np.ubyte)
h.add_frame(frame)
else:
raise ValueError(method)
logger.debug("Images written as a video to:\n%s", outpath)
def make_comparison_video(
imgs1, imgs2, bar_width=4, bar_color=(1, 0, 0), sweep_vertically=False,
sweeps=1, label1='', label2='', font_size=None, font_ttf=None,
label1_top_left_xy=None, label2_top_left_xy=None, **make_video_kwargs):
"""Writes two lists of images into a comparison video that toggles between
two videos with a sweeping bar.
Args:
imgs? (list(numpy.ndarray)): Each image should be of type ``uint8`` or
``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB).
bar_width (int, optional): Width of the sweeping bar.
bar_color (tuple(float), optional): Bar and label RGB, normalized to
:math:`[0,1]`. Defaults to red.
sweep_vertically (bool, optional): Whether to sweep vertically or
horizontally.
sweeps (int, optional): Number of sweeps.
label? (str, optional): Label for each video.
font_size (int, optional): Font size.
font_ttf (str, optional): Path to the .ttf font file. Defaults to Arial.
label?_top_left_xy (tuple(int), optional): The XY coordinate of the
label's top left corner.
make_video_kwargs (dict, optional): Keyword arguments for
:func:`make_video`.
Writes
- A comparison video.
"""
# Bar is perpendicular to sweep-along
sweep_along = 0 if sweep_vertically else 1
bar_along = 1 if sweep_vertically else 0
# Number of frames
n_frames = len(imgs1)
assert n_frames == len(imgs2), \
"Videos to be compared have different numbers of frames"
img_shape = imgs1[0].shape
# Bar color according to image dtype
img_dtype = imgs1[0].dtype
bar_color = np.array(bar_color, dtype=img_dtype)
if np.issubdtype(img_dtype, np.integer):
bar_color *= np.iinfo(img_dtype).max
# Map from frame index to bar location, considering possibly multiple trips
bar_locs = []
for i in range(sweeps):
ind = np.arange(0, img_shape[sweep_along])
if i % 2 == 1: # reverse every other trip
ind = ind[::-1]
bar_locs.append(ind)
bar_locs = np.hstack(bar_locs) # all possible locations
ind = np.linspace(0, len(bar_locs) - 1, num=n_frames, endpoint=True)
bar_locs = [bar_locs[int(x)] for x in ind] # uniformly sampled
# Label locations
if label1_top_left_xy is None:
# Label 1 at top left corner
label1_top_left_xy = (int(0.1 * img_shape[1]), int(0.05 * img_shape[0]))
if label2_top_left_xy is None:
if sweep_vertically:
# Label 2 at bottom left corner
label2_top_left_xy = (
int(0.1 * img_shape[1]), int(0.75 * img_shape[0]))
else:
# Label 2 at top right corner
label2_top_left_xy = (
int(0.7 * img_shape[1]), int(0.05 * img_shape[0]))
frames = []
for i, (img1, img2) in enumerate(zip(imgs1, imgs2)):
        assert img1.shape == img_shape, f"`imgs1[{i}]` has a different shape"
        assert img2.shape == img_shape, f"`imgs2[{i}]` has a different shape"
        assert img1.dtype == img_dtype, f"`imgs1[{i}]` has a different dtype"
        assert img2.dtype == img_dtype, f"`imgs2[{i}]` has a different dtype"
# Label the two images
img1 = put_text(
img1, label1, label_top_left_xy=label1_top_left_xy,
font_size=font_size, font_color=bar_color, font_ttf=font_ttf)
img2 = put_text(
img2, label2, label_top_left_xy=label2_top_left_xy,
font_size=font_size, font_color=bar_color, font_ttf=font_ttf)
# Bar start and end
bar_loc = bar_locs[i]
bar_width_half = bar_width // 2
bar_start = max(0, bar_loc - bar_width_half)
bar_end = min(bar_loc + bar_width_half, img_shape[sweep_along])
# Up to bar start, we show Image 1; bar end onwards, Image 2
img1 = np.take(img1, range(bar_start), axis=sweep_along)
img2 = np.take(
img2, range(bar_end, img_shape[sweep_along]), axis=sweep_along)
# Between the two images, we show the bar
actual_bar_width = img_shape[
sweep_along] - img1.shape[sweep_along] - img2.shape[sweep_along]
reps = [1, 1, 1]
reps[sweep_along] = actual_bar_width
reps[bar_along] = img_shape[bar_along]
bar_img = np.tile(bar_color, reps)
frame = np.concatenate((img1, bar_img, img2), axis=sweep_along)
frames.append(frame)
make_video(frames, **make_video_kwargs)
| 37.886878 | 80 | 0.609817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,253 | 0.388511 |
73b135f20a4d854cdb5b09c10b76e9756be5c474
| 161 |
py
|
Python
|
shipfunk_python/__init__.py
|
vilkasgroup/shipfunk_python
|
cd8a5414bda7e9670511c52d0b4df2efd11ee5d9
|
[
"MIT"
] | null | null | null |
shipfunk_python/__init__.py
|
vilkasgroup/shipfunk_python
|
cd8a5414bda7e9670511c52d0b4df2efd11ee5d9
|
[
"MIT"
] | 2 |
2018-01-16T07:32:18.000Z
|
2018-01-17T07:29:41.000Z
|
shipfunk_python/__init__.py
|
vilkasgroup/shipfunk_python
|
cd8a5414bda7e9670511c52d0b4df2efd11ee5d9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for Shipfunk."""
__author__ = """Jaana Sarajärvi"""
__email__ = '[email protected]'
__version__ = '0.1.1'
| 20.125 | 39 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.716049 |
73b14a8ac2d94f0475d3f40d5181eb41aedadcce
| 638 |
py
|
Python
|
vpc/nos/driver/ovs/ne.py
|
zhufawuwo/baton
|
64c88750bc96b92e268b4903f34a1d5021c686f4
|
[
"Apache-2.0"
] | null | null | null |
vpc/nos/driver/ovs/ne.py
|
zhufawuwo/baton
|
64c88750bc96b92e268b4903f34a1d5021c686f4
|
[
"Apache-2.0"
] | null | null | null |
vpc/nos/driver/ovs/ne.py
|
zhufawuwo/baton
|
64c88750bc96b92e268b4903f34a1d5021c686f4
|
[
"Apache-2.0"
] | null | null | null |
#! python3
# coding: utf-8
from vpc.nos import NetworkElement,NetworkElementEvent,event_t,EventChain
class OVSEvent(NetworkElementEvent):
def __init__(self,ne_id,type):
super().__init__(ne_id,type)
class OVS(NetworkElement):
def __init__(self,channel,datapath):
super().__init__()
self.chn = channel
self.ofp = self.chn.ofp
self._datapath = datapath
self.ne_online()
@property
def datapath(self):
return self._datapath
def ne_online(self):
e = OVSEvent(self.id,event_t.NE_ONLINE)
EventChain().feed(e)
if __name__ == "__main__":
pass
| 20.580645 | 73 | 0.653605 | 492 | 0.77116 | 0 | 0 | 63 | 0.098746 | 0 | 0 | 35 | 0.054859 |
73b18a00ca497be31f461b8bdce57d8afe3a826f
| 1,307 |
py
|
Python
|
cumulusci/core/config/BaseConfig.py
|
leboff/CumulusCI
|
81edbb1d64f2cc215a951c570052a1e423821cc1
|
[
"BSD-3-Clause"
] | 163 |
2018-09-13T18:49:34.000Z
|
2022-03-25T08:37:15.000Z
|
cumulusci/core/config/BaseConfig.py
|
leboff/CumulusCI
|
81edbb1d64f2cc215a951c570052a1e423821cc1
|
[
"BSD-3-Clause"
] | 1,280 |
2018-09-11T20:09:37.000Z
|
2022-03-31T18:40:21.000Z
|
cumulusci/core/config/BaseConfig.py
|
leboff/CumulusCI
|
81edbb1d64f2cc215a951c570052a1e423821cc1
|
[
"BSD-3-Clause"
] | 93 |
2018-09-13T07:29:22.000Z
|
2022-03-26T23:15:48.000Z
|
import logging
class BaseConfig(object):
"""BaseConfig provides a common interface for nested access for all Config objects in CCI."""
defaults = {}
def __init__(self, config=None, keychain=None):
if config is None:
self.config = {}
else:
self.config = config
self._init_logger()
self._load_config()
def _init_logger(self):
"""Initializes self.logger"""
self.logger = logging.getLogger(__name__)
def _load_config(self):
"""Subclasses may override this method to initialize :py:attr:`~config`"""
pass
def __getattr__(self, name):
tree = name.split("__")
if name.startswith("_"):
raise AttributeError(f"Attribute {name} not found")
value = None
value_found = False
config = self.config
if len(tree) > 1:
# Walk through the config dictionary using __ as a delimiter
for key in tree[:-1]:
config = config.get(key)
if config is None:
break
if config and tree[-1] in config:
value = config[tree[-1]]
value_found = True
if value_found:
return value
else:
return self.defaults.get(name)
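# Illustrative nested lookup (hypothetical example, not in the original file):
#   cfg = BaseConfig({"services": {"github": {"token": "abc"}}})
#   cfg.services__github__token  ->  "abc"
# Attribute names are split on "__" and walked through the config dict, falling back
# to the class-level `defaults` when nothing is found.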
| 28.413043 | 97 | 0.560826 | 1,289 | 0.986228 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.223412 |
73b21fcf6f7c734702d8957b8a9a200636e97246
| 8,995 |
py
|
Python
|
scikit_algo/All.py
|
sankar-mukherjee/CoFee
|
d05b461a6cdd581be0f8084a804f02be3332ccdd
|
[
"Apache-2.0"
] | null | null | null |
scikit_algo/All.py
|
sankar-mukherjee/CoFee
|
d05b461a6cdd581be0f8084a804f02be3332ccdd
|
[
"Apache-2.0"
] | null | null | null |
scikit_algo/All.py
|
sankar-mukherjee/CoFee
|
d05b461a6cdd581be0f8084a804f02be3332ccdd
|
[
"Apache-2.0"
] | null | null | null |
"""
Created on Tue Feb 24 16:08:39 2015
@author: mukherjee
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, metrics
from sklearn.learning_curve import learning_curve
# read Form data
DATA_FORM_FILE = 'all-merged-cat.csv'
#rawdata = pd.read_csv(DATA_FORM_FILE, usecols=np.r_[3,5:12,13:28,81:87,108])
rawdata = pd.read_csv(DATA_FORM_FILE)
#select features
posfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[3:12]].astype(float)
posfeat_name = rawdata.columns.values[3:12]
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[12:14]]
lextypefeat_name = rawdata.columns.values[12:14]
lexfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[14:29]].astype(float)
lexfeat_name = rawdata.columns.values[14:29]
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29:47]]
accoufeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[47:81]].astype(float)
accoufeat_name = rawdata.columns.values[47:81]
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29]].astype(float)
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[13]]
lextypefeat_name = rawdata.columns.values[13:14].astype(object)
# feature name
feat_name = np.concatenate((posfeat_name,accoufeat_name,lexfeat_name),axis=0)
# Transforming categorical feature
le = preprocessing.LabelBinarizer()
le.fit(lextypefeat)
list(le.classes_)
lextypefeat = le.transform(lextypefeat)
#----------------------------------------------------------------------------------------------------
# select feature combination
featN = np.column_stack((posfeat,accoufeat))
#featB = np.column_stack((lexfeat,lextypefeat))
featB = lexfeat
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=4)
#####------------------------------------------- Randomized PCA
##from sklearn.decomposition import RandomizedPCA
##pca = RandomizedPCA(n_components=30, whiten=True)
###
#scale = pca.fit(feat1)
#feat1 = scale.fit_transform(feat1)
feat = np.column_stack((featN,featB))
feat[np.isnan(feat)] = 0
feat[np.isinf(feat)] = 0
# select test labels
#Ytest = pd.DataFrame.as_matrix(rawdata)[:,20:26].astype(float)
label = pd.DataFrame.as_matrix(rawdata)[:,108]
#remove bad features as there is no label
scale = np.where(label == 'None')
label = np.delete(label,scale)
feat = np.delete(feat,scale,0)
#----------------------------------------------------------------------------------------------------
# Transforming categorical feature
le = preprocessing.LabelEncoder()
le.fit(label)
list(le.classes_)
label = le.transform(label)
# create training and test data by partitioning
nSamples = len(feat)
XtrainPos = feat[:int(.7 * nSamples), :]
YtrainPos = label[:int(.7 * nSamples)]
XtestPos = feat[int(.7 * nSamples):, :]
YtestPos = label[int(.7 * nSamples):]
XtrainAll = feat
#----------------------------------------------------------------------------------------------------
#normalization of features
scale = preprocessing.StandardScaler().fit(XtrainPos)
XtrainPos = scale.transform(XtrainPos)
XtestPos = scale.transform(XtestPos)
# for whole data set
scaleAll = preprocessing.StandardScaler().fit(XtrainAll)
XtrainAll = scaleAll.transform(XtrainAll)
#scale = preprocessing.MinMaxScaler()
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.MinMaxScaler()
#XtrainAll = scaleAll.fit_transform(XtrainAll)
#scale = preprocessing.Normalizer().fit(XtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.Normalizer().fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- RandomizedLogisticRegression
#from sklearn.linear_model import RandomizedLogisticRegression
#scale = RandomizedLogisticRegression()
#XtrainPos = scale.fit_transform(XtrainPos,YtrainPos)
#XtestPos = scale.transform(XtestPos)
#XtrainAll = scale.fit_transform(XtrainAll,label)
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=30)
####------------------------------------------- Randomized PCA
#from sklearn.decomposition import RandomizedPCA
#pca = RandomizedPCA(n_components=30, whiten=True)
##
##
#scale = pca.fit(XtrainPos)
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#scaleAll = pca.fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- LDA
#from sklearn.lda import LDA
#lda = LDA(n_components=4)
#scale = lda.fit(XtrainPos,YtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = lda.fit(XtrainAll,label)
#XtrainAll = scaleAll.transform(XtrainAll)
#--------------------------------------------classification-------------------------------------------
##GradientBoost
#from sklearn.ensemble import GradientBoostingClassifier
#clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
# max_depth=1, random_state=0)
## SVM
#from sklearn import svm
#clf = svm.SVC()
#from sklearn.multiclass import OneVsOneClassifier
#from sklearn.multiclass import OutputCodeClassifier
#clf = OutputCodeClassifier(svm.SVC())
## RandomForest
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(min_samples_leaf=10)
## SGD
#from sklearn.linear_model import SGDClassifier
#clf = SGDClassifier(loss="log", penalty="l2")
# CART
#from sklearn import tree
#clf = tree.DecisionTreeClassifier()
#
### AdaBoostClassifier
#from sklearn.ensemble import AdaBoostClassifier
#clf = AdaBoostClassifier(n_estimators=100)
# Gaussian Naive Bayes
#from sklearn.naive_bayes import GaussianNB
#clf = GaussianNB()
# KNN
#from sklearn import neighbors
##clf = neighbors.KNeighborsClassifier(n_neighbors=10,weights='distance')
#clf = neighbors.KNeighborsClassifier(n_neighbors=10)
##-------------------------------------------------Traning------------------
clf = clf.fit(XtrainPos, YtrainPos)
print(metrics.classification_report(YtestPos, clf.predict(XtestPos)))
##--------------------------Crossvalidation 5 times using different split------------------------------
#from sklearn import cross_validation
#scores = cross_validation.cross_val_score(clf, XtrainAll, label, cv=3, scoring='f1')
#print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
####---------------------------------Check for overfitting-------------------------------------
train_sample_size, train_scores, test_scores = learning_curve(clf,
XtrainAll, label,
train_sizes=np.arange(0.1,1,0.1), cv=10)
#----------------------------------------Visualization---------------------------------------------
plt.xlabel("# Training sample")
plt.ylabel("Accuracy")
plt.grid();
mean_train_scores = np.mean(train_scores, axis=1)
mean_test_scores = np.mean(test_scores, axis=1)
std_train_scores = np.std(train_scores, axis=1)
std_test_scores = np.std(test_scores, axis=1)
gap = np.abs(mean_test_scores - mean_train_scores)
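# Note (added comment): a large gap between the training and cross-validation curves
# indicates overfitting, while two low, converged curves suggest underfitting.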
g = plt.figure(1)
plt.title("Learning curves for %r\n"
"Best test score: %0.2f - Gap: %0.2f" %
(clf, mean_test_scores.max(), gap[-1]))
plt.plot(train_sample_size, mean_train_scores, label="Training", color="b")
plt.fill_between(train_sample_size, mean_train_scores - std_train_scores,
mean_train_scores + std_train_scores, alpha=0.1, color="b")
plt.plot(train_sample_size, mean_test_scores, label="Cross-validation",
color="g")
plt.fill_between(train_sample_size, mean_test_scores - std_test_scores,
mean_test_scores + std_test_scores, alpha=0.1, color="g")
plt.legend(loc="lower right")
g.show()
## confusion matrix
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(YtestPos,clf.predict(XtestPos))
## Show confusion matrix in a separate window
#plt.matshow(cm)
#plt.title('Confusion matrix')
#plt.colorbar()
#plt.ylabel('True label')
#plt.xlabel('Predicted label')
#plt.show()
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
f = plt.figure(2,figsize=(18, 18))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, feat_name[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig('feature_importance')
f.show()
| 36.864754 | 104 | 0.642023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,115 | 0.568649 |
73b2b67943acda046ca7c7f56efd2e03603a7e68
| 4,140 |
py
|
Python
|
tests/test_client.py
|
KazkiMatz/py-googletrans
|
c1d6d5d27c7386c2a1aa6c78dfe376dbb910f7a5
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
KazkiMatz/py-googletrans
|
c1d6d5d27c7386c2a1aa6c78dfe376dbb910f7a5
|
[
"MIT"
] | 1 |
2020-11-28T18:53:18.000Z
|
2020-11-28T18:53:18.000Z
|
tests/test_client.py
|
TashinAhmed/googletrans
|
9c0014cdcdc22e1f146624279f8dd69c3c62e385
|
[
"MIT"
] | null | null | null |
from httpcore import TimeoutException
from httpcore._exceptions import ConnectError
from httpx import Timeout, Client, ConnectTimeout
from unittest.mock import patch
from pytest import raises
from googletrans import Translator
def test_bind_multiple_service_urls():
service_urls = [
'translate.google.com',
'translate.google.co.kr',
]
translator = Translator(service_urls=service_urls)
assert translator.service_urls == service_urls
assert translator.translate('test', dest='ko')
assert translator.detect('Hello')
def test_api_service_urls():
service_urls = ['translate.googleapis.com']
translator = Translator(service_urls=service_urls)
assert translator.service_urls == service_urls
assert translator.translate('test', dest='ko')
assert translator.detect('Hello')
def test_source_language(translator):
result = translator.translate('안녕하세요.')
assert result.src == 'ko'
def test_pronunciation(translator):
result = translator.translate('안녕하세요.', dest='ja')
assert result.pronunciation == 'Kon\'nichiwa.'
def test_pronunciation_issue_175(translator):
result = translator.translate('Hello', src='en', dest='ru')
assert result.pronunciation is not None
def test_latin_to_english(translator):
result = translator.translate('veritas lux mea', src='la', dest='en')
assert result.text == 'The truth is my light'
def test_unicode(translator):
result = translator.translate(u'안녕하세요.', src='ko', dest='ja')
assert result.text == u'こんにちは。'
def test_emoji(translator):
result = translator.translate('😀')
assert result.text == u'😀'
def test_language_name(translator):
result = translator.translate(u'Hello', src='ENGLISH', dest='iRiSh')
assert result.text == u'Dia dhuit'
def test_language_name_with_space(translator):
result = translator.translate(
u'Hello', src='en', dest='chinese (simplified)')
assert result.dest == 'zh-cn'
def test_language_rfc1766(translator):
result = translator.translate(u'luna', src='it_ch@euro', dest='en')
assert result.text == u'moon'
def test_special_chars(translator):
text = u"©×《》"
result = translator.translate(text, src='en', dest='en')
assert result.text == text
def test_translate_list(translator):
args = (['test', 'exam'], 'ko', 'en')
translations = translator.translate(*args)
assert translations[0].text == u'테스트'
assert translations[1].text == u'시험'
def test_detect_language(translator):
ko = translator.detect(u'한국어')
en = translator.detect('English')
rubg = translator.detect('тест')
assert ko.lang == 'ko'
assert en.lang == 'en'
assert rubg.lang == ['ru', 'bg']
def test_detect_list(translator):
items = [u'한국어', ' English', 'тест']
result = translator.detect(items)
assert result[0].lang == 'ko'
assert result[1].lang == 'en'
assert result[2].lang == ['ru', 'bg']
def test_src_in_special_cases(translator):
args = ('Tere', 'en', 'ee')
result = translator.translate(*args)
assert result.text in ('Hello', 'Hi,')
def test_src_not_in_supported_languages(translator):
args = ('Hello', 'en', 'zzz')
with raises(ValueError):
translator.translate(*args)
def test_dest_in_special_cases(translator):
args = ('hello', 'ee', 'en')
result = translator.translate(*args)
assert result.text == 'Tere'
def test_dest_not_in_supported_languages(translator):
args = ('Hello', 'zzz', 'en')
with raises(ValueError):
translator.translate(*args)
def test_timeout():
# httpx will raise ConnectError in some conditions
with raises((TimeoutException, ConnectError, ConnectTimeout)):
translator = Translator(timeout=Timeout(0.0001))
translator.translate('안녕하세요.')
class MockResponse:
def __init__(self, status_code):
self.status_code = status_code
self.text = 'tkk:\'translation\''
@patch.object(Client, 'get', return_value=MockResponse('403'))
def test_403_error(session_mock):
translator = Translator()
assert translator.translate('test', dest='ko')
| 25.714286 | 73 | 0.68913 | 137 | 0.032357 | 0 | 0 | 177 | 0.041804 | 0 | 0 | 741 | 0.175012 |
73b2dbd6e7f9c859fe75e459a5b5509630530b13
| 3,324 |
py
|
Python
|
Network/class_func.py
|
Mobad225/S-DCNet
|
a5fff5da2e04441f1f9133944ad09bdf087896e6
|
[
"MIT"
] | 153 |
2019-07-31T07:27:11.000Z
|
2022-01-05T08:52:56.000Z
|
Network/class_func.py
|
Mobad225/S-DCNet
|
a5fff5da2e04441f1f9133944ad09bdf087896e6
|
[
"MIT"
] | 17 |
2019-09-11T07:45:29.000Z
|
2021-04-20T05:10:47.000Z
|
Network/class_func.py
|
Mobad225/S-DCNet
|
a5fff5da2e04441f1f9133944ad09bdf087896e6
|
[
"MIT"
] | 30 |
2019-08-20T05:35:20.000Z
|
2021-11-07T07:49:19.000Z
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Func1: change density map into count map
# density map: batch size * 1 * w * h
def get_local_count(density_map,psize,pstride):
IF_gpu = torch.cuda.is_available() # if gpu, return gpu
IF_ret_gpu = (density_map.device.type == 'cuda')
psize,pstride = int(psize),int(pstride)
density_map = density_map.cpu().type(torch.float32)
conv_kernel = torch.ones(1,1,psize,psize,dtype = torch.float32)
if IF_gpu:
density_map,conv_kernel = density_map.cuda(),conv_kernel.cuda()
count_map = F.conv2d(density_map,conv_kernel,stride=pstride)
if not IF_ret_gpu:
count_map = count_map.cpu()
return count_map
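# Illustrative shapes (hypothetical example): with non-overlapping patches,
#   dmap = torch.rand(8, 1, 64, 64)
#   get_local_count(dmap, psize=16, pstride=16)  # -> count map of shape (8, 1, 4, 4)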
# Func2: convert count to class (0->c-1)
def Count2Class(count_map,label_indice):
if isinstance(label_indice,np.ndarray):
label_indice = torch.from_numpy(label_indice)
IF_gpu = torch.cuda.is_available()
IF_ret_gpu = (count_map.device.type == 'cuda')
label_indice = label_indice.cpu().type(torch.float32)
cls_num = len(label_indice)+1
cls_map = torch.zeros(count_map.size()).type(torch.LongTensor)
if IF_gpu:
count_map,label_indice,cls_map = count_map.cuda(),label_indice.cuda(),cls_map.cuda()
for i in range(cls_num-1):
if IF_gpu:
cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor).cuda()
else:
cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor)
if not IF_ret_gpu:
cls_map = cls_map.cpu()
return cls_map
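# Illustrative mapping (hypothetical example): with label_indice = [0.5, 1.0, 1.5],
# a local count of 0.3 maps to class 0, 0.7 to class 1, 1.2 to class 2, 1.8 to class 3.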
# Func3: convert class (0->c-1) to count number
def Class2Count(pre_cls,label_indice):
'''
# --Input:
# 1.pre_cls is class label range in [0,1,2,...,C-1]
# 2.label_indice not include 0 but the other points
# --Output:
# 1.count value, the same size as pre_cls
'''
if isinstance(label_indice,np.ndarray):
label_indice = torch.from_numpy(label_indice)
label_indice = label_indice.squeeze()
IF_gpu = torch.cuda.is_available()
IF_ret_gpu = (pre_cls.device.type == 'cuda')
    # transform interval to count value map
label2count = [0.0]
for (i,item) in enumerate(label_indice):
if i<label_indice.size()[0]-1:
tmp_count = (label_indice[i]+label_indice[i+1])/2
else:
tmp_count = label_indice[i]
label2count.append(tmp_count)
label2count = torch.tensor(label2count)
label2count = label2count.type(torch.FloatTensor)
#outputs = outputs.max(dim=1)[1].cpu().data
ORI_SIZE = pre_cls.size()
pre_cls = pre_cls.reshape(-1).cpu()
pre_counts = torch.index_select(label2count,0,pre_cls.cpu().type(torch.LongTensor))
pre_counts = pre_counts.reshape(ORI_SIZE)
if IF_ret_gpu:
pre_counts = pre_counts.cuda()
return pre_counts
if __name__ == '__main__':
pre_cls = torch.Tensor([[0,1,2],[3,4,4]])
label_indice =torch.Tensor([0.5,1,1.5,2])
pre_counts = Class2Count(pre_cls,label_indice)
print(pre_cls)
print(label_indice)
print(pre_counts)
pre_cls = Count2Class(pre_counts,label_indice)
print(pre_cls)
| 34.625 | 99 | 0.647112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.160048 |
73b325d3f7c7dfbcd48251ddfe6b8d3299767cb6
| 540 |
py
|
Python
|
src/python/pants/backend/codegen/avro/avro_subsystem.py
|
danxmoran/pants
|
7fafd7d789747c9e6a266847a0ccce92c3fa0754
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/avro/avro_subsystem.py
|
danxmoran/pants
|
7fafd7d789747c9e6a266847a0ccce92c3fa0754
|
[
"Apache-2.0"
] | 22 |
2022-01-27T09:59:50.000Z
|
2022-03-30T07:06:49.000Z
|
src/python/pants/backend/codegen/avro/avro_subsystem.py
|
danxmoran/pants
|
7fafd7d789747c9e6a266847a0ccce92c3fa0754
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.option.option_types import BoolOption
from pants.option.subsystem import Subsystem
class AvroSubsystem(Subsystem):
options_scope = "avro"
help = "General Avro codegen settings."
tailor = BoolOption(
"--tailor",
default=True,
help="If true, add `avro_sources` targets with the `tailor` goal.",
advanced=True,
)
| 27 | 75 | 0.709259 | 275 | 0.509259 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.440741 |
73b51f1631247fbf3daf41c2e06e80f0d22df79c
| 11,864 |
py
|
Python
|
shade/tests/unit/test_shade.py
|
mail2nsrajesh/shade
|
65ce1a22896e52ff59a23a393e3bc4227f55f006
|
[
"Apache-2.0"
] | null | null | null |
shade/tests/unit/test_shade.py
|
mail2nsrajesh/shade
|
65ce1a22896e52ff59a23a393e3bc4227f55f006
|
[
"Apache-2.0"
] | null | null | null |
shade/tests/unit/test_shade.py
|
mail2nsrajesh/shade
|
65ce1a22896e52ff59a23a393e3bc4227f55f006
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
import testtools
import shade
from shade import _utils
from shade import exc
from shade.tests import fakes
from shade.tests.unit import base
RANGE_DATA = [
dict(id=1, key1=1, key2=5),
dict(id=2, key1=1, key2=20),
dict(id=3, key1=2, key2=10),
dict(id=4, key1=2, key2=30),
dict(id=5, key1=3, key2=40),
dict(id=6, key1=3, key2=40),
]
class TestShade(base.RequestsMockTestCase):
def setUp(self):
        # This set of tests is not testing neutron; it's testing
        # rebuilding servers, but we do several network calls in service
        # of a NORMAL rebuild to find the default_network. Putting
        # in all of the neutron mocks for that will make the tests harder
        # to read. SO - we're going to mock neutron into the off position
        # and then turn it back on in the few tests that specifically do.
# Maybe we should reorg these into two classes - one with neutron
# mocked out - and one with it not mocked out
super(TestShade, self).setUp()
self.has_neutron = False
def fake_has_service(*args, **kwargs):
return self.has_neutron
self.cloud.has_service = fake_has_service
def test_openstack_cloud(self):
self.assertIsInstance(self.cloud, shade.OpenStackCloud)
@mock.patch.object(shade.OpenStackCloud, 'search_images')
def test_get_images(self, mock_search):
image1 = dict(id='123', name='mickey')
mock_search.return_value = [image1]
r = self.cloud.get_image('mickey')
self.assertIsNotNone(r)
self.assertDictEqual(image1, r)
@mock.patch.object(shade.OpenStackCloud, 'search_images')
def test_get_image_not_found(self, mock_search):
mock_search.return_value = []
r = self.cloud.get_image('doesNotExist')
self.assertIsNone(r)
def test_get_server(self):
server1 = fakes.make_fake_server('123', 'mickey')
server2 = fakes.make_fake_server('345', 'mouse')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [server1, server2]}),
])
r = self.cloud.get_server('mickey')
self.assertIsNotNone(r)
self.assertEqual(server1['name'], r['name'])
self.assert_calls()
def test_get_server_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': []}),
])
r = self.cloud.get_server('doesNotExist')
self.assertIsNone(r)
self.assert_calls()
def test_list_servers_exception(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
status_code=400)
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.list_servers)
self.assert_calls()
def test__neutron_exceptions_resource_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
status_code=404)
])
self.assertRaises(exc.OpenStackCloudResourceNotFound,
self.cloud.list_networks)
self.assert_calls()
def test__neutron_exceptions_url_not_found(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
status_code=404)
])
self.assertRaises(exc.OpenStackCloudURINotFound,
self.cloud.list_networks)
self.assert_calls()
def test_list_servers(self):
server_id = str(uuid.uuid4())
server_name = self.getUniqueString('name')
fake_server = fakes.make_fake_server(server_id, server_name)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
r = self.cloud.list_servers()
self.assertEqual(1, len(r))
self.assertEqual(server_name, r[0]['name'])
self.assert_calls()
def test_list_servers_all_projects(self):
'''This test verifies that when list_servers is called with
`all_projects=True` that it passes `all_tenants=True` to nova.'''
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['all_tenants=True']),
complete_qs=True,
json={'servers': []}),
])
self.cloud.list_servers(all_projects=True)
self.assert_calls()
def test_iterate_timeout_bad_wait(self):
with testtools.ExpectedException(
exc.OpenStackCloudException,
"Wait value must be an int or float value."):
for count in _utils._iterate_timeout(
1, "test_iterate_timeout_bad_wait", wait="timeishard"):
pass
@mock.patch('time.sleep')
def test_iterate_timeout_str_wait(self, mock_sleep):
iter = _utils._iterate_timeout(
10, "test_iterate_timeout_str_wait", wait="1.6")
next(iter)
next(iter)
mock_sleep.assert_called_with(1.6)
@mock.patch('time.sleep')
def test_iterate_timeout_int_wait(self, mock_sleep):
iter = _utils._iterate_timeout(
10, "test_iterate_timeout_int_wait", wait=1)
next(iter)
next(iter)
mock_sleep.assert_called_with(1.0)
@mock.patch('time.sleep')
def test_iterate_timeout_timeout(self, mock_sleep):
message = "timeout test"
with testtools.ExpectedException(
exc.OpenStackCloudTimeout,
message):
for count in _utils._iterate_timeout(0.1, message, wait=1):
pass
mock_sleep.assert_called_with(1.0)
def test__nova_extensions(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
extensions = self.cloud._nova_extensions()
self.assertEqual(set(['NMN', 'OS-DCF']), extensions)
self.assert_calls()
def test__nova_extensions_fails(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
status_code=404),
])
with testtools.ExpectedException(
exc.OpenStackCloudURINotFound,
"Error fetching extension list for nova"
):
self.cloud._nova_extensions()
self.assert_calls()
def test__has_nova_extension(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
self.assertTrue(self.cloud._has_nova_extension('NMN'))
self.assert_calls()
def test__has_nova_extension_missing(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
self.assertFalse(self.cloud._has_nova_extension('invalid'))
self.assert_calls()
def test_range_search(self):
filters = {"key1": "min", "key2": "20"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(1, len(retval))
self.assertEqual([RANGE_DATA[1]], retval)
def test_range_search_2(self):
filters = {"key1": "<=2", "key2": ">10"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(2, len(retval))
self.assertEqual([RANGE_DATA[1], RANGE_DATA[3]], retval)
def test_range_search_3(self):
filters = {"key1": "2", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(0, len(retval))
def test_range_search_4(self):
filters = {"key1": "max", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(0, len(retval))
def test_range_search_5(self):
filters = {"key1": "min", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(1, len(retval))
self.assertEqual([RANGE_DATA[0]], retval)
| 34.99705 | 76 | 0.567515 | 10,932 | 0.921443 | 0 | 0 | 1,418 | 0.119521 | 0 | 0 | 3,089 | 0.260367 |
73b53eb4cdb22bcc92d1f7a0efda19417f586729
| 3,780 |
py
|
Python
|
plots_tournament.py
|
rradules/opponent_modelling_monfg
|
eb28546a6024613a76c942a2e53a48e6a8d83233
|
[
"MIT"
] | 1 |
2021-03-04T04:40:50.000Z
|
2021-03-04T04:40:50.000Z
|
plots_tournament.py
|
rradules/opponent_modelling_monfg
|
eb28546a6024613a76c942a2e53a48e6a8d83233
|
[
"MIT"
] | null | null | null |
plots_tournament.py
|
rradules/opponent_modelling_monfg
|
eb28546a6024613a76c942a2e53a48e6a8d83233
|
[
"MIT"
] | null | null | null |
import matplotlib
import pandas as pd
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import seaborn as sns
from utils.utils import mkdir_p
sns.set()
sns.despine()
sns.set_context("paper", rc={"font.size": 18, "axes.labelsize": 18, "xtick.labelsize": 15, "ytick.labelsize": 15,
"legend.fontsize": 16})
sns.set_style('white', {'axes.edgecolor': "0.5", "pdf.fonttype": 42})
plt.gcf().subplots_adjust(bottom=0.15, left=0.14)
def plot_results(game, mooc, path_data, experiment):
path_plots = f'plots/tour_{experiment}_{game}_l{l1}_{l2}'
mkdir_p(path_plots)
df1 = pd.read_csv(f'{path_data}/agent1_payoff_{info}.csv')
ax = sns.lineplot(x='Episode', y='Payoff', linewidth=2.0, data=df1, ci='sd',
label=f'Agent 1')
df2 = pd.read_csv(f'{path_data}/agent2_payoff_{info}.csv')
ax = sns.lineplot(x='Episode', y='Payoff', linewidth=2.0, data=df2, ci='sd',
                      label=f'Agent 2')
ax.set(ylabel='Scalarised payoff per step')
ax.set(xlabel='Iterations')
# ax.set_ylim(0, 14)
ax.set_xlim(0, episodes)
plot_name = f"{path_plots}/payoffs"
# plt.title("Agent 1")
plt.savefig(plot_name + ".pdf")
plt.clf()
if game in ['iagRNE', 'iagR', 'iagM']:
x_axis_labels = ["L", "M"]
y_axis_labels = ["L", "M"]
else:
x_axis_labels = ["L", "M", "R"]
y_axis_labels = ["L", "M", "R"]
df = pd.read_csv(f'{path_data}/states_{info}_{l1}_{l2}.csv', header=None)
ax = sns.heatmap(df, annot=True, cmap="YlGnBu", vmin=0, vmax=1, xticklabels=x_axis_labels,
yticklabels=y_axis_labels)
plot_name = f"{path_plots}/states"
plt.savefig(plot_name + ".pdf")
plt.clf()
# action probs
df1 = pd.read_csv(f'{path_data}/agent1_probs_{info}.csv')
ax = sns.lineplot(x='Episode', y='Action 1', linewidth=2.0, data=df1, ci='sd',
label='L')
ax = sns.lineplot(x='Episode', y='Action 2', linewidth=2.0, data=df1,
ci='sd', label='M')
if game not in ['iagRNE', 'iagR', 'iagM']:
ax = sns.lineplot(x='Episode', y='Action 3', linewidth=2.0, data=df1,
ci='sd', label='R')
ax.set(ylabel='Action probability')
ax.set(xlabel='Iterations')
ax.set_ylim(0, 1)
ax.set_xlim(0, episodes)
plot_name = f"{path_plots}/probs_ag1"
plt.title(f"Action probabilities - Agent 1")
plt.savefig(plot_name + ".pdf")
plt.clf()
df1 = pd.read_csv(f'{path_data}/agent2_probs_{info}.csv')
ax = sns.lineplot(x='Episode', y='Action 1', linewidth=2.0, data=df1, ci='sd',
label='L')
ax = sns.lineplot(x='Episode', y='Action 2', linewidth=2.0, data=df1,
ci='sd', label='M')
if game not in ['iagRNE', 'iagR', 'iagM']:
ax = sns.lineplot(x='Episode', y='Action 3', linewidth=2.0, data=df1,
ci='sd', label='R')
ax.set(ylabel='Action probability')
ax.set(xlabel='Iterations')
ax.set_ylim(0, 1)
ax.set_xlim(0, episodes)
plot_name = f"{path_plots}/probs_ag2"
plt.title(f"Action probabilities - Agent 2")
plt.savefig(plot_name + ".pdf")
plt.clf()
if __name__ == "__main__":
experiment = ['Q', 'Q']
info = '0M'
l1 = 1
l2 = 1
episodes = 5000
moocs = ['SER']
games = ['iag', 'iagR', 'iagM', 'iagRNE', 'iagNE'] # ['iagRNE'] # ['iag']['iagM']'iagNE',
for l1 in range(1, 2):
for l2 in range(1, 2):
for mooc in moocs:
for game in games:
path_data = f'results/tour_{experiment}_{game}_l{l1}_{l2}'
plot_results(game, mooc, path_data, experiment)
| 34.678899 | 113 | 0.579101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,195 | 0.316138 |
73b6522af809e94b26c9f10e4657b8e31125731b
| 3,979 |
py
|
Python
|
test/test_wrapper.py
|
bertsky/ocrd_keraslm
|
da105a8a8b68844389cd3e08307c05c9c6123350
|
[
"Apache-2.0"
] | null | null | null |
test/test_wrapper.py
|
bertsky/ocrd_keraslm
|
da105a8a8b68844389cd3e08307c05c9c6123350
|
[
"Apache-2.0"
] | null | null | null |
test/test_wrapper.py
|
bertsky/ocrd_keraslm
|
da105a8a8b68844389cd3e08307c05c9c6123350
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
import shutil
from unittest import TestCase, main
from ocrd.resolver import Resolver
from ocrd_models.ocrd_page import to_xml
from ocrd_modelfactory import page_from_file
from ocrd_utils import MIMETYPE_PAGE
from ocrd_tesserocr.recognize import TesserocrRecognize
from ocrd_keraslm.wrapper import KerasRate
WORKSPACE_DIR = '/tmp/pyocrd-test-ocrd_keraslm'
PWD = os.path.dirname(os.path.realpath(__file__))
class TestKerasRate(TestCase):
def setUp(self):
if os.path.exists(WORKSPACE_DIR):
shutil.rmtree(WORKSPACE_DIR)
#os.makedirs(WORKSPACE_DIR)
shutil.copytree('test/assets/', WORKSPACE_DIR)
def runTest(self):
resolver = Resolver()
# workspace = resolver.workspace_from_url('test/assets/kant_aufklaerung_1784/data/mets.xml',
# dst_dir=WORKSPACE_DIR, download=True)
# self.assertIsNotNone(workspace)
# workaround for OCR-D/core#319:
workspace = resolver.workspace_from_url(WORKSPACE_DIR + '/kant_aufklaerung_1784/data/mets.xml')
self.assertIsNotNone(workspace)
for file_ in workspace.mets.find_files(fileGrp='OCR-D-GT-PAGE'):
workspace.download_file(file_)
#
# rate text alternative 1 on the word level:
#
KerasRate(
workspace,
input_file_grp='OCR-D-GT-PAGE', # has wrong tokenisation but that's ok now
output_file_grp='OCR-D-LM-WORD',
parameter={'textequiv_level': 'word',
'alternative_decoding': False,
'model_file': PWD + '/../model_dta_test.h5'}
).process()
workspace.save_mets()
for file in workspace.mets.find_files(fileGrp='OCR-D-LM-WORD'):
pcgts = page_from_file(workspace.download_file(file))
metadata = pcgts.get_Metadata()
self.assertIsNotNone(metadata)
metadataitems = metadata.get_MetadataItem()
self.assertIsNotNone(metadataitems)
rated = any([i for i in metadataitems if i.get_value() == 'ocrd-keraslm-rate'])
self.assertTrue(rated)
#
# rate and viterbi-decode all text alternatives on the glyph level:
#
TesserocrRecognize( # we need this to get alternatives to decode
workspace,
input_file_grp='OCR-D-GT-PAGE', # has wrong tokenisation but that's ok now
output_file_grp='OCR-D-OCR-TESS-GLYPH',
parameter={'textequiv_level': 'glyph',
'overwrite_words': True,
'model': 'deu-frak'} # old model for alternatives
).process()
workspace.save_mets()
KerasRate(
workspace,
input_file_grp='OCR-D-OCR-TESS-GLYPH',
output_file_grp='OCR-D-LM-GLYPH',
parameter={'textequiv_level': 'glyph',
'alternative_decoding': True,
'beam_width': 10, # not too slow
'model_file': PWD + '/../model_dta_test.h5'}
).process()
workspace.save_mets()
for file in workspace.mets.find_files(fileGrp='OCR-D-LM-GLYPH'):
pcgts = page_from_file(workspace.download_file(file))
metadata = pcgts.get_Metadata()
self.assertIsNotNone(metadata)
metadataitems = metadata.get_MetadataItem()
self.assertIsNotNone(metadataitems)
rated = any([i for i in metadataitems if i.get_value() == 'ocrd-keraslm-rate'])
self.assertTrue(rated)
page = pcgts.get_Page()
for region in page.get_TextRegion():
for line in region.get_TextLine():
for word in line.get_Word():
for glyph in word.get_Glyph():
self.assertEqual(len(glyph.get_TextEquiv()), 1) # only 1-best results
if __name__ == '__main__':
main()
| 43.25 | 103 | 0.605931 | 3,516 | 0.883639 | 0 | 0 | 0 | 0 | 0 | 0 | 1,084 | 0.27243 |
73b6bd8f4831b3ecbdd4ef2d6b98086651e18b51
| 16,415 |
py
|
Python
|
meltingpot/python/configs/substrates/territory_rooms.py
|
Rohan138/meltingpot
|
d4e3839225b78babcedbbbf95cf747ff9e0a87b5
|
[
"Apache-2.0"
] | null | null | null |
meltingpot/python/configs/substrates/territory_rooms.py
|
Rohan138/meltingpot
|
d4e3839225b78babcedbbbf95cf747ff9e0a87b5
|
[
"Apache-2.0"
] | null | null | null |
meltingpot/python/configs/substrates/territory_rooms.py
|
Rohan138/meltingpot
|
d4e3839225b78babcedbbbf95cf747ff9e0a87b5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Territory: Rooms.
Example video: https://youtu.be/u0YOiShqzA4
See _Territory: Open_ for the general description of the mechanics at play in
this substrate.
In this substrate, _Territory: Rooms_, individuals start in segregated rooms
that strongly suggest a partition individuals could adhere to. They can break
down the walls of these regions and invade each other's "natural territory", but
the destroyed resources are lost forever. A peaceful partition is possible at
the start of the episode, and the policy to achieve it is easy to implement. But
if any agent gets too greedy and invades, it buys itself a chance of large
rewards, but also chances inflicting significant chaos and deadweight loss on
everyone if its actions spark wider conflict. The reason it can spiral out of
control is that once an agent's neighbor has left their natural territory then
it becomes rational to invade the space, leaving one's own territory undefended,
creating more opportunity for mischief by others.
"""
from typing import Any, Dict
from ml_collections import config_dict
from meltingpot.python.utils.substrates import colors
from meltingpot.python.utils.substrates import game_object_utils
from meltingpot.python.utils.substrates import shapes
from meltingpot.python.utils.substrates import specs
_COMPASS = ["N", "E", "S", "W"]
# This number just needs to be greater than the number of players.
MAX_ALLOWED_NUM_PLAYERS = 10
DEFAULT_ASCII_MAP = """
WRRRRRWWRRRRRWWRRRRRW
R RR RR R
R RR RR R
R P RR P RR P R
R RR RR R
R RR RR R
WRRRRRWWRRRRRWWRRRRRW
WRRRRRWWRRRRRWWRRRRRW
R RR RR R
R RR RR R
R P RR P RR P R
R RR RR R
R RR RR R
WRRRRRWWRRRRRWWRRRRRW
WRRRRRWWRRRRRWWRRRRRW
R RR RR R
R RR RR R
R P RR P RR P R
R RR RR R
R RR RR R
WRRRRRWWRRRRRWWRRRRRW
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"P": "spawn_point",
"W": "wall",
"R": {"type": "all", "list": ["resource", "reward_indicator"]},
}
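# A minimal helper sketched here for illustration only (it is not used by the
# substrate): it tallies how many times each mapped character appears in the
# ascii map above, i.e. how many wall, spawn point and resource tiles the map
# will be populated with.  Note that "R" expands into two objects (a resource
# plus its reward indicator) through the "all"-type entry.
def _count_mapped_chars(ascii_map=DEFAULT_ASCII_MAP):
  counts = {}
  for char in ascii_map:
    if char in CHAR_PREFAB_MAP:
      counts[char] = counts.get(char, 0) + 1
  return counts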
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall",],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [True]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "AllBeamBlocker",
"kwargs": {}
},
]
}
SPAWN_POINT = {
"name": "spawn_point",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "playerSpawnPoint",
"stateConfigs": [{
"state": "playerSpawnPoint",
"layer": "logic",
"groups": ["spawnPoints"],
}],
}
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "invisible",
"spriteNames": [],
"spriteRGBColors": []
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
]
}
RESOURCE = {
"name": "resource",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "unclaimed",
"stateConfigs": [
{"state": "unclaimed",
"layer": "upperPhysical",
"sprite": "UnclaimedResourceSprite",
"groups": ["unclaimedResources"]},
{"state": "destroyed"},
],
}
},
{
"component": "Appearance",
"kwargs": {
"spriteNames": ["UnclaimedResourceSprite"],
# This color is grey.
"spriteRGBColors": [(64, 64, 64, 255)]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "Resource",
"kwargs": {
"initialHealth": 2,
"destroyedState": "destroyed",
"reward": 1.0,
"rewardRate": 0.01,
"rewardDelay": 100
}
},
]
}
REWARD_INDICATOR = {
"name": "reward_indicator",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "inactive",
"stateConfigs": [
{"state": "active",
"layer": "overlay",
"sprite": "ActivelyRewardingResource"},
{"state": "inactive"},
],
}
},
{
"component": "Appearance",
"kwargs": {
"spriteNames": ["ActivelyRewardingResource",],
"renderMode": "ascii_shape",
"spriteShapes": [shapes.PLUS_IN_BOX],
"palettes": [{"*": (86, 86, 86, 65),
"#": (202, 202, 202, 105),
"@": (128, 128, 128, 135),
"x": (0, 0, 0, 0)}],
"noRotates": [True]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "RewardIndicator",
"kwargs": {
}
},
]
}
# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index.
PLAYER_COLOR_PALETTES = []
for i in range(MAX_ALLOWED_NUM_PLAYERS):
PLAYER_COLOR_PALETTES.append(shapes.get_palette(colors.palette[i]))
# Set up player-specific settings for resources.
for j, color in enumerate(colors.palette[:MAX_ALLOWED_NUM_PLAYERS]):
sprite_name = "Color" + str(j + 1) + "ResourceSprite"
game_object_utils.get_first_named_component(
RESOURCE,
"StateManager")["kwargs"]["stateConfigs"].append({
"state": "claimed_by_" + str(j + 1),
"layer": "upperPhysical",
"sprite": sprite_name,
"groups": ["claimedResources"]
})
game_object_utils.get_first_named_component(
RESOURCE,
"Appearance")["kwargs"]["spriteNames"].append(sprite_name)
game_object_utils.get_first_named_component(
RESOURCE,
"Appearance")["kwargs"]["spriteRGBColors"].append(color)
# PREFABS is a dictionary mapping names to template game objects that can
# be cloned and placed in multiple locations according to an ascii map.
PREFABS = {
"wall": WALL,
"spawn_point": SPAWN_POINT,
"resource": RESOURCE,
"reward_indicator": REWARD_INDICATOR,
}
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0, "fireClaim": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0, "fireClaim": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0, "fireClaim": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0, "fireClaim": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0, "fireClaim": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0, "fireClaim": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0, "fireClaim": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1, "fireClaim": 0}
FIRE_CLAIM = {"move": 0, "turn": 0, "fireZap": 0, "fireClaim": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
FIRE_CLAIM
)
# The Scene object is a non-physical object, its components implement global
# logic.
def create_scene():
"""Creates the global scene."""
scene = {
"name": "scene",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "scene",
"stateConfigs": [{
"state": "scene",
}],
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
},
},
{
"component": "StochasticIntervalEpisodeEnding",
"kwargs": {
"minimumFramesPerEpisode": 1000,
"intervalLength": 100, # Set equal to unroll length.
"probabilityTerminationPerInterval": 0.2
}
}
]
}
return scene
def create_avatar_object(player_idx: int) -> Dict[str, Any]:
"""Create an avatar object that always sees itself as blue."""
# Lua is 1-indexed.
lua_index = player_idx + 1
color_palette = PLAYER_COLOR_PALETTES[player_idx]
live_state_name = "player{}".format(lua_index)
avatar_sprite_name = "avatarSprite{}".format(lua_index)
avatar_object = {
"name": "avatar",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": live_state_name,
"stateConfigs": [
# Initial player state.
{"state": live_state_name,
"layer": "upperPhysical",
"sprite": avatar_sprite_name,
"contact": "avatar",
"groups": ["players"]},
# Player wait state used when they have been zapped out.
{"state": "playerWait",
"groups": ["playerWaits"]},
]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": [avatar_sprite_name],
"spriteShapes": [shapes.CUTE_AVATAR],
"palettes": [color_palette],
"noRotates": [True]
}
},
{
"component": "Avatar",
"kwargs": {
"index": lua_index,
"aliveState": live_state_name,
"waitState": "playerWait",
"spawnGroup": "spawnPoints",
"actionOrder": ["move",
"turn",
"fireZap",
"fireClaim"],
"actionSpec": {
"move": {"default": 0, "min": 0, "max": len(_COMPASS)},
"turn": {"default": 0, "min": -1, "max": 1},
"fireZap": {"default": 0, "min": 0, "max": 1},
"fireClaim": {"default": 0, "min": 0, "max": 1},
},
"view": {
"left": 5,
"right": 5,
"forward": 9,
"backward": 1,
"centered": False
},
}
},
{
"component": "AvatarDirectionIndicator",
# We do not normally use direction indicators for the MAGI suite,
# but we do use them for territory because they function to claim
# any resources they contact.
"kwargs": {"color": (202, 202, 202, 50)}
},
{
"component": "Zapper",
"kwargs": {
"cooldownTime": 2,
"beamLength": 3,
"beamRadius": 1,
"framesTillRespawn": 1e6, # Effectively never respawn.
"penaltyForBeingZapped": 0,
"rewardForZapping": 0,
}
},
{
"component": "ReadyToShootObservation",
},
{
"component": "ResourceClaimer",
"kwargs": {
"color": color_palette["*"],
"playerIndex": lua_index,
"beamLength": 2,
"beamRadius": 0,
"beamWait": 0,
}
},
{
"component": "LocationObserver",
"kwargs": {
"objectIsAvatar": True,
"alsoReportOrientation": True
}
},
{
"component": "Taste",
"kwargs": {
"role": "none",
"rewardAmount": 1.0,
}
},
]
}
return avatar_object
def create_avatar_objects(num_players):
"""Returns list of avatar objects of length 'num_players'."""
avatar_objects = []
for player_idx in range(0, num_players):
game_object = create_avatar_object(player_idx)
avatar_objects.append(game_object)
return avatar_objects
def create_lab2d_settings(num_players: int) -> Dict[str, Any]:
"""Returns the lab2d settings."""
lab2d_settings = {
"levelName": "territory",
"levelDirectory":
"meltingpot/lua/levels",
"numPlayers": num_players,
# Define upper bound of episode length since episodes end stochastically.
"maxEpisodeLengthFrames": 2000,
"spriteSize": 8,
"topology": "TORUS", # Choose from ["BOUNDED", "TORUS"],
"simulation": {
"map": DEFAULT_ASCII_MAP,
"gameObjects": create_avatar_objects(num_players),
"scene": create_scene(),
"prefabs": PREFABS,
"charPrefabMap": CHAR_PREFAB_MAP,
},
}
return lab2d_settings
def get_config(factory=create_lab2d_settings):
"""Default configuration for training on the territory level."""
config = config_dict.ConfigDict()
# Basic configuration.
config.num_players = 9
# Lua script configuration.
config.lab2d_settings = factory(config.num_players)
# Action set configuration.
config.action_set = ACTION_SET
# Observation format configuration.
config.individual_observation_names = [
"RGB",
"READY_TO_SHOOT",
"POSITION",
"ORIENTATION",
]
config.global_observation_names = [
"WORLD.RGB",
]
# The specs of the environment (from a single-agent perspective).
config.action_spec = specs.action(len(ACTION_SET))
config.timestep_spec = specs.timestep({
"RGB": specs.OBSERVATION["RGB"],
"READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
"POSITION": specs.OBSERVATION["POSITION"],
"ORIENTATION": specs.OBSERVATION["ORIENTATION"],
"WORLD.RGB": specs.rgb(168, 168),
})
return config
| 31.266667 | 80 | 0.50003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,556 | 0.460311 |
73b6d9825bd3d60f6c8e389a888e756f7df56287
| 5,269 |
py
|
Python
|
aptitudetech_private/tasks.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | null | null | null |
aptitudetech_private/tasks.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | null | null | null |
aptitudetech_private/tasks.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | 1 |
2019-05-17T00:04:05.000Z
|
2019-05-17T00:04:05.000Z
|
#-*- coding: utf-8 -*-
import frappe
import boto3
import boto3.session
import rows
import json
import zipfile
import tempfile
import sqlite3
from io import BytesIO
from frappe import _
from frappe.utils import cint, flt, today, getdate, get_first_day, add_to_date
try:
from frappe.utils import file_manager
with_file_manager = True
except ImportError:
with_file_manager = False
from frappe.core.doctype.file.file import create_new_folder
SQLVIEW = """
select lineitemusageaccountid as account,
lineitemproductcode as item_group,
productproductfamily as item_code,
productinstancetype as item_type,
pricingterm as item_term,
pricingunit as item_unit,
strftime('%Y-%m-%d', min(billbillingperiodstartdate)) as start_date,
strftime('%Y-%m-%d', max(billbillingperiodenddate)) as end_date,
sum(lineitemusageamount) as consumed_units,
sum(ifnull(lineitemunblendedcost, 0.0)) / sum(ifnull(lineitemusageamount, 1.0)) as cost_per_unit
from billing_aptech
where lineitemlineitemtype != "Tax"
group by lineitemusageaccountid, lineitemproductcode, productproductfamily, productinstancetype, pricingterm, pricingunit
order by lineitemusageaccountid, lineitemproductcode, productproductfamily, productinstancetype, pricingterm, pricingunit
"""
import_fields = u"""
lineItem/UsageAccountId
lineItem/LineItemType
lineItem/ProductCode
product/productFamily
product/instanceType
pricing/term
pricing/unit
bill/BillingPeriodStartDate
bill/BillingPeriodEndDate
lineItem/UsageAmount
lineItem/UnblendedCost
lineItem/UnblendedRate
""".strip().splitlines()
def download_aws():
settings = frappe.get_doc('Monthly Recurring Setup', 'Monthly Recurring Setup')
_today = getdate(add_to_date(today(), months=-1))
if _today.day != cint(settings.processing_day):
return
session = boto3.session.Session(region_name=settings.region_name)
s3client = session.client('s3', config=boto3.session.Config(signature_version='s3v4'))
first_day = get_first_day(_today, 0, 0)
next_month = get_first_day(_today, 0, 1)
manifest = None
with tempfile.NamedTemporaryFile() as temp:
s3client.download_file(
settings.s3_bucket,
'{}/{}/{}-{}/{}-Manifest.json'.format(
settings.report_name,
settings.csv_file_prefix,
str(first_day).replace('-', ''),
str(next_month).replace('-', ''),
settings.csv_file_prefix),
temp.name)
with open(temp.name, 'rb') as f:
manifest = json.load(f)
if not manifest:
return
data = None
with tempfile.NamedTemporaryFile() as temp:
s3client.download_file(
settings.s3_bucket,
manifest['reportKeys'][0],
temp.name)
with zipfile.ZipFile(temp.name) as zf:
for fl in zf.filelist:
data = rows.import_from_csv(BytesIO(zf.read(fl.filename)),
dialect='excel',
import_fields=import_fields)
if not data:
return
tabulated = False
with tempfile.NamedTemporaryFile() as temp:
conn = rows.export_to_sqlite(data, temp.name, 'billing_aptech')
tabulated = rows.import_from_sqlite(conn, query=SQLVIEW)
if not tabulated:
return
if not frappe.db.exists('File', 'Home/' + settings.csv_storage_folder):
create_new_folder(settings.csv_storage_folder, 'Home')
if not frappe.db.exists('File', 'Home/' + settings.csv_storage_folder + '/' + str(_today.year)):
create_new_folder(str(_today.year), 'Home/' + settings.csv_storage_folder)
content = BytesIO()
rows.export_to_csv(tabulated, content)
content.seek(0)
fname = ' '.join([_today.strftime('%m'), _today.strftime('%B')]) + '.csv'
folder = '/'.join(['Home', settings.csv_storage_folder, _today.strftime('%Y')])
full_fname = folder + '/' + fname
if full_fname:
frappe.delete_doc('File', full_fname)
if with_file_manager:
file_manager.save_file(fname, content.read(), None, None, folder, is_private=1)
else:
frappe.new_doc('File').update({
'file_name': fname,
'content': content.read(),
'folder': folder,
'is_private': 1
}).save()
instructions = frappe.get_all('Service Instruction', fields=['*'], filters={'type': 'Amazon Web Services', 'group': 'AWS - Account Info'})
keys = {}
for instruction in instructions:
if not instruction.instruction:
continue
instruction_data = json.loads(instruction.instruction, object_pairs_hook=frappe._dict)
keys[int(instruction_data.aws_account_id)] = [instruction.parent, frappe.db.get_value('Service', instruction.parent, 'service_plan')]
for row in tabulated:
if row.account in keys:
mful_data = {
'service': keys[row.account][0],
'customer': frappe.db.get_value('Service', keys[row.account][0], 'customer'),
'metered_feature': 'MF-000011',
'start_date': row.start_date,
'end_date': add_to_date(row.end_date, days=-1),
'item_group': row.item_group,
'item_code': row.item_code,
'item_type': row.item_type,
'unit': row.item_unit
}
usage_data = {
'consumed_units': row.consumed_units,
'cost_per_unit': (row.cost_per_unit or 0.0) * flt(settings.exchange_rate)
}
mful = frappe.db.exists('Metered Feature Units Log', mful_data)
if mful:
frappe.get_doc('Metered Feature Units Log', mful).update(usage_data).save()
else:
frappe.new_doc('Metered Feature Units Log').update(mful_data).update(usage_data).insert()
| 31.933333 | 139 | 0.72974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,737 | 0.329664 |
73b7ddfb55e7a791df45923bdbfc93d74e627ca1
| 1,983 |
py
|
Python
|
udfs/tests/test_run_udfs.py
|
tslr/bigquery-utils
|
67143b87a24bbbde684aa5ff061f80ffc27c71ed
|
[
"Apache-2.0"
] | null | null | null |
udfs/tests/test_run_udfs.py
|
tslr/bigquery-utils
|
67143b87a24bbbde684aa5ff061f80ffc27c71ed
|
[
"Apache-2.0"
] | null | null | null |
udfs/tests/test_run_udfs.py
|
tslr/bigquery-utils
|
67143b87a24bbbde684aa5ff061f80ffc27c71ed
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parameterized import parameterized
from google.cloud import bigquery
from google.api_core.exceptions import GoogleAPICallError
from utils import Utils
class TestRunUDFs(unittest.TestCase):
@parameterized.expand(Utils.get_all_udf_paths())
def test_run_udf_and_verify_expected_result(self, udf_path):
client = bigquery.Client()
bq_test_dataset = Utils.get_target_bq_dataset(udf_path)
udf_name = Utils.extract_udf_name(udf_path)
test_cases = Utils.load_test_cases(udf_path)
if test_cases.get(udf_name):
for case in test_cases[udf_name]:
try:
actual_result_rows = client.query(
f'SELECT `{bq_test_dataset}.{udf_name}`('
f' {case["input"]} )'
).result()
expected_result_rows = client.query(
f'SELECT {case["expected_output"]}'
).result()
for actual, expected in zip(
actual_result_rows, expected_result_rows):
self.assertEqual(expected, actual)
except GoogleAPICallError as e:
self.fail(e.message)
else:
self.skipTest(f'Test inputs and outputs are not provided for : {udf_path}')
if __name__ == '__main__':
unittest.main()
| 36.054545 | 87 | 0.642965 | 1,182 | 0.596067 | 0 | 0 | 1,139 | 0.574382 | 0 | 0 | 728 | 0.367121 |
73b8798661011cebe8aed8c67f5ab3688edd6b74
| 1,195 |
py
|
Python
|
pandas/tests/generic/test_panel.py
|
EternalLearner42/pandas
|
a2b414ccaab83e085d46e8217d5302a5d0f874f4
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/generic/test_panel.py
|
EternalLearner42/pandas
|
a2b414ccaab83e085d46e8217d5302a5d0f874f4
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/generic/test_panel.py
|
EternalLearner42/pandas
|
a2b414ccaab83e085d46e8217d5302a5d0f874f4
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from warnings import catch_warnings, simplefilter
from pandas import Panel
from pandas.util.testing import assert_panel_equal
from .test_generic import Generic
class TestPanel(Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True)
# run all the tests, but wrap each in a warning catcher
for t in ['test_rename', 'test_get_numeric_data',
'test_get_default', 'test_nonzero',
'test_downcast', 'test_constructor_compound_dtypes',
'test_head_tail',
'test_size_compat', 'test_split_compat',
'test_unexpected_keyword',
'test_stat_unexpected_keyword', 'test_api_compat',
'test_stat_non_defaults_args',
'test_truncate_out_of_bounds',
'test_metadata_propagation', 'test_copy_and_deepcopy',
'test_pct_change', 'test_sample']:
def f():
def tester(self):
f = getattr(super(TestPanel, self), t)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
f()
return tester
setattr(TestPanel, t, f())
| 30.641026 | 77 | 0.659414 | 120 | 0.100418 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.41841 |
73b8db5714154072049f41562b46bb8f89e7deee
| 1,233 |
py
|
Python
|
shortest-paths.py
|
SAUSy-Lab/map-speed-test
|
0c9e78056017a247976ff63782c6366c5a724bf4
|
[
"MIT"
] | 2 |
2017-03-31T02:16:57.000Z
|
2019-07-13T14:31:04.000Z
|
shortest-paths.py
|
SAUSy-Lab/map-speed-test
|
0c9e78056017a247976ff63782c6366c5a724bf4
|
[
"MIT"
] | 10 |
2017-01-07T04:26:41.000Z
|
2017-03-07T21:00:27.000Z
|
shortest-paths.py
|
SAUSy-Lab/map-speed-test
|
0c9e78056017a247976ff63782c6366c5a724bf4
|
[
"MIT"
] | null | null | null |
# calculate shortest paths between OD pairs
# in the map_speed_od postgis table
# update the shortest path geometry into the table
import requests, json, psycopg2
# get OD pairs from DB
conn_string = (
"host='localhost' dbname='' user='' password=''"
)
connection = psycopg2.connect(conn_string)
connection.autocommit = True
c = connection.cursor()
c.execute("""
SELECT
id,
ST_X(ST_StartPoint(vector)) AS lon1,
ST_Y(ST_StartPoint(vector)) AS lat1,
ST_X(ST_EndPoint(vector)) AS lon2,
ST_Y(ST_EndPoint(vector)) AS lat2
FROM map_speed_od
""")
# iterate over DB pairs
for (rid,lon1,lat1,lon2,lat2) in c.fetchall():
# request route for these points
options = {
'geometries':'geojson',
'overview':'full',
'steps':'false',
'annotations':'false'
}
response = requests.get(
('http://206.167.182.17:5000/route/v1/transit/'+str(lon1)+','+str(lat1)+';'+str(lon2)+','+str(lat2)),
params=options,
timeout=5
)
# parse the result
j = json.loads(response.text)
print json.dumps(j['routes'][0]['geometry'])
# insert the route result
c.execute("""
UPDATE map_speed_od
SET shortest_path = ST_SetSRID(ST_GeomFromGeoJSON(%s),4326)
WHERE id = %s;
""",
(json.dumps(j['routes'][0]['geometry']),rid,)
)
| 25.163265 | 103 | 0.687753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.619627 |
73b9218ed262aae642dc0406539a72aa91d888bc
| 320 |
py
|
Python
|
my_tools/tools_for_os/for_file.py
|
Alex2Yang97/waiting_time_project
|
649dbaa4bd45b9b9974a5b71a8ee17fada07bcc9
|
[
"MIT"
] | null | null | null |
my_tools/tools_for_os/for_file.py
|
Alex2Yang97/waiting_time_project
|
649dbaa4bd45b9b9974a5b71a8ee17fada07bcc9
|
[
"MIT"
] | 12 |
2020-11-13T17:16:58.000Z
|
2021-04-23T01:25:17.000Z
|
my_tools/tools_for_os/for_file.py
|
Alex2Yang97/waiting_time_project
|
649dbaa4bd45b9b9974a5b71a8ee17fada07bcc9
|
[
"MIT"
] | null | null | null |
#-*- coding:utf-8 -*-
# @Time : 2020-02-15 15:49
# @Author : Zhirui(Alex) Yang
# @Function :
import os
def create_folder(folder_name):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
print(folder_name, 'has created!')
else:
print(folder_name, 'already existed!')
| 17.777778 | 46 | 0.61875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.396875 |
73be179d5a3f60a254ebcb05e6ce4cdd7d7c207f
| 7,842 |
py
|
Python
|
tcp_tls_tunnel/hyper_http2_adapter.py
|
DSAdv/tcp-tls-tunnel-py
|
e9b5271e4cfae1df09b9fab77db4906b7cee8337
|
[
"MIT"
] | 1 |
2021-08-30T21:03:41.000Z
|
2021-08-30T21:03:41.000Z
|
tcp_tls_tunnel/hyper_http2_adapter.py
|
DSAdv/tcp-tls-tunnel-py
|
e9b5271e4cfae1df09b9fab77db4906b7cee8337
|
[
"MIT"
] | 1 |
2022-03-31T12:02:29.000Z
|
2022-03-31T12:02:29.000Z
|
tcp_tls_tunnel/hyper_http2_adapter.py
|
DSAdv/tcp-tls-tunnel-py
|
e9b5271e4cfae1df09b9fab77db4906b7cee8337
|
[
"MIT"
] | 1 |
2021-08-28T14:35:18.000Z
|
2021-08-28T14:35:18.000Z
|
import ssl
import socket
from typing import Tuple
from hyper.common.util import to_native_string
from urllib.parse import urlparse
from hyper import HTTP11Connection, HTTPConnection
from hyper.common.bufsocket import BufferedSocket
from hyper.common.exceptions import TLSUpgrade
from hyper.contrib import HTTP20Adapter
from hyper.tls import init_context
from tcp_tls_tunnel.utils import generate_basic_header, generate_proxy_url
from tcp_tls_tunnel.dto import ProxyOptions, AdapterOptions, TunnelOptions
from tcp_tls_tunnel.exceptions import ProxyError
def _create_tunnel(tunnel_opts: TunnelOptions,
dest_host: str,
dest_port: int,
server_name: str = None,
proxy: ProxyOptions = None,
timeout: int = None) -> Tuple[socket.socket, str]:
"""
Sends CONNECT method to a proxy and returns a socket with established
connection to the target.
:returns: socket, proto
"""
headers = {
"Authorization": generate_basic_header(tunnel_opts.auth_login, tunnel_opts.auth_password),
"Client": tunnel_opts.client.value,
"Connection": 'keep-alive',
"Server-Name": server_name or dest_host,
"Host": tunnel_opts.host,
"Secure": str(int(tunnel_opts.secure)),
"HTTP2": str(int(tunnel_opts.http2)),
}
if proxy:
headers["Proxy"] = generate_proxy_url(proxy=proxy)
conn = HTTP11Connection(tunnel_opts.host, tunnel_opts.port, timeout=timeout)
conn.request('CONNECT', '%s:%d' % (dest_host, dest_port),
headers=headers)
resp = conn.get_response()
try:
proto = resp.headers.get("Alpn-Protocol")[0].decode('utf-8')
except TypeError:
proto = 'http/1.1'
if resp.status != 200:
raise ProxyError(
"Tunnel connection failed: %d %s" %
(resp.status, to_native_string(resp.reason)),
response=resp
)
return getattr(conn, "_sock"), proto
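# Hedged usage sketch (illustrative only): `_create_tunnel` is normally driven
# by the connection classes below, but it can also be called directly once a
# `TunnelOptions` instance has been built.  The destination host and port here
# are placeholders for the example.
def _tunneled_alpn_probe(tunnel_opts: TunnelOptions,
                         dest_host: str = "example.org",
                         dest_port: int = 443) -> str:
    # Open the CONNECT tunnel, record the ALPN protocol negotiated by the
    # tunnel ("h2" or "http/1.1"), then close the raw socket again.
    sock, proto = _create_tunnel(tunnel_opts, dest_host, dest_port, timeout=10)
    sock.close()
    return proto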
class TunnelHTTP20Adapter(HTTP20Adapter):
def __init__(self,
adapter_opts: AdapterOptions,
proxy_opts: ProxyOptions = None,
window_manager=None,
*args, **kwargs):
super(TunnelHTTP20Adapter, self).__init__(window_manager=window_manager, *args, **kwargs)
self.adapter_opts = adapter_opts
self.proxy_opts = proxy_opts
def get_connection(self, host, port, scheme, cert=None, verify=True,
proxy=None, timeout=None):
"""
Gets an appropriate HTTP/2 connection object based on
host/port/scheme/cert tuples.
"""
secure = (scheme == 'https')
if port is None: # pragma: no cover
port = 80 if not secure else 443
ssl_context = None
if not verify:
verify = False
ssl_context = init_context(cert=cert)
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
elif verify is True and cert is not None:
ssl_context = init_context(cert=cert)
elif verify is not True:
ssl_context = init_context(cert_path=verify, cert=cert)
if proxy:
proxy_headers = self.proxy_headers(proxy)
proxy_netloc = urlparse(proxy).netloc
else:
proxy_headers = None
proxy_netloc = None
# We put proxy headers in the connection_key, because
# ``proxy_headers`` method might be overridden, so we can't
# rely on proxy headers being the same for the same proxies.
proxy_headers_key = (frozenset(proxy_headers.items())
if proxy_headers else None)
connection_key = (host, port, scheme, cert, verify,
proxy_netloc, proxy_headers_key)
try:
conn = self.connections[connection_key]
except KeyError:
conn = TunnelHTTPConnection(
self.adapter_opts,
self.proxy_opts,
host=host,
port=port,
secure=secure,
ssl_context=ssl_context,
proxy_host=proxy_netloc,
proxy_headers=proxy_headers,
enable_push=True,
timeout=timeout,
)
self.connections[connection_key] = conn
return conn
class TunnelHTTPConnection(HTTPConnection):
def __init__(self,
adapter_opts: AdapterOptions,
proxy_opts: ProxyOptions = None,
host=None,
port=None,
secure=None,
enable_push=False,
ssl_context=None,
proxy_host=None,
proxy_port=None,
proxy_headers=None,
timeout=None,
**kwargs):
super().__init__(host=host,
port=port,
secure=secure,
enable_push=enable_push,
ssl_context=ssl_context,
proxy_host=proxy_host,
proxy_port=proxy_port,
proxy_headers=proxy_headers,
timeout=timeout,
**kwargs)
self._conn = TunnelHTTP11Connection(
tunnel_opts=TunnelOptions(
host=adapter_opts.host,
port=adapter_opts.port,
auth_login=adapter_opts.auth_login,
auth_password=adapter_opts.auth_password,
client=adapter_opts.client,
secure=secure,
http2=True
),
proxy_opts=proxy_opts,
host=self._host,
port=self._port,
**self._h1_kwargs
)
def __exit__(self, type, value, tb): # pragma: no cover
self._conn.close()
return False
class TunnelHTTP11Connection(HTTP11Connection):
def __init__(self,
tunnel_opts: TunnelOptions,
proxy_opts: ProxyOptions = None,
host=None, port=None, secure=None, ssl_context=None,
proxy_host=None, proxy_port=None, proxy_headers=None,
timeout=None,
**kwargs):
super(TunnelHTTP11Connection, self).__init__(host=host, port=port,
secure=secure, ssl_context=ssl_context,
proxy_host=proxy_host, proxy_port=proxy_port,
proxy_headers=proxy_headers, timeout=timeout,
**kwargs)
self.tunnel_opts = tunnel_opts
self.proxy_opts = proxy_opts
self.timeout = timeout
def connect(self):
"""
Connect to the server specified when the object was created. This is a
no-op if we're already connected.
:returns: Nothing.
"""
if self._sock is None:
# Tunnel socket creation with tunnel's TLS proto
sock, proto = _create_tunnel(
tunnel_opts=self.tunnel_opts,
proxy=self.proxy_opts,
dest_host=self.host,
dest_port=self.port,
server_name=None, # TODO: server_name
timeout=self.timeout
)
sock = BufferedSocket(sock, self.network_buffer_size)
sock.settimeout(self.timeout) # Set read timeout
if self.secure is not True:
proto = 'http/1.1'
if proto not in ('http/1.1', None):
raise TLSUpgrade(proto, sock)
self._sock = sock
return
| 35.165919 | 98 | 0.561464 | 5,821 | 0.742285 | 0 | 0 | 0 | 0 | 0 | 0 | 917 | 0.116934 |
73bfa3453754f3fe35dd27f3bb51112f146dfd38
| 1,387 |
py
|
Python
|
get_variances.py
|
OmnesRes/GRIMMER
|
173c99ebdb6a9edb1242d24a791d0c5d778ff643
|
[
"MIT"
] | 4 |
2017-02-20T12:03:29.000Z
|
2018-10-27T14:06:07.000Z
|
get_variances.py
|
OmnesRes/GRIMMER
|
173c99ebdb6a9edb1242d24a791d0c5d778ff643
|
[
"MIT"
] | 1 |
2019-10-08T17:39:30.000Z
|
2019-10-11T20:56:50.000Z
|
get_variances.py
|
OmnesRes/GRIMMER
|
173c99ebdb6a9edb1242d24a791d0c5d778ff643
|
[
"MIT"
] | null | null | null |
from itertools import *
import time
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#my own variance function runs much faster than numpy or the Python 3 ported statistics module
def variance(data,u):
return sum([(i-u)**2 for i in data])/len(data)
##rounding the means and variances helps to collapse them
precision_ave=16
precision_var=12
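# Hedged illustration (not part of the search itself) of why the rounding above
# matters: floating point sums can differ in the last bits even when the exact
# values agree, e.g. 0.1 + 0.2 != 0.3.  Rounding to `precision_var` decimals
# collapses such near-duplicates into a single dictionary key.
def rounding_collapse_example():
    computed = 0.1 + 0.2          # 0.30000000000000004
    exact = 0.3
    assert computed != exact
    assert round(computed, precision_var) == round(exact, precision_var)
    return True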
def run(n,r):
all_deviations={}
start=time.clock()
for i in combinations_with_replacement(range(n), r):
if n-1 in i:
u=round(sum(i)/float(len(i)),precision_ave)
var=round(variance(i,u),precision_var)
if var not in all_deviations:
all_deviations[var]={u:''}
else:
all_deviations[var][u]=''
end=time.clock()
duration=end-start
data=sorted(all_deviations.keys())
f=open(os.path.join(BASE_DIR,'raw_variances',str(r)+'.txt'),'w')
#write a header line that includes time to complete
f.write(str(n)+' '+str(duration))
f.write('\n')
for i in data:
f.write(str(i))
f.write('\t')
f.write(str(sorted(all_deviations[i].keys())))
f.write('\n')
f.close()
##perform runs
#n can probably just be set to 7 or even lower
#code will take a while, you should run copies of this script in parallel
for r in range(5,100):
n=30-r
if n<=7:
n=7
run(n,r)
| 26.169811 | 94 | 0.626532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 378 | 0.272531 |
73c1b51a0130489b93a3586b1b8afac1d574b406
| 621 |
py
|
Python
|
utils/pagenation.py
|
Andrewpqc/URL-shortener
|
74943b9f1f787e243a32e27eec425eb51f84e65e
|
[
"MIT"
] | 9 |
2018-07-01T11:19:05.000Z
|
2021-12-30T03:00:03.000Z
|
utils/pagenation.py
|
Andrewpqc/URL-shortener
|
74943b9f1f787e243a32e27eec425eb51f84e65e
|
[
"MIT"
] | 1 |
2020-12-09T23:46:04.000Z
|
2020-12-09T23:46:04.000Z
|
utils/pagenation.py
|
Andrewpqc/URL-shortener
|
74943b9f1f787e243a32e27eec425eb51f84e65e
|
[
"MIT"
] | 1 |
2018-06-06T15:10:57.000Z
|
2018-06-06T15:10:57.000Z
|
# coding: utf-8
"""
paginate.py
```````````
: pagination API
"""
from flask import url_for
def pagination(lit, page, perpage,endpoint):
"""
    Return the slice of ``lit`` for the requested page, together with the
    URLs of the next and the last page:
    [current_items, (next_url, last_url)]
"""
_yu = len(lit) % perpage
_chu = len(lit) // perpage
if _yu == 0:
last = _chu
else:
last = _chu + 1
current = lit[perpage*(page-1): perpage*page]
next_page = ""
if page < last:
next_page = url_for(endpoint, page=page+1)
elif page == last:
next_page = ""
last_page = url_for(endpoint, page=last)
return [current, (next_page, last_page)]
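# Hedged worked example of the arithmetic above (illustrative numbers, no Flask
# request context needed): _page_bounds(23, 10) == [(1, 0, 10), (2, 10, 20), (3, 20, 23)],
# i.e. three pages where the last page holds the remaining 3 items.
def _page_bounds(total, perpage):
    last = total // perpage + (1 if total % perpage else 0)
    return [(page, perpage * (page - 1), min(perpage * page, total))
            for page in range(1, last + 1)]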
| 20.7 | 50 | 0.558776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.260337 |
73c3330e453fbfebace232606fc0f58589eb269b
| 5,272 |
py
|
Python
|
app/default/rest_train.py
|
dbhack-aquila/aquila
|
5fd31665fcfdb2a1ba341f5c98d44668e467add8
|
[
"MIT"
] | 1 |
2017-12-16T14:51:54.000Z
|
2017-12-16T14:51:54.000Z
|
app/default/rest_train.py
|
dbhack-aquila/aquila
|
5fd31665fcfdb2a1ba341f5c98d44668e467add8
|
[
"MIT"
] | null | null | null |
app/default/rest_train.py
|
dbhack-aquila/aquila
|
5fd31665fcfdb2a1ba341f5c98d44668e467add8
|
[
"MIT"
] | null | null | null |
import pandas as pd
from . import default
import wikipedia
import json
from flask import jsonify
import re
import os
import multiprocessing
import requests
import urllib
import hashlib
df = 0
wikipedia.set_lang("de")
def init():
path = os.path.dirname(os.path.abspath(__file__)) + "/surveyor_hackathon_data_20171212.csv"
global df
df = pd.read_csv(path, sep=';', decimal='.', skiprows=0, nrows=100000) # Read one row
df = df.sort_values('created')
df = df.filter(items=['sid', 'gps_breite', 'gps_laenge'])
df.rename(columns={'gps_laenge': 'trainLongitude', 'gps_breite': 'trainLatitude'}, inplace=True)
# TODO sort by time
def get_first_image_thumbnail(wikipedia_page):
htmlcode = wikipedia_page.html()
try:
imgcode = re.search('<img.*src=".*".*/>', htmlcode).group(0)
imagecode_array = imgcode.split()
for imagecode_part in imagecode_array:
if "src=" in imagecode_part:
imagecode_array = imagecode_part.split('"')
break
for imagecode_part in imagecode_array:
if "//" in imagecode_part:
return "https:" + imagecode_part
except:
return ''
def get_first_image(thumbnail_url):
try:
if thumbnail_url == "":
return ""
return thumbnail_url.split("thumb/")[0] + thumbnail_url.split("thumb/")[1].rsplit("/", 1)[0]
except:
return ''
def get_wikidata_id(article):
"""Find the Wikidata ID for a given Wikipedia article."""
dapp = urllib.parse.urlencode({"action": "query",
"prop": "pageprops",
"ppprop":"wikibase_item",
"redirects": 1,
"format": "json",
"titles": article})
query_string = "https://de.wikipedia.org/w/api.php?%s" % dapp
ret = requests.get(query_string).json()
id = next(iter(ret["query"]["pages"]))
# TODO: Catch the case where article has no Wikidata ID
# This can happen for new or seldomly edited articles
return ret["query"]["pages"][id]["pageprops"]["wikibase_item"]
def get_wikidata_image(wikidata_id):
"""Return the image for the Wikidata item with *wikidata_id*. """
query_string = ("https://www.wikidata.org/wiki/Special:EntityData/%s.json"
% wikidata_id)
item = json.loads(requests.get(query_string).text)
wdata = item["entities"][wikidata_id]["claims"]
try:
image = wdata["P18"][0]["mainsnak"]["datavalue"]["value"].replace(" ", "_")
except KeyError:
print("No image on Wikidata.")
else:
md = hashlib.md5(image.encode('utf-8')).hexdigest()
image_url = ("https://upload.wikimedia.org/wikipedia/commons/thumb/%s/%s/%s/64px-%s"
% (md[0], md[:2], image, image))
return image_url
def get_wikidata_desc(wikidata_id):
"""Return the image for the Wikidata item with *wikidata_id*. """
dapp = urllib.parse.urlencode({'action':'wbgetentities','ids':get_wikidata_id(wikidata_id),'languages':'de'})
query_string = "https://www.wikidata.org/w/api.php?" + dapp
res = requests.get(query_string).text
print(query_string)
item = json.loads(res)
wdata = item["entities"][wikidata_id]["descriptions"]["de"]["value"]
return wdata
def get_first_image_2(page):
wid = get_wikidata_id(page)
return get_wikidata_image(wid)
def get_poi(poi):
poi, rest = poi.split(";lat")
lat, lon = rest.split(";lon")
npoi = {}
urls = []
npoi['name'] = poi
wid = get_wikidata_id(poi)
info = wikipedia.page(poi)
npoi['description'] = info.summary # get_wikidata_desc(poi)
npoi['latitude'] = float(lat)
npoi['longitude'] = float(lon)
thumbnail_url = get_first_image_thumbnail(info)
npoi['thumbnailUrl'] = thumbnail_url
npoi['imageUrl'] = get_first_image(thumbnail_url) # get_wikidata_image(wid)
urls.append(info.url)
npoi['linkUrls'] = urls
return npoi
@default.route('/gps/<int:trainid>/<int:time>')
def browse(trainid, time):
global df
df_temp = df[df['sid'] == trainid]
gjson = df_temp.iloc[time].to_dict()
result = requests.get("http://api.wikunia.de/sights/api.php?lat=" + str(df_temp.iloc[time]['trainLatitude']) + "&lon=" + str(df_temp.iloc[time]['trainLongitude']) + "&rad=0.05&limit=10")
print(str(df_temp.iloc[time]['trainLatitude']), str(df_temp.iloc[time]['trainLongitude']))
rJson = json.loads(result.text)
pois=[]
print(rJson)
for _, poi in rJson.items():
if isinstance(poi, dict):
print(poi['sight'])
pois.append(poi['sight']+";lat"+poi['lat']+";lon"+poi['lon'])
print(len(pois))
#pois = wikipedia.geosearch(df_temp.iloc[time]['trainLatitude'], df_temp.iloc[time]['trainLongitude'])
poi_list = []
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
poi_list = pool.map(get_poi, pois)
pool.close()
#for i in pois:
# poi_list.append(get_poi(i))
gjson['pois'] = poi_list
return jsonify(dict(gjson))
if __name__ == "__main__":
wid = get_wikidata_id("Limburger Dom")
image_url = get_wikidata_image(wid)
print(image_url)
| 33.579618 | 190 | 0.624241 | 0 | 0 | 0 | 0 | 1,089 | 0.206563 | 0 | 0 | 1,569 | 0.29761 |
73c42c7f51f7b24a02fde60345ef5bd395fee637
| 246 |
py
|
Python
|
tools/python_api_Easytest/out.py
|
xutongxin1/UnitAi-project
|
226ccc7d73096fd3582a55bf76593756d8033892
|
[
"MIT"
] | 5 |
2019-03-23T09:21:14.000Z
|
2019-10-18T11:31:10.000Z
|
tools/python_api_Easytest/out.py
|
xutongxin1/UnitAi-project
|
226ccc7d73096fd3582a55bf76593756d8033892
|
[
"MIT"
] | null | null | null |
tools/python_api_Easytest/out.py
|
xutongxin1/UnitAi-project
|
226ccc7d73096fd3582a55bf76593756d8033892
|
[
"MIT"
] | 2 |
2020-01-12T06:03:44.000Z
|
2020-01-17T00:23:20.000Z
|
import json,requests
def test():
url='http://localhost:5000/login'
headers = {'Content-Type':'application/json'}
data={
"user": "xutongxin"
}
    # pass headers by keyword so they are not bound to the `json` parameter
    req = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(req.text)
    return result
if __name__ == '__main__':
    print(test())
| 22.363636 | 52 | 0.686992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.317073 |
73c50231a058cf0ef478478e7a36afc7a3fd3081
| 3,481 |
py
|
Python
|
src/cam_loop.py
|
stay-whimsical/screamchess
|
4950d480f8f33db2bc3f2d94eea5dc6706ae8087
|
[
"MIT"
] | 2 |
2019-06-19T20:25:12.000Z
|
2021-06-04T04:43:36.000Z
|
src/cam_loop.py
|
pablo-meier/screamchess
|
4950d480f8f33db2bc3f2d94eea5dc6706ae8087
|
[
"MIT"
] | 8 |
2017-08-19T07:09:55.000Z
|
2017-08-20T21:11:11.000Z
|
src/cam_loop.py
|
pablo-meier/screamchess
|
4950d480f8f33db2bc3f2d94eea5dc6706ae8087
|
[
"MIT"
] | 1 |
2020-04-17T00:19:43.000Z
|
2020-04-17T00:19:43.000Z
|
from camera import board_image_processor as bip
from chess.models import *
import cv2
import numpy as np
import random
from media.sound import *
def show_webcam(mirror=False):
cam = cv2.VideoCapture(0)
while True:
ret_val, img = cam.read()
#chess_state = process_image(img)
cv2.imshow('webcam', img)
if cv2.waitKey(1) == 27:
break
def one_frame(id=0):
cam = cv2.VideoCapture(id)
ret_val, img = cam.read()
#print("width: " + str(cam.get(3)))
#print("height: " + str(cam.get(4)))
# cv2.imwrite("testimage3.png", img)
return img
def show_all_hsv_color_ranges(steps, board_processor):
step_size = 180/steps
hsv = one_frame()
for i in range(steps):
lower = np.array([i*step_size, 50, 50])
upper = np.array([(i+1)*step_size, 255, 255])
conv = board_processor._get_convolved_image(hsv, (lower, upper))
board_processor._show_image(conv)
def test_color_ranges():
board_processor = bip.BoardProcessor()
red = (np.array([0, 50, 50]), np.array([18, 255, 255]))
green = (np.array([19, 50, 50]), np.array([36, 255, 255]))
blue = (np.array([90, 50, 50]), np.array([108, 255, 255]))
color_map = {'W': green, 'B': blue}
board_processor.set_color_map(color_map)
return board_processor
def blend_images(num):
alpha = 0.5
beta = 1.0 - alpha
gamma = 0.0
img = one_frame()
for i in range(num):
img2 = one_frame()
img = cv2.addWeighted(img, alpha, img2, beta, gamma)
return img
def main_get_color_ranges():
board_processor = bip.BoardProcessor(debug_image_mode=False)
show_all_hsv_color_ranges(10, board_processor)
def main():
print 'Now initializing board processor'
board_processor = bip.BoardProcessor(debug_image_mode=False)
#board_processor = test_color_ranges()
#board_processor = bip.BoardProcessor()
state = board_processor.get_cur_state()
board = Board()
while True:
#img = one_frame()
print 'reading 5 frames'
try:
img = blend_images(5)
tmp_im_path = '/tmp/img.jpg'
cv2.imwrite(tmp_im_path, img)
board_processor._cache_pil_im(tmp_im_path)
board_processor._show_image(img, show_this_image=False)
print 'updating state'
board_processor.update_state(img)
ret_state = board_processor.get_cur_state()
if ret_state != state:
pieces = []
for i in range(0,8):
for j in range(0,8):
if board.state[i][j].piece:
pieces.append(board.state[i][j].piece)
piece_index = random.randint(0, len(pieces) - 1)
if pieces:
play_sound(pieces[piece_index], random_action())
print '\033[34;1m Got state change, new state = \033[0m'
for row in ret_state:
m = []
for x in row:
if x is None:
m.append('-')
else:
m.append('P')
print m
state = ret_state
else:
print 'No new state',
except Exception as e:
print '\033[31;1m GOT EXCEPTION', e, '\033[0m'
# show_webcam()
if __name__ == '__main__':
main()
#main_get_color_ranges()
| 33.152381 | 72 | 0.564493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.139328 |
73c507797796f3d05c197c7fb4b73550955df8ce
| 2,854 |
py
|
Python
|
__train/preprocessing.py
|
aiddun/jazzCNN
|
f2d60d1b0697e71327e1d6d2bb9af6407e1253d1
|
[
"MIT"
] | 1 |
2018-03-02T09:59:36.000Z
|
2018-03-02T09:59:36.000Z
|
_evaluate/preprocessing.py
|
AidDun/jazzCNN
|
f2d60d1b0697e71327e1d6d2bb9af6407e1253d1
|
[
"MIT"
] | 3 |
2020-11-13T17:17:54.000Z
|
2022-02-09T23:27:21.000Z
|
_evaluate/preprocessing.py
|
AidDun/jazzCNN
|
f2d60d1b0697e71327e1d6d2bb9af6407e1253d1
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import random
import glob
import scipy.io.wavfile
np.random.seed(4)
def preprocess(periods, testCategoryNum):
periodList = periods
catNum = len(periodList)
def createpathlist():
print("Loading file paths.")
x = []
y = []
for recDirIndex in range(len(periodList)):
rnge = periodList[recDirIndex]
bottomrange = rnge[0]
toprange = rnge[1]
for i in range(bottomrange, toprange):
recYearDirIndex = glob.glob("..//FINAL//" + str(i) + "//*.wav")
for n in range(len(recYearDirIndex)):
path = recYearDirIndex[n]
x.append(path)
y.append(recDirIndex)
#Created 2 original arrays for readability
print("Done.")
return np.array(x), np.array(y)
def truncateData():
x, y = createpathlist()
#Least prevalent category
originalLengths = []
for n in range(catNum):
originalLengths.append(np.count_nonzero(y == n))
minimumNum = min(originalLengths)
for n in range(catNum):
while( y.tolist().count(n) > minimumNum ):
                #First occurring instance
for q in range(y.size):
if y[q] == n:
y = np.delete(y, q)
x = np.delete(x, q)
break
return x, y
def psudoRandomOrder():
x, y = truncateData()
print("")
print("Psudo-randomising Data")
randOrder = np.random.permutation(x.shape[0])
x, y = x[randOrder], y[randOrder]
print("Shuffled.")
return x, y
def BatchSeparator():
x, y = psudoRandomOrder()
print("")
print("Separating data into testing and training set.")
x_test = []
y_test = []
for n in range(catNum):
while( y_test.count(n) < testCategoryNum ):
                #first occurring instance
for q in range(y.size):
if y[q] == n:
x_test.append(x[q])
x = np.delete(x, q)
y_test.append(y[q])
y = np.delete(y, q)
break
        x_test = np.array(x_test)
y_test = np.array(y_test)
x_train = np.array(x)
y_train = np.array(y)
return x_train, y_train, x_test, y_test
x_train, y_train, x_test, y_test = BatchSeparator()
print("Created training set of " + str(y_train.size) + " recordings and a testing set of " + str(y_test.size) + " recordings.")
print("Preproccessing complete.")
return x_train, y_train, x_test, y_test
| 25.711712 | 131 | 0.501402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.123686 |
73c590592e5f6c7d80e9e638ac61992cbf513263
| 49 |
py
|
Python
|
test/fixtures/python/analysis/main1.py
|
matsubara0507/semantic
|
67899f701abc0f1f0cb4374d8d3c249afc33a272
|
[
"MIT"
] | 8,844 |
2019-05-31T15:47:12.000Z
|
2022-03-31T18:33:51.000Z
|
test/fixtures/python/analysis/main1.py
|
matsubara0507/semantic
|
67899f701abc0f1f0cb4374d8d3c249afc33a272
|
[
"MIT"
] | 401 |
2019-05-31T18:30:26.000Z
|
2022-03-31T16:32:29.000Z
|
test/fixtures/python/analysis/main1.py
|
matsubara0507/semantic
|
67899f701abc0f1f0cb4374d8d3c249afc33a272
|
[
"MIT"
] | 504 |
2019-05-31T17:55:03.000Z
|
2022-03-30T04:15:04.000Z
|
import a as b
import b.c as e
b.foo(1)
e.baz(1)
| 8.166667 | 15 | 0.632653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
73c5ed5d0d7202bd31940ec8f1e4251cfbaeba8a
| 10,054 |
py
|
Python
|
app/views.py
|
allthingsclowd/K5_User_Onboarding_Example
|
313b0033ceb015cca86574762915e02000d4bbbb
|
[
"MIT"
] | null | null | null |
app/views.py
|
allthingsclowd/K5_User_Onboarding_Example
|
313b0033ceb015cca86574762915e02000d4bbbb
|
[
"MIT"
] | null | null | null |
app/views.py
|
allthingsclowd/K5_User_Onboarding_Example
|
313b0033ceb015cca86574762915e02000d4bbbb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""Summary - Flask Views Used to Control/Wrap a web UI
around the Add User Python Script
Author: Graham Land
Date: 08/12/16
Twitter: @allthingsclowd
Github: https://github.com/allthingscloud
Blog: https://allthingscloud.eu
"""
from flask import flash, render_template, session, request, redirect, url_for, json, make_response
from app import app
import os,binascii
import AddUserToProjectv3 as K5User
import k5APIwrappersV19 as K5API
from functools import wraps
app.secret_key = os.urandom(24)
JSESSION_ID = binascii.b2a_hex(os.urandom(16))
def login_required(f):
"""Summary - Decorator used to ensure that routes channeled through
this function are authenticated already
Otherwise they're returned to the login screen
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session['regionaltoken'] is None:
return redirect(url_for('index', next=request.url))
return f(*args, **kwargs)
return decorated_function
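# Hedged illustration only: a hypothetical route showing how the decorator above
# is meant to be stacked under @app.route, mirroring the /adduser view further
# down.  The '/ping' path and its response body are placeholders for the example.
@app.route('/ping')
@login_required
def ping():
    """Summary - Trivial authenticated endpoint illustrating login_required
    """
    return 'authenticated'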
@app.route('/', methods=['GET', 'POST'])
@app.route('/login', methods=['GET', 'POST'])
def index():
"""Summary - Default login screen used to capture user login details
and authenticate user session
"""
session['regionaltoken'] = None
if request.headers.get('x-forwarded-proto') != 'https':
secure_url=request.url.replace("http://","https://")
return redirect(secure_url)
if request.method == 'POST':
adminUser = request.form.get('k5username', None)
adminPassword = request.form.get('k5password', None)
contract = request.form.get('k5contract', None)
region = request.form.get('k5region', None)
#print adminUser, adminPassword, contract, region
try:
regional_token = K5API.get_unscoped_token(
adminUser, adminPassword, contract, region)
#print regional_token
#print regional_token.json()
defaultid = regional_token.json()['token']['project'].get('id')
global_token = K5API.get_globally_scoped_token(
adminUser, adminPassword, contract, defaultid, region)
if not isinstance(regional_token, str):
#print "Got this far!!"
for role in regional_token.json()['token']['roles']:
if role['name'] == 'cpf_admin':
session['adminUser'] = adminUser
session['adminPassword'] = adminPassword
session['regionaltoken'] = regional_token.headers[
'X-Subject-Token']
session['globaltoken'] = global_token.headers[
'X-Subject-Token']
session['contract'] = contract
session['contractid'] = regional_token.json()['token']['project'][
'domain'].get('id')
session['defaultprjid'] = regional_token.json()['token'][
'project'].get('id')
session['region'] = region
#print "Downloads"
#print session['bubbles'].json()
return redirect(url_for('adduser'))
else:
return render_template('hello-flask-login.html',
title='K5 User Onboarding Portal (Demo)')
except:
return render_template('hello-flask-login.html',
title='K5 User Onboarding Portal (Demo)')
else:
resp = make_response(render_template('hello-flask-login.html',
title='K5 User Onboarding Portal (Demo)'))
resp.set_cookie('JSESSIONID',JSESSION_ID)
return resp
@app.route('/adduser', methods=['GET', 'POST'])
@login_required
def adduser():
"""Summary - Call the add user function
"""
if request.method == 'POST':
if request.form.get('AddUser', None) == "Add User":
adminUser = session['adminUser']
adminPassword = session['adminPassword']
contract = session['contract']
contractid = session['contractid']
region = session['region']
defaultprjid = session['defaultprjid']
try:
regional_token = K5API.get_unscoped_token(
adminUser, adminPassword, contract, region)
global_token = K5API.get_globally_scoped_token(
adminUser, adminPassword, contract, defaultprjid, region)
id_token = K5API.get_unscoped_idtoken(
adminUser, adminPassword, contract)
except:
return render_template('hello-flask-login.html',
title='K5 User Onboarding Portal (Demo)')
newregionaltoken = regional_token.headers['X-Subject-Token']
newglobaltoken = global_token.headers['X-Subject-Token']
email = request.form.get('k5useremail', None)
userProject = request.form.get('k5project', None)
userProjectA = unicode(userProject) + unicode('a')
userProjectB = unicode(userProject) + unicode('b')
try:
resultprojecta = K5User.adduser_to_K5(id_token,
newglobaltoken,
newregionaltoken,
contractid,
contract,
region,
email,
userProjectA)
resultprojectb = K5User.adduser_to_K5(id_token,
newglobaltoken,
newregionaltoken,
contractid,
contract,
region,
email,
userProjectB)
#print result
except:
return render_template('hello-flask-login.html',
title='K5 User Onboarding Portal (Demo)')
if resultprojecta is not None:
#print result
session['newuserlogin'] = resultprojecta[2]
session['newuserpassword'] = resultprojecta[4]
session['newuserstatusa'] = resultprojecta[5]
session['newuserprojecta'] = userProjectA
session['newuserstatusb'] = resultprojectb[5]
session['newuserprojectb'] = userProjectB
session['newusercontract'] = contract
session['newuserregion'] = region
return redirect(url_for('userstatus'))
else:
if request.form.get('Logout', None) == "Logout":
return redirect(url_for('logout'))
if request.method == 'GET':
region = session['region']
defaultprjid = session['defaultprjid']
regionaltoken = session['regionaltoken']
# report_bubbles = json.dumps(download_item_in_storage_container(
# regionaltoken,
# defaultprjid,
# "Bubbles",
# "Bubbles.json", region).json())
report_bubbles = [{ "name": "Test"}]
#print "\n\n\nLoading JSON Details..................\n\n\n"
#print "The actual JSON File.................."
#print report_bubbles
return render_template('hello-flask-adduser.html',
title='K5 Add User',
bubbles=report_bubbles)
@app.route('/userstatus', methods=['GET', 'POST'])
@login_required
def userstatus():
"""Summary - Display the results of the user add request
"""
if request.method == 'POST':
if request.form.get('AddUser', None) == "Add Another User":
return redirect(url_for('adduser'))
else:
if request.form.get('Logout', None) == "Logout":
return redirect(url_for('logout'))
if request.method == 'GET':
username = session['newuserlogin']
userpassword = session['newuserpassword']
userstatusa = session['newuserstatusa']
userprojecta = session['newuserprojecta']
userstatusb = session['newuserstatusb']
userprojectb = session['newuserprojectb']
usercontract = session['newusercontract']
usercontractid = session['contractid']
userregion = session['newuserregion']
return render_template('hello-flask-result.html',
title='K5 New User Details',
userstatus=( 'Username : ' + username +
' | Password : ' + userpassword +
' | Project 1 : ' + userprojecta +
' | Status : ' + userstatusa +
' | Project 2 : ' + userprojectb +
' | Status : ' + userstatusb +
' | Contract : ' + usercontract +
' | Contract ID : ' + usercontractid +
' | Region : ' + userregion))
@app.route('/logout')
@login_required
def logout():
"""Summary - Dump the user session cookies on logout
"""
# remove session vars
session.pop('regionaltoken', None)
session.pop('globaltoken', None)
session.pop('adminUser', None)
session.pop('adminPassword', None)
return redirect(url_for('index'))
| 41.717842 | 98 | 0.514919 | 0 | 0 | 0 | 0 | 9,207 | 0.915755 | 0 | 0 | 2,940 | 0.292421 |
73c828b9d6fbfbe855a2020cf66b582e67bedfef
| 867 |
py
|
Python
|
src/users/models.py
|
gabrielstork/django-to-do-list
|
756f636fc531f131bbf0649c14272178ce13d957
|
[
"MIT"
] | 6 |
2021-11-15T18:56:44.000Z
|
2022-02-15T10:02:24.000Z
|
src/users/models.py
|
gabrielstork/django-to-do-list
|
756f636fc531f131bbf0649c14272178ce13d957
|
[
"MIT"
] | 1 |
2022-02-14T20:28:39.000Z
|
2022-02-14T20:28:39.000Z
|
src/users/models.py
|
gabrielstork/django-to-do-list
|
756f636fc531f131bbf0649c14272178ce13d957
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser
from django.core.validators import MinLengthValidator
from django.utils.translation import gettext_lazy as _
from django.db import models
from . import validators
class User(AbstractUser):
min_length_validator = MinLengthValidator(
limit_value=3,
message=_('At least 3 characters are required.')
)
characters_validator = validators.CharactersValidator()
username = models.CharField(
_('username'),
max_length=15,
primary_key=True,
help_text=_('Required. 3-15 characters. Letters and numbers only.'),
validators=[characters_validator, min_length_validator],
error_messages={
'unique': _('A user with that username already exists.'),
},
)
first_name = None
last_name = None
REQUIRED_FIELDS = []
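# Note: `validators.CharactersValidator`, imported above, lives in a sibling module that
# is not included in this record. A minimal sketch consistent with the "Letters and
# numbers only." help text (illustrative only, not the project's actual implementation):
#
#     from django.core.validators import RegexValidator
#
#     class CharactersValidator(RegexValidator):
#         regex = r'^[A-Za-z0-9]+$'
#         message = _('Enter a valid username. Letters and numbers only.')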
| 27.967742 | 76 | 0.690888 | 649 | 0.748558 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.175317 |
73c9920f5c36cc9f240880ba80cb675e0c7cb5ca
| 5,135 |
py
|
Python
|
readux/books/abbyyocr.py
|
jpkarlsberg/readux
|
50a895dcf7d64b753a07808e9be218cab3682850
|
[
"Apache-2.0"
] | null | null | null |
readux/books/abbyyocr.py
|
jpkarlsberg/readux
|
50a895dcf7d64b753a07808e9be218cab3682850
|
[
"Apache-2.0"
] | null | null | null |
readux/books/abbyyocr.py
|
jpkarlsberg/readux
|
50a895dcf7d64b753a07808e9be218cab3682850
|
[
"Apache-2.0"
] | null | null | null |
'''
:class:`eulxml.xmlmap.XmlObject` classes for working with ABBYY
FineReadux OCR XML.
Currently supports **FineReader6-schema-v1** and
**FineReader8-schema-v2**.
----
'''
from eulxml import xmlmap
class Base(xmlmap.XmlObject):
'''Base :class:`eulxml.xmlmap.XmlObject` for ABBYY OCR XML with
common namespace declarations.
'''
ROOT_NAMESPACES = {
'fr6v1': 'http://www.abbyy.com/FineReader_xml/FineReader6-schema-v1.xml',
'fr8v2': 'http://www.abbyy.com/FineReader_xml/FineReader8-schema-v2.xml'
}
'namespaces for supported versions of FineReader xml'
id = xmlmap.StringField('@xml:id')
def frns(xpath):
'''Utility function to convert a simple xpath to match any of the
configured versions of ABBYY FineReader XML namespaces. Example
conversions:
    * ``page`` becomes ``fr6v1:page|fr8v2:page``
    * ``text/par`` becomes ``fr6v1:text/fr6v1:par|fr8v2:text/fr8v2:par``
Uses all declared namespace prefixes from
:attr:`Base.ROOT_NAMESPACES`
'''
namespaces = Base.ROOT_NAMESPACES.keys()
return '|'.join('/'.join('%s:%s' % (ns, el) for el in xpath.split('/'))
for ns in namespaces)
class Formatting(Base):
'''A group of characters in a single :class:`Line` with uniform
formatting.'''
ROOT_NAME = 'formatting'
language = xmlmap.StringField('@lang')
'language of this formatted section'
text = xmlmap.StringField('text()')
'text value'
# char params ?
# boolean attributes for: ff, fs, bold, italic, subscript, superscript,
# smallcaps, underline, strikeout, color, scaling, spacing
class Line(Base):
'''A single line of text in a :class:`Paragraph`.'''
ROOT_NAME = 'line'
baseline = xmlmap.IntegerField('@baseline')
'integer baseline'
left = xmlmap.IntegerField('@l')
'integer left'
top = xmlmap.IntegerField('@t')
'integer top'
right = xmlmap.IntegerField('@r')
'integer right'
bottom = xmlmap.IntegerField('@b')
'integer bottom'
formatted_text = xmlmap.NodeListField(frns('formatting'),
Formatting)
'list of :class:`Formatting` elements'
class Paragraph(Base):
'''A single paragraph of text somewhere in a :class:`Document`.'''
ROOT_NAME = 'par'
align = xmlmap.StringField('@align') # default is Left; Center, Right, Justified
'text alignment (Left, Center, Right, Justified)'
left_indent = xmlmap.IntegerField('@leftIndent')
'integer left indent'
right_indent = xmlmap.IntegerField('@rightIndent')
'integer right indent'
start_indent = xmlmap.IntegerField('@startIndent')
'integer start indent'
line_spacing = xmlmap.IntegerField('@lineSpacing')
'integer line spacing'
# dropChars stuff ?
lines = xmlmap.NodeListField(frns('line'), Line)
'list of :class:`Line` elements'
class Block(Base):
    '''A single block of content on a :class:`Page`.'''
    ROOT_NAME = 'block'
type = xmlmap.StringField('@blockType') # Text, Table, Picture, Barcode
'type of block (Text, Table, Picture, Barcode)'
left = xmlmap.IntegerField('@l')
'integer left'
top = xmlmap.IntegerField('@t')
'integer top'
right = xmlmap.IntegerField('@r')
'integer right'
bottom = xmlmap.IntegerField('@b')
'integer bottom'
# must have one & only one region;
# region/rect dimensions appears to be redundant...
paragraphs = xmlmap.NodeListField(frns('text/par'), Paragraph)
'list of :class:`Paragraph` elements'
class Page(Base):
'''A single page of a :class:`Document`.'''
ROOT_NAME = 'page'
width = xmlmap.IntegerField('@width')
'integer width'
height = xmlmap.IntegerField('@height')
'integer height'
resolution = xmlmap.IntegerField('@resolution')
'integer resolution'
blocks = xmlmap.NodeListField(frns('block'), Block)
'list of :class:`Block` elements in this page'
text_blocks = xmlmap.NodeListField(frns('block[@blockType="Text"]'),
Block)
'text :class:`Block` elements (where type is "Text")'
picture_blocks = xmlmap.NodeListField(frns('block[@blockType="Picture"]'),
Block)
'picture :class:`Block` elements (where type is "Picture")'
# block position info possibly redundant? map paragraphs directly
paragraphs = xmlmap.NodeListField(frns('block/text/par'),
Paragraph)
'list of :class:`Paragraph` elements in any of the blocks on this page'
class Document(Base):
''':class:`~eulxml.xmlmap.XmlObject` class for an ABBYY
OCR XML Document.
.. Note::
Currently there is no support for tabular formatting elements.
'''
ROOT_NAME ='document'
pages = xmlmap.NodeListField(frns('page'), Page)
'pages as :class:`Page`'
page_count = xmlmap.IntegerField('@pagesCount')
'integer page_count (document ``@pagesCount``)'
language = xmlmap.StringField('@mainLanguage')
'main language of the document'
languages = xmlmap.StringField('@languages')
'all languages included in the document'
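# Typical usage of these mappings (an illustrative sketch, not part of the module;
# 'finereader.xml' is a placeholder path) goes through the eulxml loader:
#
#     from eulxml.xmlmap import load_xmlobject_from_file
#
#     ocr = load_xmlobject_from_file('finereader.xml', Document)
#     print(ocr.page_count, ocr.language)
#     for page in ocr.pages:
#         for par in page.paragraphs:
#             # join the formatted runs of every line in the paragraph
#             text = ''.join(fmt.text or '' for line in par.lines
#                            for fmt in line.formatted_text)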
| 36.161972 | 84 | 0.651996 | 4,388 | 0.854528 | 0 | 0 | 0 | 0 | 0 | 0 | 2,947 | 0.573905 |
73c9e7bedf96216a6d9365965c340b5bab6a369e
| 742 |
py
|
Python
|
Aulas/aula14.py
|
adonaifariasdev/cursoemvideo-python3
|
1fd35e45b24c52013fa3bc98e723971db8e6b7d1
|
[
"MIT"
] | null | null | null |
Aulas/aula14.py
|
adonaifariasdev/cursoemvideo-python3
|
1fd35e45b24c52013fa3bc98e723971db8e6b7d1
|
[
"MIT"
] | null | null | null |
Aulas/aula14.py
|
adonaifariasdev/cursoemvideo-python3
|
1fd35e45b24c52013fa3bc98e723971db8e6b7d1
|
[
"MIT"
] | null | null | null |
'''for c in range(1, 10):
print(c)
print('FIM')'''
'''c = 1
while c < 10:
print(c)
c += 1
print('FIM')'''
'''n = 1
while n != 0: #flag ou condição de parada
n = int(input('Digite um valor: '))
print('FIM')'''
'''r = 'S'
while r == 'S':
n = int(input('Digite um valor: '))
r = str(input('Quer continuar? [S/N]')).upper()
print('FIM')'''
n = 1
totPar = totaImpar = 0
while n != 0:
n = int(input('Digite um valor: '))
    if n != 0:  # do not count the final 0 (the stop value) in the tally
        if n % 2 == 0:
totPar += 1
else:
totaImpar += 1
print('Você digitou {} numeros pares e {} numeros impares.'.format(totPar, totaImpar))
# NOTE: in this case the 0 is not counted as a number!!!!
| 22.484848 | 86 | 0.540431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.718499 |
73cb0638be23bf0c8d4dd43c1030dd71337f3c61
| 2,330 |
py
|
Python
|
tests/test_markdown_in_code_cells.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 5,378 |
2018-09-01T22:03:43.000Z
|
2022-03-31T06:51:42.000Z
|
tests/test_markdown_in_code_cells.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 812 |
2018-08-31T08:26:13.000Z
|
2022-03-30T18:12:11.000Z
|
tests/test_markdown_in_code_cells.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 380 |
2018-09-02T01:40:07.000Z
|
2022-03-25T13:57:23.000Z
|
"""Issue #712"""
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext import reads, writes
from jupytext.cell_to_text import three_backticks_or_more
from jupytext.compare import compare, compare_notebooks
from .utils import requires_myst
def test_three_backticks_or_more():
assert three_backticks_or_more([""]) == "```"
assert three_backticks_or_more(["``"]) == "```"
assert three_backticks_or_more(["```python"]) == "````"
assert three_backticks_or_more(["```"]) == "````"
assert three_backticks_or_more(["`````python"]) == "``````"
assert three_backticks_or_more(["`````"]) == "``````"
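# The helper under test is imported from jupytext.cell_to_text above. A minimal sketch
# consistent with these assertions (not jupytext's actual implementation) would be:
#
#     import re
#
#     def three_backticks_or_more(lines):
#         # return a fence one backtick longer than any fence-like run found, minimum 3
#         longest = 2
#         for line in lines:
#             match = re.match(r'`{3,}', line)
#             if match:
#                 longest = max(longest, len(match.group()))
#         return '`' * (longest + 1)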
def test_triple_backticks_in_code_cell(
no_jupytext_version_number,
nb=new_notebook(
metadata={"main_language": "python"},
cells=[
new_code_cell(
'''a = """
```
foo
```
"""'''
)
],
),
text='''---
jupyter:
jupytext:
main_language: python
---
````python
a = """
```
foo
```
"""
````
''',
):
actual_text = writes(nb, fmt="md")
compare(actual_text, text)
actual_nb = reads(text, fmt="md")
compare_notebooks(actual_nb, nb)
@requires_myst
def test_triple_backticks_in_code_cell_myst(
no_jupytext_version_number,
nb=new_notebook(
metadata={"main_language": "python"},
cells=[
new_code_cell(
'''a = """
```
foo
```
"""'''
)
],
),
text='''---
jupytext:
main_language: python
---
````{code-cell}
a = """
```
foo
```
"""
````
''',
):
actual_text = writes(nb, fmt="md:myst")
compare(actual_text, text)
actual_nb = reads(text, fmt="md:myst")
compare_notebooks(actual_nb, nb)
def test_alternate_tree_four_five_backticks(
no_jupytext_version_number,
nb=new_notebook(
metadata={"main_language": "python"},
cells=[
new_code_cell('a = """\n```\n"""'),
new_code_cell("b = 2"),
new_code_cell('c = """\n````\n"""'),
],
),
text='''---
jupyter:
jupytext:
main_language: python
---
````python
a = """
```
"""
````
```python
b = 2
```
`````python
c = """
````
"""
`````
''',
):
actual_text = writes(nb, fmt="md")
compare(actual_text, text)
actual_nb = reads(text, fmt="md")
compare_notebooks(actual_nb, nb)
| 17.923077 | 63 | 0.564807 | 0 | 0 | 0 | 0 | 543 | 0.233047 | 0 | 0 | 650 | 0.27897 |
73cd6b9d543cd1b702c785eacf0e7b85b40a9737
| 629 |
py
|
Python
|
amy/workshops/migrations/0152_event_open_ttt_applications.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 53 |
2015-01-10T17:39:19.000Z
|
2019-06-12T17:36:34.000Z
|
amy/workshops/migrations/0152_event_open_ttt_applications.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 1,176 |
2015-01-02T06:32:47.000Z
|
2019-06-18T11:57:47.000Z
|
amy/workshops/migrations/0152_event_open_ttt_applications.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 44 |
2015-01-03T15:08:56.000Z
|
2019-06-09T05:33:08.000Z
|
# Generated by Django 2.1 on 2018-09-02 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workshops', '0151_auto_20180902_0409'),
]
operations = [
migrations.AddField(
model_name='event',
name='open_TTT_applications',
field=models.BooleanField(blank=True, default=False, help_text="If this event is TTT, you can mark it as 'open applications' which means that people not associated with this event's member sites can also take part in this event.", verbose_name='TTT Open applications'),
),
]
| 33.105263 | 281 | 0.677266 | 538 | 0.855326 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.476948 |
73ced5d59e03f3d885db00b3181a8bf0e4e60e2a
| 5,220 |
py
|
Python
|
example/cifar10/fast_at.py
|
KuanKuanQAQ/ares
|
40dbefc18f6438e1812021fe6d6c3195f22ca295
|
[
"MIT"
] | 206 |
2020-12-31T09:43:11.000Z
|
2022-03-30T07:02:41.000Z
|
example/cifar10/fast_at.py
|
afoolboy/ares
|
89610d41fdde194e4ad916d29961aaed73383692
|
[
"MIT"
] | 7 |
2021-01-26T06:45:44.000Z
|
2022-02-26T05:25:48.000Z
|
example/cifar10/fast_at.py
|
afoolboy/ares
|
89610d41fdde194e4ad916d29961aaed73383692
|
[
"MIT"
] | 61 |
2020-12-29T14:02:41.000Z
|
2022-03-26T14:21:10.000Z
|
''' This file provides a wrapper class for Fast_AT (https://github.com/locuslab/fast_adversarial) model for CIFAR-10 dataset. '''
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import tensorflow as tf
from ares.model.pytorch_wrapper import pytorch_classifier_with_logits
from ares.utils import get_res_path
MODEL_PATH = get_res_path('./cifar10/cifar_model_weights_30_epochs.pth')
def load(_):
model = Fast_AT()
model.load()
return model
@pytorch_classifier_with_logits(n_class=10, x_min=0.0, x_max=1.0,
x_shape=(32, 32, 3), x_dtype=tf.float32, y_dtype=tf.int32)
class Fast_AT(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.model = PreActResNet18().cuda()
self._mean_torch = torch.tensor((0.4914, 0.4822, 0.4465)).view(3,1,1).cuda()
self._std_torch = torch.tensor((0.2471, 0.2435, 0.2616)).view(3,1,1).cuda()
def forward(self, x):
x = x.transpose(1, 2).transpose(1, 3).contiguous()
input_var = (x.cuda() - self._mean_torch) / self._std_torch
labels = self.model(input_var)
return labels.cpu()
def load(self):
checkpoint = torch.load(MODEL_PATH)
self.model.load_state_dict(checkpoint)
self.model.float()
self.model.eval()
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(x) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.bn = nn.BatchNorm2d(512 * block.expansion)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18():
return PreActResNet(PreActBlock, [2,2,2,2])
if __name__ == '__main__':
if not os.path.exists(MODEL_PATH):
if not os.path.exists(os.path.dirname(MODEL_PATH)):
os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
url = 'https://drive.google.com/file/d/1XM-v4hqi9u8EDrQ2xdCo37XXcM9q-R07/view'
print('Please download "{}" to "{}".'.format(url, MODEL_PATH))
| 37.021277 | 129 | 0.638314 | 4,129 | 0.790996 | 0 | 0 | 851 | 0.163027 | 0 | 0 | 417 | 0.079885 |
73cf094cf77e18c95fada7abbb805a0feed41fec
| 526 |
py
|
Python
|
auto_pilot/common/registrable.py
|
farrellsc/zAutoPilot
|
652d93690237dcb21c3cbdbdad95f917b7fec6e3
|
[
"MIT"
] | 1 |
2018-03-05T08:27:58.000Z
|
2018-03-05T08:27:58.000Z
|
auto_pilot/common/registrable.py
|
farrellsc/zAutoPilot
|
652d93690237dcb21c3cbdbdad95f917b7fec6e3
|
[
"MIT"
] | null | null | null |
auto_pilot/common/registrable.py
|
farrellsc/zAutoPilot
|
652d93690237dcb21c3cbdbdad95f917b7fec6e3
|
[
"MIT"
] | null | null | null |
from typing import Callable, TypeVar, List
T = TypeVar('T')
class Registrable(object):
reg_list = dict()
@classmethod
def register(cls, class_name: str) -> Callable:
        def register_inner(class_type: T) -> T:
            cls.reg_list[class_name] = class_type
            # return the class so this also works when applied as a decorator
            return class_type
        return register_inner
@classmethod
def list_available(cls) -> List[str]:
return list(cls.reg_list.keys())
@classmethod
def by_name(cls, class_name: str) -> T:
return cls.reg_list.get(class_name, None)
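# Intended usage (an illustrative sketch, not part of the module; `Widget` is a
# hypothetical subclass used only for demonstration):
#
#     class Widget(Registrable):
#         pass
#
#     Registrable.register('widget')(Widget)   # or apply as a class decorator
#     assert Registrable.list_available() == ['widget']
#     assert Registrable.by_name('widget') is Widget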
| 23.909091 | 51 | 0.65019 | 462 | 0.878327 | 0 | 0 | 396 | 0.752852 | 0 | 0 | 3 | 0.005703 |
73cf528b5a42e68ea53f81fc68bbf5a7a0f2cf10
| 688 |
py
|
Python
|
noheavenbot/cogs/commands/testing.py
|
Molanito13/noheaven-bot
|
ad126d4601321ecabff9d1d214ce7d3f4e258c3e
|
[
"MIT"
] | 3 |
2018-10-13T14:05:24.000Z
|
2018-12-25T21:40:21.000Z
|
noheavenbot/cogs/commands/testing.py
|
Molanito13/noheaven-bot
|
ad126d4601321ecabff9d1d214ce7d3f4e258c3e
|
[
"MIT"
] | 2 |
2018-10-08T14:33:39.000Z
|
2020-03-02T18:00:47.000Z
|
noheavenbot/cogs/commands/testing.py
|
Molanito13/noheaven-bot
|
ad126d4601321ecabff9d1d214ce7d3f4e258c3e
|
[
"MIT"
] | 5 |
2018-10-08T14:18:58.000Z
|
2020-11-01T17:55:51.000Z
|
from discord.ext.commands import command, Cog
from noheavenbot.utils.constants import TEXTCHANNELS
from discord import Member
from noheavenbot.utils.database_tables.table_users import Users
from noheavenbot.utils.validator import has_role as check_role
class Test(Cog):
def __init__(self, bot):
self.bot = bot
@command()
async def test(self, ctx):
if check_role(ctx.message.author.roles, 445947005169303552, True):
admin = True
return await ctx.send('all ok')
else:
return await ctx.send('Solo los administradores pueden añadir bots con permisos de administración.')
def setup(bot):
bot.add_cog(Test(bot))
| 28.666667 | 112 | 0.709302 | 389 | 0.563768 | 0 | 0 | 314 | 0.455072 | 299 | 0.433333 | 87 | 0.126087 |
73cfd3a5b8cd1e7653bb83ccce83e87f0876fda2
| 6,174 |
py
|
Python
|
mayan/apps/linking/tests/test_smart_link_condition_views.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 343 |
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/linking/tests/test_smart_link_condition_views.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 191 |
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/linking/tests/test_smart_link_condition_views.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 257 |
2019-05-14T10:26:37.000Z
|
2022-03-30T03:37:36.000Z
|
from mayan.apps.testing.tests.base import GenericViewTestCase
from ..events import event_smart_link_edited
from ..permissions import permission_smart_link_edit
from .mixins import (
SmartLinkConditionViewTestMixin, SmartLinkTestMixin,
SmartLinkViewTestMixin
)
class SmartLinkConditionViewTestCase(
SmartLinkConditionViewTestMixin, SmartLinkTestMixin,
SmartLinkViewTestMixin, GenericViewTestCase
):
def setUp(self):
super().setUp()
self._create_test_smart_link()
def test_smart_link_condition_create_view_no_permission(self):
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_create_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_create_view_with_access(self):
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count + 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object, self.test_smart_link_condition
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_smart_link)
self.assertEqual(events[0].verb, event_smart_link_edited.id)
def test_smart_link_condition_delete_view_no_permission(self):
self._create_test_smart_link_condition()
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_delete_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_delete_view_with_access(self):
self._create_test_smart_link_condition()
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_delete_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_smart_link)
self.assertEqual(events[0].verb, event_smart_link_edited.id)
def test_smart_link_condition_edit_view_no_permission(self):
self._create_test_smart_link_condition()
instance_values = self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
)
self._clear_events()
response = self._request_test_smart_link_condition_edit_view()
self.assertEqual(response.status_code, 404)
self.test_smart_link_condition.refresh_from_db()
self.assertEqual(
self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
), instance_values
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_edit_view_with_access(self):
self._create_test_smart_link_condition()
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
instance_values = self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
)
self._clear_events()
response = self._request_test_smart_link_condition_edit_view()
self.assertEqual(response.status_code, 302)
self.test_smart_link_condition.refresh_from_db()
self.assertNotEqual(
self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
), instance_values
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object, self.test_smart_link_condition
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_smart_link)
self.assertEqual(events[0].verb, event_smart_link_edited.id)
def test_smart_link_condition_list_view_no_permission(self):
self._create_test_smart_link_condition()
self._clear_events()
response = self._request_test_smart_link_condition_list_view()
self.assertNotContains(
response=response, status_code=404,
text=self.test_smart_link_condition.smart_link.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_list_view_with_access(self):
self._create_test_smart_link_condition()
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
self._clear_events()
response = self._request_test_smart_link_condition_list_view()
self.assertContains(
response=response, status_code=200,
text=self.test_smart_link_condition.smart_link.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
| 33.737705 | 75 | 0.698737 | 5,901 | 0.955782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
73d0507c967519673d3c90287e9f91022857b10e
| 19,105 |
py
|
Python
|
P1.py
|
chinmaydas96/CarND-LaneLines-P1
|
be8e03257962314d6adea68634d053d5f0550510
|
[
"MIT"
] | null | null | null |
P1.py
|
chinmaydas96/CarND-LaneLines-P1
|
be8e03257962314d6adea68634d053d5f0550510
|
[
"MIT"
] | null | null | null |
P1.py
|
chinmaydas96/CarND-LaneLines-P1
|
be8e03257962314d6adea68634d053d5f0550510
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Finding Lane Lines on the Road**
# ***
# In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
#
# Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
#
# In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
#
# ---
# Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
#
# **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
#
# ---
# **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
#
# ---
#
# <figure>
# <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
# </figcaption>
# </figure>
# <p></p>
# <figure>
# <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
# </figcaption>
# </figure>
# **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
# ## Import Packages
# In[1]:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# ## Read in an Image
# In[2]:
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
# ## Helper Functions
# Below are some helper functions to help get you started. They should look familiar from the lesson!
# In[3]:
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines_new(img, lines, color=[255, 0, 0], thickness=6):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
## create an empty array with all the line slope
all_slopes = np.zeros((len(lines)))
## create an empty array for left lines
left_line_slope = []
## create an empty array for right lines
right_line_slope = []
# keep each line slope in the array
for index,line in enumerate(lines):
for x1,y1,x2,y2 in line:
all_slopes[index] = (y2-y1)/(x2-x1)
    # get all left line slopes (positive values)
    left_line_slope = all_slopes[all_slopes > 0]
    # get all right line slopes (negative values)
right_line_slope = all_slopes[all_slopes < 0]
## mean value of left slope and right slope
m_l = left_line_slope.mean()
m_r = right_line_slope.mean()
# Create empty list for all the left points and right points
final_x4_l = []
final_x3_l = []
final_x4_r = []
final_x3_r = []
    ## get the fixed y-coordinates of the top and bottom points
y4 = 320
y3 = img.shape[0]
    ## For each line, accumulate the top and bottom x-coordinates
    ## of the left and right lane lines
for index,line in enumerate(lines):
for x1,y1,x2,y2 in line:
m = (y2-y1)/(x2-x1)
if m > 0 :
final_x4_l.append(int(((x1 + (y4 - y1) / m_l) + (x2 + (y4 - y2) / m_l))/ 2))
final_x3_l.append(int(((x1 + (y3 - y1) / m_l) + (x2 + (y3 - y2) / m_l))/ 2))
else:
final_x4_r.append(int(((x1 + (y4 - y1) / m_r) + (x2 + (y4 - y2) / m_r))/ 2))
final_x3_r.append(int(((x1 + (y3 - y1) / m_r) + (x2 + (y3 - y2) / m_r))/ 2))
try :
## taking average of each points
x4_l = int(sum(final_x4_l)/ len(final_x4_l))
x4_r = int(sum(final_x4_r)/ len(final_x4_r))
x3_l = int(sum(final_x3_l)/ len(final_x3_l))
x3_r = int(sum(final_x3_r)/ len(final_x3_r))
## Draw the left line and right line
cv2.line(img, (x4_l, y4), (x3_l, y3), color, thickness)
cv2.line(img, (x4_r, y4), (x3_r, y3), color, thickness)
except:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines_new(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# In[4]:
import os
os.listdir("test_images/")
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# In[18]:
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def preprocess_image(image_path):
image = mpimg.imread(image_path)
gray_image = grayscale(image)
    blurred_image = gaussian_blur(gray_image, 5)
    canny_image = canny(blurred_image, low_threshold=100, high_threshold=170)
vertices = np.array([[(80,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
roi_image = region_of_interest(canny_image, vertices)
hough_img = hough_lines(roi_image, rho=2, theta=np.pi/180, threshold=50, min_line_len=100, max_line_gap=160)
final_img= weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
return final_img
def process_test_images(source_folder,destination_folder):
## create destination folder if not present
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
## Get all input files from the source folder
list_test_files = os.listdir(source_folder)
## process all the input files
for file in list_test_files:
output = preprocess_image(source_folder+ '/' + file)
cv2.imwrite(destination_folder+'/'+ file, cv2.cvtColor(output, cv2.COLOR_RGB2BGR))
process_test_images('test_images','test_images_output')
# In[19]:
# In[20]:
os.listdir('test_images')
# In[21]:
# Checking in an image
plt.figure(figsize=(15,8))
plt.subplot(121)
image = mpimg.imread('test_images/whiteCarLaneSwitch.jpg')
plt.imshow(image)
plt.title('Original image')
plt.subplot(122)
image = mpimg.imread('test_images_output/whiteCarLaneSwitch.jpg')
plt.imshow(image)
plt.title('Output image')
plt.show()
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# In[9]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# In[10]:
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
gray_image = grayscale(image)
    blurred_image = gaussian_blur(gray_image, 5)
    canny_image = canny(blurred_image, low_threshold=100, high_threshold=170)
vertices = np.array([[(80,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
roi_image = region_of_interest(canny_image, vertices)
hough_img = hough_lines(roi_image, rho=2, theta=np.pi/180, threshold=50, min_line_len=100, max_line_gap=160)
result= weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
return result
# Let's try the one with the solid white lane on the right first ...
# In[11]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# Now for the one with the solid yellow lane on the left. This one's more tricky!
# In[13]:
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
cv2.imwrite('image_test.jpg',image)
gray_image = grayscale(image)
    blurred_image = gaussian_blur(gray_image, 5)
    canny_image = canny(blurred_image, low_threshold=100, high_threshold=170)
cv2.imwrite('image_test_canny.jpg',canny_image)
x_size = image.shape[1]
y_size = image.shape[0]
left_bottom = (80, y_size)
left_top = (x_size / 2 - 50, y_size / 2 + 50)
right_bottom = (x_size - 80, y_size)
right_top = (x_size / 2 + 50, y_size / 2 + 50)
#vertices = np.array([[left_bottom, left_top, right_top, right_bottom]], dtype=np.int32)
#vertices = np.array([[(280,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
vertices = np.array([[(300,680),(620, 460), (720, 460), (1085,673)]], dtype=np.int32)
roi_image = region_of_interest(canny_image, vertices)
try:
hough_img = hough_lines(roi_image, rho=2, theta=np.pi/180, threshold=50, min_line_len=100, max_line_gap=160)
result= weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
return result
except:
return image
# In[16]:
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| 40.997854 | 638 | 0.702277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,609 | 0.659363 |
73d14617d94420a3d56d21a483a4a8f9476f65c1
| 170 |
py
|
Python
|
notebooks/container/__init__.py
|
DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020
|
eb9d76a8dc7fff29e4123b940200d58eed87147c
|
[
"BSD-3-Clause"
] | 114 |
2020-06-16T09:29:30.000Z
|
2022-03-12T09:06:52.000Z
|
notebooks/container/__init__.py
|
DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020
|
eb9d76a8dc7fff29e4123b940200d58eed87147c
|
[
"BSD-3-Clause"
] | 5 |
2020-11-06T13:02:26.000Z
|
2021-06-10T18:34:37.000Z
|
notebooks/container/__init__.py
|
DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020
|
eb9d76a8dc7fff29e4123b940200d58eed87147c
|
[
"BSD-3-Clause"
] | 62 |
2020-06-16T09:25:05.000Z
|
2022-03-01T21:02:10.000Z
|
from container.base import TimeBase
from container.array import TimeArray, TimeDtype
from container.timeseries import TimeSeries
from container.timeframe import TimeFrame
| 42.5 | 48 | 0.876471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
73d30c85b213e414209b78284449266b653e1713
| 558 |
py
|
Python
|
spiketools/utils/base.py
|
claire98han/SpikeTools
|
f1cdffd50e2cbdb75961a716425c4665aa930f54
|
[
"Apache-2.0"
] | 1 |
2022-03-09T19:40:37.000Z
|
2022-03-09T19:40:37.000Z
|
spiketools/utils/base.py
|
claire98han/SpikeTools
|
f1cdffd50e2cbdb75961a716425c4665aa930f54
|
[
"Apache-2.0"
] | 35 |
2021-09-28T15:13:31.000Z
|
2021-11-26T04:38:08.000Z
|
spiketools/utils/base.py
|
claire98han/SpikeTools
|
f1cdffd50e2cbdb75961a716425c4665aa930f54
|
[
"Apache-2.0"
] | 4 |
2021-09-28T14:56:24.000Z
|
2022-03-09T21:00:31.000Z
|
"""Base utility functions, that manipulate basic data structures, etc."""
###################################################################################################
###################################################################################################
def flatten(lst):
"""Flatten a list of lists into a single list.
Parameters
----------
lst : list of list
A list of embedded lists.
Returns
-------
lst
A flattened list.
"""
return [item for sublist in lst for item in sublist]
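# Example of the expected behaviour (illustrative only, not part of the original module):
#
#     >>> flatten([[1, 2], [3], [4, 5]])
#     [1, 2, 3, 4, 5]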
| 26.571429 | 99 | 0.378136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.845878 |
73d374874a532014fc2ba903875cc4289b921e60
| 11,593 |
py
|
Python
|
zentral/contrib/osquery/forms.py
|
mikemcdonald/zentral
|
4aa03937abfbcea6480aa04bd99f4da7b8dfc923
|
[
"Apache-2.0"
] | null | null | null |
zentral/contrib/osquery/forms.py
|
mikemcdonald/zentral
|
4aa03937abfbcea6480aa04bd99f4da7b8dfc923
|
[
"Apache-2.0"
] | null | null | null |
zentral/contrib/osquery/forms.py
|
mikemcdonald/zentral
|
4aa03937abfbcea6480aa04bd99f4da7b8dfc923
|
[
"Apache-2.0"
] | 1 |
2020-09-09T19:26:04.000Z
|
2020-09-09T19:26:04.000Z
|
from django import forms
from zentral.core.probes.forms import BaseCreateProbeForm
from zentral.utils.forms import validate_sha256
from .probes import (OsqueryProbe, OsqueryComplianceProbe,
OsqueryDistributedQueryProbe, OsqueryFileCarveProbe,
OsqueryFIMProbe)
# OsqueryProbe
class DiscoveryForm(forms.Form):
query = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))
def get_item_d(self):
return self.cleaned_data["query"]
@staticmethod
def get_initial(discovery):
return {"query": discovery}
class QueryForm(forms.Form):
query = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))
description = forms.CharField(required=False,
help_text="Description of what this query does. Can be left empty",
widget=forms.Textarea(attrs={'rows': 3}))
value = forms.CharField(required=False,
help_text="Why is this query relevant. Can be left empty",
widget=forms.Textarea(attrs={'rows': 3}))
removed = forms.BooleanField(label='Include {"action": "removed"} results?',
help_text='If False, only {"action": "added"} results will be in the logs',
initial=True,
required=False)
snapshot = forms.BooleanField(label='Run this query in "snapshot" mode?',
help_text=('If True, osquery will not store differentials '
'and will not emulate an event stream'),
initial=False,
required=False)
interval = forms.IntegerField(min_value=10, # 10 seconds
max_value=2678400, # 31 days
initial=3600)
shard = forms.IntegerField(min_value=1, max_value=100, required=False,
help_text="Restrict this query to a percentage (1-100) of target hosts")
def clean_removed(self):
remove = self.cleaned_data.get("removed")
if not remove:
remove = False
return remove
def clean_snapshot(self):
snapshot = self.cleaned_data.get("snapshot")
if not snapshot:
snapshot = False
return snapshot
def clean_description(self):
description = self.cleaned_data.get("description")
if not description:
return None
else:
return description
def clean_value(self):
value = self.cleaned_data.get("value")
if not value:
return None
else:
return value
def clean(self):
cleaned_data = super().clean()
removed = cleaned_data["removed"]
snapshot = cleaned_data["snapshot"]
if removed and snapshot:
raise forms.ValidationError('{"action": "removed"} results are not available in "snapshot" mode')
return cleaned_data
def get_item_d(self):
return {f: v for f, v in self.cleaned_data.items() if v is not None}
@staticmethod
def get_initial(query):
initial = {}
for attr in ("query", "description", "value", "interval", "removed", "shard"):
value = getattr(query, attr, None)
if value is not None:
initial[attr] = value
return initial
class CreateProbeForm(BaseCreateProbeForm, QueryForm):
model = OsqueryProbe
field_order = ("name", "query", "description", "value", "removed", "snapshot", "interval", "shard")
def get_body(self):
return {"queries": [self.get_item_d()]}
# OsqueryComplianceProbe
class PreferenceFileForm(forms.Form):
rel_path = forms.CharField(label="Relative path")
type = forms.ChoiceField(label='Location',
choices=(('USERS', '/Users/%/Library/Preferences/'),
('GLOBAL', '/Library/Preferences/')))
description = forms.CharField(required=False,
widget=forms.Textarea(attrs={'rows': 3}))
interval = forms.IntegerField(min_value=10, # 10 seconds
max_value=2678400, # 31 days
initial=3600)
def clean_description(self):
description = self.cleaned_data.get("description")
if not description:
return None
else:
return description
def get_item_d(self):
return {f: v for f, v in self.cleaned_data.items() if v is not None}
@staticmethod
def get_initial(query):
initial = {}
for attr in ("rel_path", "type", "description", "interval"):
value = getattr(query, attr, None)
if value is not None:
initial[attr] = value
return initial
class KeyForm(forms.Form):
key = forms.CharField()
test = forms.ChoiceField(choices=(('EQ', ' = '),
('INT_LTE', 'integer ≤'),
('INT_GTE', 'integer ≥'),
('INT_GTE_LTE', '≤ integer ≤')),
initial='STR',
widget=forms.Select(attrs={'class': 'key-test-sel'}))
arg_l = forms.CharField(required=False)
arg_r = forms.CharField(required=True)
def clean(self):
cd = self.cleaned_data
test = cd.get('test')
arg_l = cd.get('arg_l')
arg_r = cd.get('arg_r')
if test and test != 'EQ':
if arg_r:
try:
cd['arg_r'] = int(arg_r)
except ValueError:
self.add_error('arg_r', 'not an integer')
if test == 'INT_GTE_LTE':
if arg_l is None:
self.add_error('arg_l', 'missing value')
else:
try:
cd['arg_l'] = int(arg_l)
except ValueError:
self.add_error('arg_l', 'not an integer')
return cd
class BaseKeyFormSet(forms.BaseFormSet):
def clean(self):
"""Checks that no two keys are the same"""
if any(self.errors):
# Don't bother validating the formset unless each form is valid on its own
return
keys = []
for form in self.forms:
key = form.cleaned_data['key']
if key in keys:
                raise forms.ValidationError("Keys in a preference file must be distinct.")
keys.append(key)
def get_keys(self):
keys = []
for kcd in self.cleaned_data:
if not kcd.get("DELETE"):
k = {'key': kcd['key']}
test = kcd['test']
arg_r = kcd['arg_r']
if test == 'EQ':
k['value'] = arg_r
elif test == 'INT_LTE':
k['max_value'] = arg_r
elif test == 'INT_GTE':
k['min_value'] = arg_r
else:
k['min_value'] = kcd['arg_l']
k['max_value'] = arg_r
keys.append(k)
return sorted(keys, key=lambda k: k['key'])
@staticmethod
def get_initial(preference_file):
initial = []
for k in preference_file.keys:
key = {'key': k.key}
if k.value is not None:
key['arg_r'] = k.value
key['test'] = 'EQ'
else:
min_value = k.min_value
max_value = k.max_value
if min_value is not None and max_value is not None:
key['test'] = 'INT_GTE_LTE'
key['arg_l'] = min_value
key['arg_r'] = max_value
elif min_value is not None:
key['test'] = 'INT_GTE'
key['arg_r'] = min_value
elif max_value is not None:
key['test'] = 'INT_LTE'
key['arg_r'] = max_value
initial.append(key)
return sorted(initial, key=lambda d: d['key'])
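# A minimal sketch (hypothetical cleaned_data, not part of the original code) of how
# get_keys() above maps one non-deleted KeyForm entry to a preference key dict:
#   {'key': 'AutoUpdate', 'test': 'INT_GTE_LTE', 'arg_l': 2, 'arg_r': 5, 'DELETE': False}
#       -> {'key': 'AutoUpdate', 'min_value': 2, 'max_value': 5}
#   {'key': 'Enabled', 'test': 'EQ', 'arg_r': 'true', 'DELETE': False}
#       -> {'key': 'Enabled', 'value': 'true'}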
KeyFormSet = forms.formset_factory(KeyForm,
formset=BaseKeyFormSet,
min_num=1, max_num=10, extra=0, can_delete=True)
class FileChecksumForm(forms.Form):
path = forms.CharField()
sha256 = forms.CharField(validators=[validate_sha256],
help_text="The result of shasum -a 256 /path/to/file")
description = forms.CharField(required=False,
widget=forms.Textarea(attrs={'rows': 3}))
interval = forms.IntegerField(min_value=10, # 10 seconds
max_value=2678400, # 31 days
initial=3600)
def clean_description(self):
description = self.cleaned_data.get("description")
if not description:
return None
else:
return description
def get_item_d(self):
return {f: v for f, v in self.cleaned_data.items() if v is not None}
@staticmethod
def get_initial(file_checksum):
initial = {}
for field in ("path", "sha256", "description", "interval"):
val = getattr(file_checksum, field, None)
if val:
initial[field] = val
return initial
class CreateComplianceProbeForm(BaseCreateProbeForm):
model = OsqueryComplianceProbe
def get_body(self):
return {}
# OsqueryDistributedQueryProbe
class DistributedQueryForm(forms.Form):
query = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control',
'rows': 5}))
def get_body(self):
return {'distributed_query': self.cleaned_data['query']}
class CreateDistributedQueryProbeForm(BaseCreateProbeForm, DistributedQueryForm):
model = OsqueryDistributedQueryProbe
field_order = ("name", "query")
# OsqueryFileCarveProbe
class FileCarveForm(forms.Form):
path = forms.CharField(help_text="Example: /Users/%/Downloads/%.jpg or /etc/hosts")
def get_body(self):
return {'path': self.cleaned_data['path']}
class CreateFileCarveProbeForm(BaseCreateProbeForm, FileCarveForm):
model = OsqueryFileCarveProbe
field_order = ("name", "path")
# FIM probes
class FilePathForm(forms.Form):
file_path = forms.CharField(help_text="Example: /Users/%/Library or /Users/%/Library/ or /Users/%/Library/%%")
file_access = forms.BooleanField(label="Observe file access events ?", initial=False, required=False,
help_text="File accesses on Linux using inotify may induce "
"unexpected and unwanted performance reduction.")
def clean_file_access(self):
file_access = self.cleaned_data.get("file_access")
if not file_access:
file_access = False
return file_access
def get_item_d(self):
return self.cleaned_data
@staticmethod
def get_initial(file_path):
return {"file_path": file_path.file_path,
"file_access": file_path.file_access}
class CreateFIMProbeForm(BaseCreateProbeForm, FilePathForm):
model = OsqueryFIMProbe
field_order = ("name", "file_path", "file_access")
def get_body(self):
return {'file_paths': [self.get_item_d()]}
| 35.344512 | 114 | 0.547399 | 10,950 | 0.943884 | 0 | 0 | 1,977 | 0.170416 | 0 | 0 | 2,134 | 0.18395 |
73d5dcabb54b57daa8b78e26015c8bd966917221
| 197 |
py
|
Python
|
src/dataclay/communication/grpc/messages/logicmodule/__init__.py
|
kpavel/pyclay
|
275bc8af5c57301231a20cca1cc88556a9c84c79
|
[
"BSD-3-Clause"
] | 1 |
2020-04-16T17:09:15.000Z
|
2020-04-16T17:09:15.000Z
|
src/dataclay/communication/grpc/messages/logicmodule/__init__.py
|
kpavel/pyclay
|
275bc8af5c57301231a20cca1cc88556a9c84c79
|
[
"BSD-3-Clause"
] | 35 |
2019-11-06T17:06:16.000Z
|
2021-04-12T16:27:20.000Z
|
src/dataclay/communication/grpc/messages/logicmodule/__init__.py
|
kpavel/pyclay
|
275bc8af5c57301231a20cca1cc88556a9c84c79
|
[
"BSD-3-Clause"
] | 1 |
2020-05-06T11:28:16.000Z
|
2020-05-06T11:28:16.000Z
|
""" Class description goes here. """
"""Package containing gRPC classes."""
__author__ = 'Enrico La Sala <[email protected]>'
__copyright__ = '2017 Barcelona Supercomputing Center (BSC-CNS)'
| 24.625 | 64 | 0.725888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.817259 |
73db434f1dcc511c2a6170ca3b1d4a1d255f07e3
| 87 |
py
|
Python
|
src/cms/models/offers/__init__.py
|
mckinly/cms-django
|
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
|
[
"Apache-2.0"
] | 14 |
2020-12-03T07:56:30.000Z
|
2021-10-30T13:09:50.000Z
|
src/cms/models/offers/__init__.py
|
Integreat/integreat-cms
|
b3f80964a6182d714f26ac229342b47e1c7c4f29
|
[
"Apache-2.0"
] | 367 |
2020-11-20T00:34:20.000Z
|
2021-12-14T15:20:42.000Z
|
src/cms/models/offers/__init__.py
|
mckinly/cms-django
|
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
|
[
"Apache-2.0"
] | 3 |
2021-02-09T18:46:52.000Z
|
2021-12-07T10:41:39.000Z
|
"""
This package contains :class:`~cms.models.offers.offer_template.OfferTemplate`
"""
| 21.75 | 78 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.988506 |
73dc1ffc39f60e86bf599c00df7b537997fbf251
| 5,150 |
py
|
Python
|
service/audio_trigger_test.py
|
nicolas-f/noisesensor
|
fc007fe5e03b0deca0863d987cb6776be1cd2bef
|
[
"BSD-3-Clause"
] | 2 |
2020-03-29T21:58:45.000Z
|
2021-09-21T12:43:15.000Z
|
service/audio_trigger_test.py
|
nicolas-f/noisesensor
|
fc007fe5e03b0deca0863d987cb6776be1cd2bef
|
[
"BSD-3-Clause"
] | null | null | null |
service/audio_trigger_test.py
|
nicolas-f/noisesensor
|
fc007fe5e03b0deca0863d987cb6776be1cd2bef
|
[
"BSD-3-Clause"
] | 1 |
2019-02-19T14:53:01.000Z
|
2019-02-19T14:53:01.000Z
|
import numpy
from scipy.spatial import distance
import matplotlib.pyplot as plt
import math
import matplotlib.ticker as mtick
freqs = [20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500]
def cosine_distance(a, b, weight = None):
assert len(a) == len(b)
if weight is None:
weight = [1.0] * len(a)
ab_sum, a_sum, b_sum = 0, 0, 0
for ai, bi, wi in zip(a, b, weight):
ab_sum += ai * bi
a_sum += ai * ai
b_sum += bi * bi
return 1 - ab_sum / math.sqrt(a_sum * b_sum)
# from scipy
def _validate_weights(w, dtype=numpy.double):
w = _validate_vector(w, dtype=dtype)
if numpy.any(w < 0):
raise ValueError("Input weights should be all non-negative")
return w
# from scipy
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = numpy.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = numpy.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
# from scipy
def dist_cosine(u, v, w=None):
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
uv = numpy.average(u * v, weights=w)
uu = numpy.average(numpy.square(u), weights=w)
vv = numpy.average(numpy.square(v), weights=w)
dist = 1.0 - uv / numpy.sqrt(uu * vv)
return dist
def autocolor(bar):
for col in bar:
if col.get_height() > 0.995:
col.set_color('r')
trigger = [40.49, 39.14, 34.47, 30.5, 39.54, 31.98, 38.37, 43.84, 36.09, 43.72, 40.55, 39.25, 39.15, 38.36, 38.3, 36.58,
39.9, 47.76, 51.64, 37.2, 44.89, 46.6, 51.08, 37.77, 28, 29.59, 30.25, 23.16, 25.74]
weight = [0.04,0.04,0.04,0.04,0.04,0.04,0.04,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14, 0.24, 0.41,
0.60, 0.80, 0.94, 1.0, 0.94, 0.80, 0.60, 0.41]
ref_spectrum = numpy.genfromtxt('test/test2_far.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test1_spectrum = numpy.genfromtxt('test/test1_near.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test2_spectrum = numpy.genfromtxt('test/test2_far_far.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test3_spectrum = numpy.genfromtxt('test/test_background.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
dist0 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, ref_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist1 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test1_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist2 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test2_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist3 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test3_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist0_bis = numpy.ones(len(ref_spectrum)) - [dist_cosine(trigger, ref_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
#print(numpy.around(dist0_bis - dist0, 3))
ref_spectrum = numpy.rot90(ref_spectrum)
test1_spectrum = numpy.rot90(test1_spectrum)
test2_spectrum = numpy.rot90(test2_spectrum)
test3_spectrum = numpy.rot90(test3_spectrum)
fig, axes = plt.subplots(nrows=4, ncols=3, constrained_layout=True)
gs = axes[0, 0].get_gridspec()
axes[0, 1].imshow(ref_spectrum)
autocolor(axes[0, 2].bar(numpy.arange(len(dist0)), dist0))
axes[1, 1].imshow(test1_spectrum)
autocolor(axes[1, 2].bar(numpy.arange(len(dist1)), dist1))
axes[2, 1].imshow(test2_spectrum)
autocolor(axes[2, 2].bar(numpy.arange(len(dist2)), dist2))
axes[3, 1].imshow(test3_spectrum)
axes[3, 2].bar(numpy.arange(len(dist2)), dist3)
for ax in axes[0:, 0]:
ax.remove()
axbig = fig.add_subplot(gs[0:, 0])
axbig.set_title("Spectrum trigger")
axbig.imshow(numpy.rot90([trigger]))
for i in range(len(axes)):
axes[i, 2].set_ylim([0.95, 1.0])
axes[i, 1].set_yticks(range(len(freqs))[::5])
axes[i, 1].set_yticklabels([str(ylab) + " Hz" for ylab in freqs[::5]][::-1])
axes[i, 1].set_xticks(range(len(ref_spectrum[0]))[::20])
    axes[i, 1].set_xticklabels([str(xlabel) + " s" for xlabel in numpy.arange(0, 10, 0.125)][::20])
axes[i, 2].set_xticks(range(len(ref_spectrum[0]))[::20])
    axes[i, 2].set_xticklabels([str(xlabel) + " s" for xlabel in numpy.arange(0, 10, 0.125)][::20])
axes[i, 2].set_ylabel("Cosine similarity (%)")
axes[i, 2].yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
axes[i, 1].set_title("Spectrogram "+str(i)+" (dB)")
axbig.set_yticks(range(len(freqs)))
axbig.set_yticklabels([str(ylab) + " Hz" for ylab in freqs][::-1])
axbig.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
plt.show()
| 37.591241 | 162 | 0.665825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.121165 |
73ddae6e14c41c647c3dc794212f25f68df13789
| 1,094 |
py
|
Python
|
Python/6-hc_sr04-sensor.py
|
matr1xprogrammer/raspberry_pi-iot
|
7ff8247fde839a23dd75720c58f3b04d86485ec4
|
[
"MIT"
] | 2 |
2017-02-18T12:05:25.000Z
|
2017-02-18T12:15:53.000Z
|
Python/6-hc_sr04-sensor.py
|
matr1xprogrammer/raspberry_pi-iot
|
7ff8247fde839a23dd75720c58f3b04d86485ec4
|
[
"MIT"
] | null | null | null |
Python/6-hc_sr04-sensor.py
|
matr1xprogrammer/raspberry_pi-iot
|
7ff8247fde839a23dd75720c58f3b04d86485ec4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# HC-SR04 Ultrasonic ranging sensor
#
import RPi.GPIO as GPIO
import sys, time
try:
GPIO.setmode(GPIO.BCM)
TRIG = 23
ECHO = 24
print "Distance measurement in progress..."
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
GPIO.output(TRIG, False)
while True:
print "Waiting for sensor to settle"
time.sleep(2)
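        # The HC-SR04 starts a measurement when TRIG is held high for about 10 microseconds.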
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO) == 0:
pulse_start = time.time()
while GPIO.input(ECHO) == 1:
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start
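        # Sound travels at roughly 34300 cm/s and the echo covers the distance twice,
        # so distance (cm) = pulse_duration * 34300 / 2 = pulse_duration * 17150.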
distance = pulse_duration * 17150
distance = round(distance, 2)
print "Distance: ", distance, "cm"
except KeyboardInterrupt:
GPIO.cleanup()
print("<Ctrl+C> pressed... exiting.")
except:
GPIO.cleanup()
print("Error: {0} {1}".format(sys.exc_info()[0], sys.exc_info()[1]))
| 22.791667 | 74 | 0.543876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.167276 |
73de45d1436eebf32a4bbacaf18feaafc9502e50
| 10,651 |
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/arista/eos/plugins/module_utils/network/eos/config/ospfv3/ospfv3.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1 |
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/arista/eos/plugins/module_utils/network/eos/config/ospfv3/ospfv3.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12 |
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/arista/eos/plugins/module_utils/network/eos/config/ospfv3/ospfv3.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The eos_ospfv3 config file.
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created.
"""
import re
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
dict_merge,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.resource_module import (
ResourceModule,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.facts import (
Facts,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.rm_templates.ospfv3 import (
Ospfv3Template,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
get_from_dict,
)
class Ospfv3(ResourceModule):
"""
The eos_ospfv3 config class
"""
def __init__(self, module):
super(Ospfv3, self).__init__(
empty_fact_val={},
facts_module=Facts(module),
module=module,
resource="ospfv3",
tmplt=Ospfv3Template(module=module),
)
self.parsers = [
"vrf",
"address_family",
"adjacency",
"auto_cost",
"area.default_cost",
"area.authentication",
"area.encryption",
"area.nssa",
"area.ranges",
"area.stub",
"bfd",
"default_information",
"default_metric",
"distance",
"fips_restrictions",
"graceful_restart",
"graceful_restart_period",
"graceful_restart_helper",
"log_adjacency_changes",
"max_metric",
"maximum_paths",
"passive_interface",
"redistribute",
"router_id",
"shutdown",
"timers.lsa",
"timers.out_delay",
"timers.pacing",
"timers.throttle.lsa",
"timers.throttle.spf",
]
def execute_module(self):
"""Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
if self.state not in ["parsed", "gathered"]:
self.generate_commands()
self.run_commands()
return self.result
def generate_commands(self):
"""Generate configuration commands to send based on
want, have and desired state.
"""
wantd = {}
haved = {}
for entry in self.want.get("processes", []):
wantd.update({entry["vrf"]: entry})
for entry in self.have.get("processes", []):
haved.update({entry["vrf"]: entry})
# turn all lists of dicts into dicts prior to merge
for entry in wantd, haved:
self._ospf_list_to_dict(entry)
# if state is merged, merge want onto have and then compare
if self.state == "merged":
wantd = dict_merge(haved, wantd)
# if state is deleted, empty out wantd and set haved to wantd
if self.state == "deleted":
h_del = {}
for k, v in iteritems(haved):
if k in wantd or not wantd:
h_del.update({k: v})
wantd = {}
haved = h_del
# remove superfluous config for overridden and deleted
if self.state in ["overridden", "deleted"]:
for k, have in iteritems(haved):
if k not in wantd and have.get("vrf") == k:
self.commands.append(self._tmplt.render(have, "vrf", True))
for k, want in iteritems(wantd):
self._compare(want=want, have=haved.pop(k, {}))
def _compare(self, want, have):
"""Leverages the base class `compare()` method and
populates the list of commands to be run by comparing
the `want` and `have` data with the `parsers` defined
for the Ospfv3 network resource.
"""
begin = len(self.commands)
self._af_compare(want=want, have=have)
self._global_compare(want=want, have=have)
if len(self.commands) != begin or (not have and want):
self.commands.insert(
begin, self._tmplt.render(want or have, "vrf", False)
)
self.commands.append("exit")
def _global_compare(self, want, have):
for name, entry in iteritems(want):
if name in ["vrf", "address_family"]:
continue
if not isinstance(entry, dict) and name != "areas":
self.compare(
parsers=self.parsers,
want={name: entry},
have={name: have.pop(name, None)},
)
else:
if name == "areas" and entry:
self._areas_compare(
want={name: entry}, have={name: have.get(name, {})}
)
else:
                    # passing dict without vrf, in order to avoid a "no router ospfv3" command
h = {}
for i in have:
if i != "vrf":
h.update({i: have[i]})
self.compare(
parsers=self.parsers,
want={name: entry},
have={name: h.pop(name, {})},
)
# remove remaining items in have for replaced
for name, entry in iteritems(have):
if name in ["vrf", "address_family"]:
continue
if not isinstance(entry, dict):
self.compare(
parsers=self.parsers,
want={name: want.pop(name, None)},
have={name: entry},
)
else:
                # passing dict without vrf, in order to avoid a "no router ospfv3" command
# w = {i: want[i] for i in want if i != "vrf"}
self.compare(
parsers=self.parsers,
want={name: want.pop(name, {})},
have={name: entry},
)
def _af_compare(self, want, have):
wafs = want.get("address_family", {})
hafs = have.get("address_family", {})
for name, entry in iteritems(wafs):
begin = len(self.commands)
self._compare_lists(want=entry, have=hafs.get(name, {}))
self._areas_compare(want=entry, have=hafs.get(name, {}))
self.compare(
parsers=self.parsers, want=entry, have=hafs.pop(name, {})
)
if (
len(self.commands) != begin
and "afi" in entry
and entry["afi"] != "router"
):
self._rotate_commands(begin=begin)
self.commands.insert(
begin, self._tmplt.render(entry, "address_family", False)
)
self.commands.append("exit")
for name, entry in iteritems(hafs):
self.addcmd(entry, "address_family", True)
def _rotate_commands(self, begin=0):
# move negate commands to beginning
for cmd in self.commands[begin::]:
negate = re.match(r"^no .*", cmd)
if negate:
self.commands.insert(
begin, self.commands.pop(self.commands.index(cmd))
)
begin += 1
def _areas_compare(self, want, have):
wareas = want.get("areas", {})
hareas = have.get("areas", {})
for name, entry in iteritems(wareas):
self._area_compare(want=entry, have=hareas.pop(name, {}))
for name, entry in iteritems(hareas):
self._area_compare(want={}, have=entry)
def _area_compare(self, want, have):
parsers = [
"area.default_cost",
"area.encryption",
"area.authentication",
"area.nssa",
"area.stub",
]
self.compare(parsers=parsers, want=want, have=have)
self._area_compare_lists(want=want, have=have)
def _area_compare_lists(self, want, have):
for attrib in ["ranges"]:
wdict = want.get(attrib, {})
hdict = have.get(attrib, {})
for key, entry in iteritems(wdict):
if entry != hdict.pop(key, {}):
entry["area_id"] = want["area_id"]
self.addcmd(entry, "area.{0}".format(attrib), False)
# remove remaining items in have for replaced
for entry in hdict.values():
entry["area_id"] = have["area_id"]
self.addcmd(entry, "area.{0}".format(attrib), True)
def _compare_lists(self, want, have):
for attrib in ["redistribute"]:
wdict = get_from_dict(want, attrib) or {}
hdict = get_from_dict(have, attrib) or {}
for key, entry in iteritems(wdict):
if entry != hdict.pop(key, {}):
self.addcmd(entry, attrib, False)
# remove remaining items in have for replaced
for entry in hdict.values():
self.addcmd(entry, attrib, True)
def _ospf_list_to_dict(self, entry):
for name, proc in iteritems(entry):
for area in proc.get("areas", []):
if "ranges" in area:
range_dict = {}
for entry in area.get("ranges", []):
range_dict.update({entry["address"]: entry})
area["ranges"] = range_dict
areas_dict = {}
for entry in proc.get("areas", []):
areas_dict.update({entry["area_id"]: entry})
proc["areas"] = areas_dict
redis_dict = {}
for entry in proc.get("redistribute", []):
redis_dict.update({entry["routes"]: entry})
proc["redistribute"] = redis_dict
if "address_family" in proc:
addr_dict = {}
for entry in proc.get("address_family", []):
addr_dict.update({entry["afi"]: entry})
proc["address_family"] = addr_dict
self._ospf_list_to_dict(proc["address_family"])
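# A minimal sketch (hypothetical data, not part of the module) of what _ospf_list_to_dict
# does to one parsed process, so that want/have entries can be compared key by key:
#   before: {"default": {"vrf": "default",
#                        "areas": [{"area_id": "0.0.0.2",
#                                   "ranges": [{"address": "10.0.0.0/8"}]}],
#                        "redistribute": [{"routes": "connected"}]}}
#   after:  {"default": {"vrf": "default",
#                        "areas": {"0.0.0.2": {"area_id": "0.0.0.2",
#                                              "ranges": {"10.0.0.0/8": {"address": "10.0.0.0/8"}}}},
#                        "redistribute": {"connected": {"routes": "connected"}}}}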
| 36.351536 | 103 | 0.529903 | 9,526 | 0.894376 | 0 | 0 | 0 | 0 | 0 | 0 | 2,537 | 0.238194 |
73de5fb73d8473474f580b5f20b98adc8660e07b
| 1,141 |
py
|
Python
|
platypush/plugins/logger/__init__.py
|
BlackLight/runbullet
|
8d26c8634d2677b4402f0a21b9ab8244b44640db
|
[
"MIT"
] | 3 |
2017-11-03T17:03:36.000Z
|
2017-11-10T06:38:15.000Z
|
platypush/plugins/logger/__init__.py
|
BlackLight/runbullet
|
8d26c8634d2677b4402f0a21b9ab8244b44640db
|
[
"MIT"
] | 14 |
2017-11-04T11:46:37.000Z
|
2017-12-11T19:15:27.000Z
|
platypush/plugins/logger/__init__.py
|
BlackLight/runbullet
|
8d26c8634d2677b4402f0a21b9ab8244b44640db
|
[
"MIT"
] | null | null | null |
from platypush.plugins import Plugin, action
class LoggerPlugin(Plugin):
"""
Plugin to log traces on the standard Platypush logger
"""
@action
def trace(self, msg, *args, **kwargs):
"""
logger.trace wrapper
"""
self.logger.trace(msg, *args, **kwargs)
@action
def debug(self, msg, *args, **kwargs):
"""
logger.debug wrapper
"""
self.logger.debug(msg, *args, **kwargs)
@action
def info(self, msg, *args, **kwargs):
"""
logger.info wrapper
"""
self.logger.info(msg, *args, **kwargs)
@action
def warning(self, msg, *args, **kwargs):
"""
logger.warning wrapper
"""
self.logger.warning(msg, *args, **kwargs)
@action
def error(self, msg, *args, **kwargs):
"""
logger.error wrapper
"""
self.logger.error(msg, *args, **kwargs)
@action
def exception(self, exception, *args, **kwargs):
"""
logger.exception wrapper
"""
self.logger.exception(exception, *args, **kwargs)
# vim:sw=4:ts=4:et:
| 21.12963 | 57 | 0.531113 | 1,070 | 0.937774 | 0 | 0 | 933 | 0.817704 | 0 | 0 | 357 | 0.312883 |
73de6cd753fb9320e7590a96928403d694712cd8
| 1,632 |
py
|
Python
|
hc/front/tests/test_add_pdc.py
|
IfBkg/healthchecks
|
dcd8a74c6b0bcdb0065e7c27d5b6639823400562
|
[
"BSD-3-Clause"
] | 1 |
2020-07-13T15:33:31.000Z
|
2020-07-13T15:33:31.000Z
|
hc/front/tests/test_add_pdc.py
|
IfBkg/healthchecks
|
dcd8a74c6b0bcdb0065e7c27d5b6639823400562
|
[
"BSD-3-Clause"
] | 53 |
2020-11-27T14:55:01.000Z
|
2021-04-22T10:01:13.000Z
|
hc/front/tests/test_add_pdc.py
|
IfBkg/healthchecks
|
dcd8a74c6b0bcdb0065e7c27d5b6639823400562
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test.utils import override_settings
from hc.api.models import Channel
from hc.test import BaseTestCase
@override_settings(PD_VENDOR_KEY="foo")
class AddPdConnectTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = "/projects/%s/add_pdc/" % self.project.code
def test_it_works(self):
session = self.client.session
session["pd"] = "1234567890AB" # 12 characters
session.save()
self.client.login(username="[email protected]", password="password")
url = self.url + "1234567890AB/?service_key=123"
r = self.client.get(url, follow=True)
self.assertRedirects(r, self.channels_url)
c = Channel.objects.get()
self.assertEqual(c.kind, "pd")
self.assertEqual(c.pd_service_key, "123")
self.assertEqual(c.project, self.project)
@override_settings(PD_VENDOR_KEY=None)
def test_it_requires_vendor_key(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
@override_settings(PD_ENABLED=False)
def test_it_requires_pd_enabled(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
def test_it_requires_rw_access(self):
self.bobs_membership.rw = False
self.bobs_membership.save()
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 403)
| 34 | 76 | 0.677083 | 1,474 | 0.903186 | 0 | 0 | 1,514 | 0.927696 | 0 | 0 | 215 | 0.13174 |
73dee1fd408bd1037f09660c2312f58f954869d8
| 994 |
py
|
Python
|
atcoder/corp/codethxfes2014b_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1 |
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/corp/codethxfes2014b_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/corp/codethxfes2014b_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
import sys
sys.setrecursionlimit(3000)
def check(rs, cs):
table[rs][cs] = 2
if (rs, cs) == (rg, cg): return True
if rs > 0 and table[rs - 1][cs] == 1 and check(rs - 1, cs):
return True
if cs > 0 and table[rs][cs - 1] == 1 and check(rs, cs - 1):
return True
if rs < r - 1 and table[rs + 1][cs] == 1 and check(rs + 1, cs):
return True
if cs < c - 1 and table[rs][cs + 1] == 1 and check(rs, cs + 1):
return True
return False
r, c = map(int, input().split())
table = [[0] * c for _ in range(r)]
rs, cs = map(lambda x:int(x) - 1, input().split())
rg, cg = map(lambda x:int(x) - 1, input().split())
n = int(input())
draw = [list(map(int, input().split())) for _ in range(n)]
for ri, ci, hi, wi in draw:
ri -= 1
ci -= 1
for i in range(ri, ri+hi):
for j in range(ci, ci+wi):
table[i][j] = 1
if table[rs][cs] != 1 or table[rg][cg] != 1:
print('NO')
else:
print('YES' if check(rs, cs) else 'NO')
| 28.4 | 67 | 0.524145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.013078 |
73df0b517cdf0b8ebc3a55ea196f1562c83f9f1c
| 4,329 |
py
|
Python
|
tests/test_bullet_train.py
|
masschallenge/bullet-train-python-client
|
bcec653c0b4ed65779ab4e1a2f809810c684be00
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bullet_train.py
|
masschallenge/bullet-train-python-client
|
bcec653c0b4ed65779ab4e1a2f809810c684be00
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_bullet_train.py
|
masschallenge/bullet-train-python-client
|
bcec653c0b4ed65779ab4e1a2f809810c684be00
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import logging
from unittest import mock, TestCase
from bullet_train import BulletTrain
import os
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
TEST_API_URL = 'https://test.bullet-train.io/api'
TEST_IDENTIFIER = 'test-identity'
TEST_FEATURE = 'test-feature'
class MockResponse:
def __init__(self, data, status_code):
self.json_data = json.loads(data)
self.status_code = status_code
def json(self):
return self.json_data
def mock_response(filename, *args, status=200, **kwargs):
print('Hit URL %s with params' % args[0], kwargs.get('params'))
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, filename), 'rt') as f:
return MockResponse(f.read(), status)
def mocked_get_specific_feature_flag_enabled(*args, **kwargs):
return mock_response('data/get-flag-for-specific-feature-enabled.json', *args, **kwargs)
def mocked_get_specific_feature_flag_disabled(*args, **kwargs):
return mock_response('data/get-flag-for-specific-feature-disabled.json', *args, **kwargs)
def mocked_get_specific_feature_flag_not_found(*args, **kwargs):
return mock_response('data/not-found.json', *args, status=404, **kwargs)
def mocked_get_value(*args, **kwargs):
return mock_response('data/get-value-for-specific-feature.json', *args, **kwargs)
def mocked_get_identity_flags_with_trait(*args, **kwargs):
return mock_response('data/get-identity-flags-with-trait.json', *args, **kwargs)
def mocked_get_identity_flags_without_trait(*args, **kwargs):
return mock_response('data/get-identity-flags-without-trait.json', *args, **kwargs)
class BulletTrainTestCase(TestCase):
test_environment_key = 'test-env-key'
def setUp(self) -> None:
self.bt = BulletTrain(environment_id=self.test_environment_key, api=TEST_API_URL)
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_specific_feature_flag_enabled)
def test_has_feature_returns_true_if_feature_returned(self, mock_get):
# When
result = self.bt.has_feature(TEST_FEATURE)
# Then
assert result
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_specific_feature_flag_not_found)
def test_has_feature_returns_false_if_feature_not_returned(self, mock_get):
# When
result = self.bt.has_feature(TEST_FEATURE)
# Then
assert not result
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_specific_feature_flag_enabled)
def test_feature_enabled_returns_true_if_feature_enabled(self, mock_get):
# When
result = self.bt.feature_enabled(TEST_FEATURE)
# Then
assert result
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_specific_feature_flag_disabled)
    def test_feature_enabled_returns_false_if_feature_disabled(self, mock_get):
# When
result = self.bt.feature_enabled(TEST_FEATURE)
# Then
assert not result
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_value)
def test_get_value_returns_value_for_environment_if_feature_exists(self, mock_get):
# When
result = self.bt.get_value(TEST_FEATURE)
# Then
assert result == 'Test value'
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_specific_feature_flag_not_found)
def test_get_value_returns_None_for_environment_if_feature_does_not_exist(self, mock_get):
# When
result = self.bt.get_value(TEST_FEATURE)
# Then
assert result is None
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_identity_flags_with_trait)
def test_get_trait_returns_trait_value_if_trait_key_exists(self, mock_get):
# When
result = self.bt.get_trait('trait_key', TEST_IDENTIFIER)
# Then
assert result == 'trait_value'
@mock.patch('bullet_train.bullet_train.requests.get', side_effect=mocked_get_identity_flags_without_trait)
def test_get_trait_returns_None_if_trait_key_does_not_exist(self, mock_get):
# When
result = self.bt.get_trait('trait_key', TEST_IDENTIFIER)
# Then
assert result is None
| 34.632 | 113 | 0.736891 | 2,849 | 0.65812 | 0 | 0 | 2,409 | 0.55648 | 0 | 0 | 823 | 0.190113 |
73df243fb4b55e390ea6a1111a32c8c6671d261d
| 3,105 |
py
|
Python
|
plim/console.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 85 |
2015-01-08T20:15:54.000Z
|
2022-03-12T21:51:27.000Z
|
plim/console.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 18 |
2015-02-27T14:59:08.000Z
|
2021-09-24T10:27:19.000Z
|
plim/console.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 14 |
2015-02-26T07:20:42.000Z
|
2022-02-01T17:52:16.000Z
|
"""
This module contains entry points for command-line utilities provided by Plim package.
"""
import sys
import os
import argparse
import codecs
from pkg_resources import get_distribution
from pkg_resources import EntryPoint
from mako.template import Template
from mako.lookup import TemplateLookup
from .util import PY3K
def plimc(args=None, stdout=None):
"""This is the `plimc` command line utility
:param args: list of command-line arguments. If None, then ``sys.argv[1:]`` will be used.
:type args: list or None
:param stdout: file-like object representing stdout. If None, then ``sys.stdout`` will be used.
Custom stdout is used for testing purposes.
:type stdout: None or a file-like object
"""
# Parse arguments
# ------------------------------------
cli_parser = argparse.ArgumentParser(description='Compile plim source files into mako files.')
cli_parser.add_argument('source', help="path to source plim template")
cli_parser.add_argument('-o', '--output', help="write result to FILE.")
cli_parser.add_argument('-e', '--encoding', default='utf-8', help="content encoding")
cli_parser.add_argument('-p', '--preprocessor', default='plim:preprocessor',
help="Preprocessor instance that will be used for parsing the template")
cli_parser.add_argument('-H', '--html', action='store_true', help="Render HTML output instead of Mako template")
cli_parser.add_argument('-V', '--version', action='version',
version='Plim {}'.format(get_distribution("Plim").version))
if args is None:
args = sys.argv[1:]
args = cli_parser.parse_args(args)
# Get custom preprocessor, if specified
# -------------------------------------
preprocessor_path = args.preprocessor
# Add an empty string path, so modules located at the current working dir
# are reachable and considered in the first place (see issue #32).
sys.path.insert(0, '')
preprocessor = EntryPoint.parse('x={}'.format(preprocessor_path)).load(False)
# Render to html, if requested
# ----------------------------
if args.html:
root_dir = os.path.dirname(os.path.abspath(args.source))
template_file = os.path.basename(args.source)
lookup = TemplateLookup(directories=[root_dir],
input_encoding=args.encoding,
output_encoding=args.encoding,
preprocessor=preprocessor)
content = lookup.get_template(template_file).render_unicode()
else:
with codecs.open(args.source, 'rb', args.encoding) as fd:
content = preprocessor(fd.read())
# Output
# ------------------------------------
if args.output is None:
if stdout is None:
stdout = PY3K and sys.stdout.buffer or sys.stdout
fd = stdout
content = codecs.encode(content, 'utf-8')
else:
fd = codecs.open(args.output, 'wb', args.encoding)
try:
fd.write(content)
finally:
fd.close()
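# Example invocations (hypothetical file names), using the options defined above:
#   plimc page.plim -o page.mako                 # compile Plim source to a Mako template
#   plimc -H page.plim -o page.html              # render HTML instead of Mako
#   plimc -p mypackage:preprocessor page.plim    # use a custom preprocessor entry point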
| 40.324675 | 116 | 0.622544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,251 | 0.402899 |
73e0868276739ce21107e9b9452274d8030151db
| 2,568 |
py
|
Python
|
devel_notes/test_class_speed.py
|
mekhub/alphafold
|
8d89abf73ea07841b550b968aceae794acb244df
|
[
"MIT"
] | 3 |
2019-05-15T16:46:20.000Z
|
2019-07-19T13:27:45.000Z
|
devel_notes/test_class_speed.py
|
mekhub/alphafold
|
8d89abf73ea07841b550b968aceae794acb244df
|
[
"MIT"
] | null | null | null |
devel_notes/test_class_speed.py
|
mekhub/alphafold
|
8d89abf73ea07841b550b968aceae794acb244df
|
[
"MIT"
] | 4 |
2020-02-08T02:43:01.000Z
|
2021-08-22T09:23:17.000Z
|
#!/usr/bin/python
import time
import sys
import os
from copy import deepcopy
sys.path.append(os.path.join(os.getcwd(), '..'))
from alphafold.partition import DynamicProgrammingData as DP
def getval( DP, idx ):
return DP.X[ idx ][ idx ]
x = [[]]*500
for i in range( 500 ): x[i] = [0.0]*500
dx = deepcopy( x )
xcontrib = [[]]*500
for i in range( 500 ): xcontrib[i] = [[]]*500
xDP = DP( 500 ) # 500x500 object with other stuff in it.
N = 500000
print 'Try for ', N, 'cycles each:'
# Time getting
print 'GETTING'
t0 = time.time()
for i in range( N ): y = x[56][56]
t1 = time.time()
print t1 - t0, 'y = x[56][56]'
t0 = time.time()
for i in range( N ): y = xDP.X[56][56]
t1 = time.time()
print t1 - t0,'y = xDP.X[56][56]'
t0 = time.time()
for i in range( N ): y = getval(xDP,56)
t1 = time.time()
print t1 - t0, 'y = getval(xDP,56)'
t0 = time.time()
for i in range( N ): y = xDP[56][56]
t1 = time.time()
print t1 - t0, 'y = xDP[56][56]'
# Time setting
print 'SETTING'
t0 = time.time()
for i in range( N ): x[56][56] = 20
t1 = time.time()
print t1 - t0, 'x[56][56] = 20'
t0 = time.time()
for i in range( N ): xDP.X[56][56] = 20
t1 = time.time()
print t1 - t0,'xDP.X[56][56] = 20'
t0 = time.time()
for i in range( N ):
val = 20
xDP.X[56][56] = val
t1 = time.time()
print t1 - t0,'val = 20; xDP.X[56][56] = val'
t0 = time.time()
for i in range( N ): xDP[56][56] = 20
t1 = time.time()
print t1 - t0,'xDP[56][56] = 20'
# Time setting, including derivs
print 'SETTING INCLUDE DERIVS'
t0 = time.time()
for i in range( N ):
x[56][56] = 20
dx[56][56] = 0
t1 = time.time()
print t1 - t0, 'x[56][56] = 20, dx[56][56] = 20'
t0 = time.time()
for i in range( N ):
x[56][56] = (20,0)
t1 = time.time()
print t1 - t0, 'x[56][56] = (20,0)'
t0 = time.time()
for i in range( N ):
xDP.X[56][56] = 20
xDP.dX[56][56] = 0
t1 = time.time()
print t1 - t0,'xDP.X[56][56] = 20, xDP.dX[56][56]'
t0 = time.time()
for i in range( N ):
xDP.add(56,56,20)
t1 = time.time()
print t1 - t0,'xDP += 20'
# Time setting, including derivs and contribs
print 'SETTING INCLUDE DERIVS AND CONTRIBS'
t0 = time.time()
for i in range( N ):
x[56][56] = 20
dx[56][56] = 0
xcontrib[56][56].append( [x,56,56,20] )
t1 = time.time()
print t1 - t0, 'x[56][56] = 20'
t0 = time.time()
for i in range( N ):
xDP.X[56][56] = 20
xDP.dX[56][56] = 0
xDP.X_contrib[56][56].append( [x,56,56,20] )
t1 = time.time()
print t1 - t0,'xDP.X[56][56] = 20'
t0 = time.time()
for i in range( N ):
xDP.add(56,56,20)
t1 = time.time()
print t1 - t0,'xDP += 20'
| 20.709677 | 60 | 0.575545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.223131 |
73e13d84ff4673d8d1b1b964136674b1bd1ae5ef
| 688 |
py
|
Python
|
testRead.py
|
BichonCby/BaseBSPython
|
411f7f5be5636aa7dc9975fb0ab61daa37e6d40a
|
[
"MIT"
] | null | null | null |
testRead.py
|
BichonCby/BaseBSPython
|
411f7f5be5636aa7dc9975fb0ab61daa37e6d40a
|
[
"MIT"
] | null | null | null |
testRead.py
|
BichonCby/BaseBSPython
|
411f7f5be5636aa7dc9975fb0ab61daa37e6d40a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: Latin-1 -*-
import time
from Definitions import *
#from ev3dev2.motor import OUTPUT_B,LargeMotor
from ev3dev2.sensor import *
from AddSensors import AngleSensor
from ev3dev2.sensor.lego import TouchSensor
import Trace
trace = Trace.Trace()
i=0
toucher = TouchSensor(INPUT_3)
EncoderSensRight = AngleSensor(INPUT_1)
EncoderSensLeft = AngleSensor(INPUT_2)
trace.Log('toto\n')
while i<50:
top = time.time()
i=i+1
#toucher.value()
fic=open('/sys/class/lego-sensor/sensor0/value0','r')
val = fic.read()
fic.close()
duration = (time.time()-top)
trace.Log(val + ': %.2f\n' %(duration*1000))
time.sleep(0.1)
trace.Close()
| 22.193548 | 57 | 0.699128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.241279 |
73e2d9cdb5fe490b5f63b902095fb196603f96d6
| 1,912 |
py
|
Python
|
circular-queue/circular-queue.py
|
tywmick/data-structures
|
996451b470a118f3f27faab6e109f50716e399a5
|
[
"Unlicense"
] | null | null | null |
circular-queue/circular-queue.py
|
tywmick/data-structures
|
996451b470a118f3f27faab6e109f50716e399a5
|
[
"Unlicense"
] | null | null | null |
circular-queue/circular-queue.py
|
tywmick/data-structures
|
996451b470a118f3f27faab6e109f50716e399a5
|
[
"Unlicense"
] | null | null | null |
class CircularQueue:
"""
    A circular queue: a first-in-first-out data structure with a fixed buffer size.
"""
def __init__(self, size):
if type(size) is not int:
raise TypeError("Queue size must be a postive integer.")
if size <= 0:
raise ValueError("Queue size must be a postive integer.")
self.read_pos = 0
self.write_pos = 0
self.size = size
self.queue = [None for i in range(size)]
def enqueue(self, element):
"""
Adds an element to the buffer if the buffer is not already full.
:param element: The element you wish to add.
:returns: The element itself if it was added, or `None` if the buffer was full.
"""
if self.queue[self.write_pos] is None:
self.queue[self.write_pos] = element
self.write_pos = (self.write_pos + 1) % self.size
return element
else:
# Buffer is full
return None
def dequeue(self):
"""
Removes an element from the buffer if the buffer is not already empty.
:returns: The element removed (or `None` if the buffer was empty).
"""
if self.queue[self.read_pos] is None:
# Buffer is empty
return None
else:
item = self.queue[self.read_pos]
self.queue[self.read_pos] = None
self.read_pos = (self.read_pos + 1) % self.size
return item
def clear(self):
"""Clears the contents of the queue."""
self.queue = [None for i in range(self.size)]
self.read_pos = self.write_pos = 0
def print(self):
"""
        Returns the queue as a list, starting with the element that will be
        read next.
"""
return self.queue[self.read_pos :] + self.queue[: self.read_pos]
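if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module).
    q = CircularQueue(3)
    q.enqueue("a")
    q.enqueue("b")
    print(q.print())    # ['a', 'b', None]
    print(q.dequeue())  # 'a'
    print(q.print())    # ['b', None, None]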
| 31.344262 | 87 | 0.569038 | 1,911 | 0.999477 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.40272 |
73e339eb2591f2a4b2f2b9553c0b32fcb1202cbf
| 2,697 |
py
|
Python
|
infer.py
|
vic9527/ViClassifier
|
fd6c4730e880f35a9429277a6025219315e067cc
|
[
"MIT"
] | 1 |
2021-11-03T05:05:34.000Z
|
2021-11-03T05:05:34.000Z
|
infer.py
|
vic9527/viclassifier
|
fd6c4730e880f35a9429277a6025219315e067cc
|
[
"MIT"
] | null | null | null |
infer.py
|
vic9527/viclassifier
|
fd6c4730e880f35a9429277a6025219315e067cc
|
[
"MIT"
] | null | null | null |
def load_model(model_path, device_type='cuda'):
import torch
from viclassifier.utils import dev_opt
device = dev_opt.usingDevice(device_type)
model = torch.load(model_path, map_location=device)
model.to(device)
    # Disable BatchNormalization and Dropout at evaluation time
model.eval()
return model
def predict(model, image_path, idx_to_class=None, is_show=False, device_type='cuda'):
import torch
from PIL import Image, ImageDraw, ImageFont
from viclassifier.utils import dev_opt
from viclassifier.utils import trans_gen
device = dev_opt.usingDevice(device_type)
model.eval().to(device)
transform = trans_gen.genTrans('test')
image = Image.open(image_path).convert('RGB')
image_tensor = transform(image)
    # view() in PyTorch works like numpy's resize(): it lays out the tensor's data in
    # row-major order into the requested shape.
    # Note that the original tensor and the new tensor share memory, so changing one also
    # changes the other. To avoid that, clone() the tensor before calling view(); clone() is
    # also recorded in the computation graph, so gradients flowing to the copy reach the source tensor.
image_tensor_view = image_tensor.view(1, 3, 224, 224).to(device)
with torch.no_grad():
out = model(image_tensor_view)
ps = torch.exp(out)
topk, topclass = ps.topk(1, dim=1)
# print("Prediction : ", idx_to_class[topclass.cpu().numpy()[0][0]],
# ", Score: ", topk.cpu().numpy()[0][0])
if is_show:
text = str(topclass.cpu().numpy()[0][0]) + " " + str(topk.cpu().numpy()[0][0])
if idx_to_class is not None:
text = idx_to_class[topclass.cpu().numpy()[0][0]] + " " + str(topk.cpu().numpy()[0][0])
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('arial.ttf', 36)
draw.text((0, 0), text, (255, 0, 0), font=font)
image.show()
label = topclass.cpu().numpy()[0][0]
if idx_to_class is not None:
label = idx_to_class[label]
return label, topk.cpu().numpy()[0][0]
if __name__ == "__main__":
import os, sys
viclassifier_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(viclassifier_dir)
sys.path.append(viclassifier_dir)
model = load_model('D:\\myai\\projects\\tmp\\git\\viclassifier\\tmps\\model.pth')
print(model)
image_path = r'C:\xxx\xxx.jpg'
    # ### Swapping keys and values in a Python dict ###
    # d1 = {'a': 1, 'b': 2, 'c': 3}
    # # Swap the key/value pairs with a loop
    # d2 = {}
    # for key, value in d1.items():
    #     d2[value] = key
    #
    # # With a dict comprehension
    # d2 = {k: v for v, k in d1.items()}
    #
    # # With zip()
    # d2 = dict(zip(d1.values(), d1.keys()))
class_to_idx = {'bad': 0, 'good': 1}
idx_to_class = {k: v for v, k in class_to_idx.items()}
predict(model, image_path, idx_to_class, is_show=False, device_type='cuda')
| 32.107143 | 105 | 0.632925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,060 | 0.350877 |
73e41b86e4797d0bdf28efbbcf4b63a5d38dc998
| 1,675 |
py
|
Python
|
compiler/router/tests/10_supply_grid_test.py
|
bsg-external/OpenRAM
|
3c5e13f95c925a204cabf052525c3de07638168f
|
[
"BSD-3-Clause"
] | 43 |
2016-11-06T20:53:46.000Z
|
2021-09-03T18:57:39.000Z
|
compiler/router/tests/10_supply_grid_test.py
|
bsg-external/OpenRAM
|
3c5e13f95c925a204cabf052525c3de07638168f
|
[
"BSD-3-Clause"
] | 27 |
2016-11-15T19:28:25.000Z
|
2018-02-20T19:23:52.000Z
|
compiler/router/tests/10_supply_grid_test.py
|
bsg-external/OpenRAM
|
3c5e13f95c925a204cabf052525c3de07638168f
|
[
"BSD-3-Clause"
] | 30 |
2016-11-09T16:02:45.000Z
|
2018-02-23T17:07:59.000Z
|
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
#!/usr/bin/env python3
"Run a regresion test the library cells for DRC"
import unittest
from testutils import header,openram_test
import sys,os
sys.path.append(os.path.join(sys.path[0],".."))
import globals
import debug
OPTS = globals.OPTS
class no_blockages_test(openram_test):
"""
Simplest two pin route test with no blockages.
"""
def runTest(self):
globals.init_openram("config_{0}".format(OPTS.tech_name))
from supply_router import supply_router as router
if False:
from control_logic import control_logic
cell = control_logic(16)
layer_stack =("metal3","via3","metal4")
rtr=router(layer_stack, cell)
self.assertTrue(rtr.route())
else:
from sram import sram
from sram_config import sram_config
c = sram_config(word_size=4,
num_words=32,
num_banks=1)
c.words_per_row=1
sram = sram(c, "sram1")
cell = sram.s
self.local_check(cell,True)
# fails if there are any DRC errors on any cells
globals.end_openram()
# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main()
| 29.385965 | 79 | 0.63403 | 954 | 0.569552 | 0 | 0 | 0 | 0 | 0 | 0 | 558 | 0.333134 |
73e5db5282163558729f472aa4322e2b0c37c1ec
| 3,021 |
py
|
Python
|
sources/decoding/analyse_model.py
|
int-brain-lab/paper-ephys-atlas
|
47a7d52d6d59b5b618826d6f4cb72329dee77e0e
|
[
"MIT"
] | null | null | null |
sources/decoding/analyse_model.py
|
int-brain-lab/paper-ephys-atlas
|
47a7d52d6d59b5b618826d6f4cb72329dee77e0e
|
[
"MIT"
] | null | null | null |
sources/decoding/analyse_model.py
|
int-brain-lab/paper-ephys-atlas
|
47a7d52d6d59b5b618826d6f4cb72329dee77e0e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat May 21 17:05:48 2022
@author: Guido Meijer
"""
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score, confusion_matrix
from ibllib.atlas import BrainRegions
from joblib import load
from model_functions import load_channel_data, load_trained_model
import matplotlib.pyplot as plt
import seaborn as sns
br = BrainRegions()
# Settings
FEATURES = ['psd_delta', 'psd_theta', 'psd_alpha', 'psd_beta', 'psd_gamma', 'rms_ap', 'rms_lf',
'spike_rate', 'axial_um', 'x', 'y', 'depth']
# Load in data
chan_volt = load_channel_data()
# chan_volt = pd.read_parquet("/home/sebastian/Downloads/FlatIron/tables/channels_voltage_features.pqt")
chan_volt = chan_volt.loc[~chan_volt['rms_ap'].isnull()] # remove NaNs
# 31d8dfb1-71fd-4c53-9229-7cd48bee07e4 64d04585-67e7-4320-baad-8d4589fd18f7
if True:
test = chan_volt.loc[['31d8dfb1-71fd-4c53-9229-7cd48bee07e4', '64d04585-67e7-4320-baad-8d4589fd18f7'], : ]
else:
test = chan_volt
feature_arr = test[FEATURES].to_numpy()
regions = test['cosmos_acronyms'].values
# Load model
clf = load_trained_model('channels', 'cosmos')
# Decode brain regions
print('Decoding brain regions..')
predictions = clf.predict(feature_arr)
probs = clf.predict_proba(feature_arr)
# histogram of response probabilities
certainties = probs.max(1)
plt.hist(certainties)
plt.close()
# plot of calibration: how certain are correct versus incorrect predictions
plt.hist(certainties[regions == predictions], label='Correct predictions')
plt.hist(certainties[regions != predictions], label='Wrong predictions')
plt.title("Model calibration", size=24)
plt.legend(frameon=False, fontsize=16)
plt.ylabel("Occurences", size=21)
plt.xlabel("Prob for predicted region", size=21)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
sns.despine()
plt.tight_layout()
plt.savefig("/home/sebastian/Pictures/calibration")
plt.close()
# compute accuracy and balanced accuracy for our highly imbalanced dataset
acc = accuracy_score(regions, predictions)
bacc = balanced_accuracy_score(regions, predictions)
print(f'Accuracy: {acc*100:.1f}%')
print(f'Balanced accuracy: {bacc*100:.1f}%')
# compute confusion matrix
names = np.unique(np.append(regions, predictions))
cm = confusion_matrix(regions, predictions, labels=names)
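# normalise each row by its total so entries are fractions of the true region;
# rows with no true samples become NaN and are zeroed below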
cm = cm / cm.sum(1)[:, None]
cm_copy = cm.copy()
# list top n classifications
n = 10
np.max(cm[~np.isnan(cm)])
cm[np.isnan(cm)] = 0
for i in range(n):
ind = np.unravel_index(np.argmax(cm, axis=None), cm.shape)
if ind[0] != ind[1]:
print("Top {} classification, mistake: {} gets classified as {}".format(i+1, names[ind[0]], names[ind[1]]))
else:
print("Top {} classification, success: {} gets classified as {}".format(i+1, names[ind[0]], names[ind[1]]))
cm[ind] = 0
# plot confusion matrix
plt.imshow(cm_copy)
plt.yticks(range(len(names)), names)
plt.xticks(range(len(names)), names, rotation='65')
plt.show()
| 32.138298 | 115 | 0.737504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,164 | 0.385303 |
73e778dc0ac39e74782e31bce2904aee2683d400
| 3,923 |
py
|
Python
|
Lab04_82773/ex4_4/ex4_4.py
|
viniciusbenite/cdb
|
ccc39e9320b03e26d5479a24f76a209ed2283000
|
[
"MIT"
] | null | null | null |
Lab04_82773/ex4_4/ex4_4.py
|
viniciusbenite/cdb
|
ccc39e9320b03e26d5479a24f76a209ed2283000
|
[
"MIT"
] | null | null | null |
Lab04_82773/ex4_4/ex4_4.py
|
viniciusbenite/cdb
|
ccc39e9320b03e26d5479a24f76a209ed2283000
|
[
"MIT"
] | null | null | null |
# Vinicius Ribeiro
# Nmec 82773
# Make sure to run pip3 install -r requirements.txt and load the .dump into Neo4j
# https://neo4j.com/docs/operations-manual/current/tools/dump-load/
# Dataset: https://neo4j.com/graphgist/beer-amp-breweries-graphgist#_create_nodes_and_relationships
import sys
from neo4j import GraphDatabase
# Connect to local DB
def init_db(uri, user, password):
try:
_driver = GraphDatabase.driver(uri, auth=(user, password))
print("Connection successful")
except:
print("Something went wrong...")
sys.exit()
with _driver.session() as session:
result = session.run("match (c) return c")
result = list(result)
print("rows: {}".format(len(result)))
exec_queries(session)
def exec_queries(session):
# Map of queries to be executed:
queries = {"querie1": "MATCH (b:Beer)-[r:BEER_CATEGORY]->(c:Category{category:'British Ale'})"
"RETURN b AS BEER, c AS TYPE",
"querie2": "MATCH (b:Beer)-[r:BEER_STYLE]->(s:Style)"
"WHERE s.style CONTAINS 'Ale'"
"RETURN b.name AS BEER, s AS STYLE",
"querie3": "MATCH (category:Category{category: 'British Ale'})<-[:BEER_CATEGORY]-(beer:Beer)-["
":BREWED_AT]->(brewery: Brewery)-[:LOC_CITY]->(city:City)-[:LOC_STATE]->(state:State)-["
":LOC_COUNTRY]->(country:Country {country: 'United Kingdom'}) "
"RETURN Distinct(beer.name) as beer, brewery.name as brewery, city.city as city "
"ORDER BY city, beer",
"querie4": "MATCH (b:Beer)-[r:BEER_CATEGORY]->(c:Category {category: 'Irish Ale'})"
"RETURN b.name AS BEER_NAME, b.abv AS ALC ,c.category AS CATEGORY "
"ORDER BY b.abv DESC "
"LIMIT 10",
"querie5": "MATCH (b:Beer)"
"WHERE b.name CONTAINS 'Stout'"
"RETURN b.name AS BEER "
"ORDER BY b.name",
"querie6": "MATCH (b:Beer)-[:BEER_STYLE]->(s:Style)"
"WHERE b.name CONTAINS 'IPA' AND s.style CONTAINS 'Ale'"
"RETURN b.name AS BEER, s.style AS STYLE",
"querie7": "MATCH (b:Beer)-[:BREWED_AT]->(brewery: Brewery)-[:LOC_CITY]->(city:City)-[:LOC_STATE]->("
"state:State)-[:LOC_COUNTRY]->(country:Country) "
"WHERE country.country <> 'Belgium' AND b.abv > 9.0 "
"RETURN DISTINCT b.name AS BEER, country.country AS COUNTRY, b.abv AS ALCH "
"ORDER BY b.abv",
"querie8": "MATCH (b:Beer)-[:BEER_STYLE]->(s:Style)"
"WITH s, COUNT(b) AS total "
"RETURN s.style AS STYLE , total AS NUM_OF_DIF_BEERS "
"ORDER BY total DESC "
"LIMIT 10",
"querie9": "MATCH path=shortestPath((b1:Beer)-[*]-(b2:Beer)) "
"WHERE b1.name='Bare Knuckle Stout' AND b2.name='Hop Ottin IPA' "
"RETURN length(path) AS PATH_LEN, path AS PATH ",
"querie10": "MATCH path=shortestPath((b1:Beer)-[*]-(b2:Beer)) "
"WHERE b1.name <> b2.name "
"RETURN LENGTH(path) AS LENGTH, b1.name AS BEER1, b2.name AS BEER2, path AS PATH "
"ORDER BY LENGTH DESC "
"LIMIT 1"}
for key in queries:
results = session.run(queries[key])
for result in results:
print(result)
close(session)
print("FINISH!")
def close(_driver):
_driver.close()
init_db("bolt://localhost:7687", "neo4j", "12345")
| 42.182796 | 116 | 0.515932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,282 | 0.581698 |
73e823c830b6abe9c91c69930849b15b603a17bb
| 184 |
py
|
Python
|
readthedocs/code-tabs/python/tests/test_directory_listing_recursive.py
|
xenon-middleware/xenon-tutorial
|
92e4e4037ab2bc67c8473ac4366ff41326a7a41c
|
[
"Apache-2.0"
] | 2 |
2016-06-23T09:03:34.000Z
|
2018-03-31T12:45:39.000Z
|
readthedocs/code-tabs/python/tests/test_directory_listing_recursive.py
|
NLeSC/Xenon-examples
|
92e4e4037ab2bc67c8473ac4366ff41326a7a41c
|
[
"Apache-2.0"
] | 54 |
2015-11-26T16:36:48.000Z
|
2017-08-01T12:12:51.000Z
|
readthedocs/code-tabs/python/tests/test_directory_listing_recursive.py
|
xenon-middleware/xenon-examples
|
92e4e4037ab2bc67c8473ac4366ff41326a7a41c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import pytest
from pyxenon_snippets import directory_listing_recursive
def test_directory_listing_recursive():
directory_listing_recursive.run_example()
| 16.727273 | 56 | 0.831522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.11413 |
73e8d0b6bdf6ce5014c04793aa8b3ccc731b67fb
| 764 |
py
|
Python
|
submissions/past201912-open/i.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1 |
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/past201912-open/i.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3 |
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/past201912-open/i.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from itertools import product
n, m = map(int, readline().split())
inf = float('inf')
dp = [inf] * (2 ** n)
dp[0] = 0
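# Bitmask DP: dp[mask] is the minimum total cost of a selection of keys whose combined
# 'Y' positions are exactly the bits set in mask; each key relaxes every reachable state
# by OR-ing in its coverage, and dp[-1] (the full mask) is the answer.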
for _ in range(m):
s, c = readline().rstrip().decode().split()
c = int(c)
bit = [0] * n
for i, ss in enumerate(s):
if ss == 'Y':
bit[i] = 1
for i, v in enumerate(product([0, 1], repeat=n)):
if dp[i] != inf:
num = 0
for index, (x, y) in enumerate(zip(v[::-1], bit)):
if x == 1 or y == 1:
num += 2 ** index
dp[num] = min(dp[num], dp[i] + c)
print(-1 if dp[-1] == inf else dp[-1])
| 27.285714 | 62 | 0.510471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.010471 |
73e8d525fff7a96e23c10924c3bedcf78a0ab5d6
| 55,250 |
py
|
Python
|
google/cloud/dlp_v2/services/dlp_service/transports/grpc_asyncio.py
|
LaudateCorpus1/python-dlp
|
e0a51c9254677016f547647848dcbee85ee1bf29
|
[
"Apache-2.0"
] | 32 |
2020-07-11T02:50:13.000Z
|
2022-02-10T19:45:59.000Z
|
google/cloud/dlp_v2/services/dlp_service/transports/grpc_asyncio.py
|
LaudateCorpus1/python-dlp
|
e0a51c9254677016f547647848dcbee85ee1bf29
|
[
"Apache-2.0"
] | 112 |
2020-02-11T13:24:14.000Z
|
2022-03-31T20:59:08.000Z
|
google/cloud/dlp_v2/services/dlp_service/transports/grpc_asyncio.py
|
LaudateCorpus1/python-dlp
|
e0a51c9254677016f547647848dcbee85ee1bf29
|
[
"Apache-2.0"
] | 22 |
2020-02-03T18:23:38.000Z
|
2022-01-29T08:09:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dlp_v2.types import dlp
from google.protobuf import empty_pb2 # type: ignore
from .base import DlpServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import DlpServiceGrpcTransport
class DlpServiceGrpcAsyncIOTransport(DlpServiceTransport):
"""gRPC AsyncIO backend transport for DlpService.
The Cloud Data Loss Prevention (DLP) API is a service that
allows clients to detect the presence of Personally Identifiable
Information (PII) and other privacy-sensitive data in user-
supplied, unstructured data streams, like text blocks or images.
The service also includes methods for sensitive data redaction
and scheduling of data scans on Google Cloud Platform based data
sets.
To learn more about concepts and find how-to guides see
https://cloud.google.com/dlp/docs/.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "dlp.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "dlp.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def inspect_content(
self,
) -> Callable[[dlp.InspectContentRequest], Awaitable[dlp.InspectContentResponse]]:
r"""Return a callable for the inspect content method over gRPC.
Finds potentially sensitive info in content.
This method has limits on input size, processing time,
and output size.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
        For how-to guides, see
https://cloud.google.com/dlp/docs/inspecting-images and
https://cloud.google.com/dlp/docs/inspecting-text,
Returns:
Callable[[~.InspectContentRequest],
Awaitable[~.InspectContentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "inspect_content" not in self._stubs:
self._stubs["inspect_content"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/InspectContent",
request_serializer=dlp.InspectContentRequest.serialize,
response_deserializer=dlp.InspectContentResponse.deserialize,
)
return self._stubs["inspect_content"]
@property
def redact_image(
self,
) -> Callable[[dlp.RedactImageRequest], Awaitable[dlp.RedactImageResponse]]:
r"""Return a callable for the redact image method over gRPC.
Redacts potentially sensitive info from an image.
This method has limits on input size, processing time,
and output size. See
https://cloud.google.com/dlp/docs/redacting-sensitive-
data-images to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Returns:
Callable[[~.RedactImageRequest],
Awaitable[~.RedactImageResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "redact_image" not in self._stubs:
self._stubs["redact_image"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/RedactImage",
request_serializer=dlp.RedactImageRequest.serialize,
response_deserializer=dlp.RedactImageResponse.deserialize,
)
return self._stubs["redact_image"]
@property
def deidentify_content(
self,
) -> Callable[
[dlp.DeidentifyContentRequest], Awaitable[dlp.DeidentifyContentResponse]
]:
r"""Return a callable for the deidentify content method over gRPC.
De-identifies potentially sensitive info from a
ContentItem. This method has limits on input size and
output size. See
https://cloud.google.com/dlp/docs/deidentify-sensitive-
data to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Returns:
Callable[[~.DeidentifyContentRequest],
Awaitable[~.DeidentifyContentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deidentify_content" not in self._stubs:
self._stubs["deidentify_content"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeidentifyContent",
request_serializer=dlp.DeidentifyContentRequest.serialize,
response_deserializer=dlp.DeidentifyContentResponse.deserialize,
)
return self._stubs["deidentify_content"]
@property
def reidentify_content(
self,
) -> Callable[
[dlp.ReidentifyContentRequest], Awaitable[dlp.ReidentifyContentResponse]
]:
r"""Return a callable for the reidentify content method over gRPC.
Re-identifies content that has been de-identified. See
https://cloud.google.com/dlp/docs/pseudonymization#re-identification_in_free_text_code_example
to learn more.
Returns:
Callable[[~.ReidentifyContentRequest],
Awaitable[~.ReidentifyContentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reidentify_content" not in self._stubs:
self._stubs["reidentify_content"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ReidentifyContent",
request_serializer=dlp.ReidentifyContentRequest.serialize,
response_deserializer=dlp.ReidentifyContentResponse.deserialize,
)
return self._stubs["reidentify_content"]
@property
def list_info_types(
self,
) -> Callable[[dlp.ListInfoTypesRequest], Awaitable[dlp.ListInfoTypesResponse]]:
r"""Return a callable for the list info types method over gRPC.
Returns a list of the sensitive information types
that the DLP API supports. See
https://cloud.google.com/dlp/docs/infotypes-reference to
learn more.
Returns:
Callable[[~.ListInfoTypesRequest],
Awaitable[~.ListInfoTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_info_types" not in self._stubs:
self._stubs["list_info_types"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListInfoTypes",
request_serializer=dlp.ListInfoTypesRequest.serialize,
response_deserializer=dlp.ListInfoTypesResponse.deserialize,
)
return self._stubs["list_info_types"]
@property
def create_inspect_template(
self,
) -> Callable[[dlp.CreateInspectTemplateRequest], Awaitable[dlp.InspectTemplate]]:
r"""Return a callable for the create inspect template method over gRPC.
Creates an InspectTemplate for re-using frequently
used configuration for inspecting content, images, and
storage. See https://cloud.google.com/dlp/docs/creating-
templates to learn more.
Returns:
Callable[[~.CreateInspectTemplateRequest],
Awaitable[~.InspectTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_inspect_template" not in self._stubs:
self._stubs["create_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateInspectTemplate",
request_serializer=dlp.CreateInspectTemplateRequest.serialize,
response_deserializer=dlp.InspectTemplate.deserialize,
)
return self._stubs["create_inspect_template"]
@property
def update_inspect_template(
self,
) -> Callable[[dlp.UpdateInspectTemplateRequest], Awaitable[dlp.InspectTemplate]]:
r"""Return a callable for the update inspect template method over gRPC.
Updates the InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.UpdateInspectTemplateRequest],
Awaitable[~.InspectTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_inspect_template" not in self._stubs:
self._stubs["update_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateInspectTemplate",
request_serializer=dlp.UpdateInspectTemplateRequest.serialize,
response_deserializer=dlp.InspectTemplate.deserialize,
)
return self._stubs["update_inspect_template"]
@property
def get_inspect_template(
self,
) -> Callable[[dlp.GetInspectTemplateRequest], Awaitable[dlp.InspectTemplate]]:
r"""Return a callable for the get inspect template method over gRPC.
Gets an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.GetInspectTemplateRequest],
Awaitable[~.InspectTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_inspect_template" not in self._stubs:
self._stubs["get_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetInspectTemplate",
request_serializer=dlp.GetInspectTemplateRequest.serialize,
response_deserializer=dlp.InspectTemplate.deserialize,
)
return self._stubs["get_inspect_template"]
@property
def list_inspect_templates(
self,
) -> Callable[
[dlp.ListInspectTemplatesRequest], Awaitable[dlp.ListInspectTemplatesResponse]
]:
r"""Return a callable for the list inspect templates method over gRPC.
Lists InspectTemplates.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.ListInspectTemplatesRequest],
Awaitable[~.ListInspectTemplatesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_inspect_templates" not in self._stubs:
self._stubs["list_inspect_templates"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListInspectTemplates",
request_serializer=dlp.ListInspectTemplatesRequest.serialize,
response_deserializer=dlp.ListInspectTemplatesResponse.deserialize,
)
return self._stubs["list_inspect_templates"]
@property
def delete_inspect_template(
self,
) -> Callable[[dlp.DeleteInspectTemplateRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete inspect template method over gRPC.
Deletes an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.DeleteInspectTemplateRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_inspect_template" not in self._stubs:
self._stubs["delete_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteInspectTemplate",
request_serializer=dlp.DeleteInspectTemplateRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_inspect_template"]
@property
def create_deidentify_template(
self,
) -> Callable[
[dlp.CreateDeidentifyTemplateRequest], Awaitable[dlp.DeidentifyTemplate]
]:
r"""Return a callable for the create deidentify template method over gRPC.
Creates a DeidentifyTemplate for re-using frequently
used configuration for de-identifying content, images,
and storage. See
https://cloud.google.com/dlp/docs/creating-templates-
deid to learn more.
Returns:
Callable[[~.CreateDeidentifyTemplateRequest],
Awaitable[~.DeidentifyTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_deidentify_template" not in self._stubs:
self._stubs["create_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateDeidentifyTemplate",
request_serializer=dlp.CreateDeidentifyTemplateRequest.serialize,
response_deserializer=dlp.DeidentifyTemplate.deserialize,
)
return self._stubs["create_deidentify_template"]
@property
def update_deidentify_template(
self,
) -> Callable[
[dlp.UpdateDeidentifyTemplateRequest], Awaitable[dlp.DeidentifyTemplate]
]:
r"""Return a callable for the update deidentify template method over gRPC.
Updates the DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.UpdateDeidentifyTemplateRequest],
Awaitable[~.DeidentifyTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_deidentify_template" not in self._stubs:
self._stubs["update_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateDeidentifyTemplate",
request_serializer=dlp.UpdateDeidentifyTemplateRequest.serialize,
response_deserializer=dlp.DeidentifyTemplate.deserialize,
)
return self._stubs["update_deidentify_template"]
@property
def get_deidentify_template(
self,
) -> Callable[
[dlp.GetDeidentifyTemplateRequest], Awaitable[dlp.DeidentifyTemplate]
]:
r"""Return a callable for the get deidentify template method over gRPC.
Gets a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.GetDeidentifyTemplateRequest],
Awaitable[~.DeidentifyTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_deidentify_template" not in self._stubs:
self._stubs["get_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetDeidentifyTemplate",
request_serializer=dlp.GetDeidentifyTemplateRequest.serialize,
response_deserializer=dlp.DeidentifyTemplate.deserialize,
)
return self._stubs["get_deidentify_template"]
@property
def list_deidentify_templates(
self,
) -> Callable[
[dlp.ListDeidentifyTemplatesRequest],
Awaitable[dlp.ListDeidentifyTemplatesResponse],
]:
r"""Return a callable for the list deidentify templates method over gRPC.
Lists DeidentifyTemplates.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.ListDeidentifyTemplatesRequest],
Awaitable[~.ListDeidentifyTemplatesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_deidentify_templates" not in self._stubs:
self._stubs["list_deidentify_templates"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListDeidentifyTemplates",
request_serializer=dlp.ListDeidentifyTemplatesRequest.serialize,
response_deserializer=dlp.ListDeidentifyTemplatesResponse.deserialize,
)
return self._stubs["list_deidentify_templates"]
@property
def delete_deidentify_template(
self,
) -> Callable[[dlp.DeleteDeidentifyTemplateRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete deidentify template method over gRPC.
Deletes a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.DeleteDeidentifyTemplateRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_deidentify_template" not in self._stubs:
self._stubs["delete_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteDeidentifyTemplate",
request_serializer=dlp.DeleteDeidentifyTemplateRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_deidentify_template"]
@property
def create_job_trigger(
self,
) -> Callable[[dlp.CreateJobTriggerRequest], Awaitable[dlp.JobTrigger]]:
r"""Return a callable for the create job trigger method over gRPC.
Creates a job trigger to run DLP actions such as
scanning storage for sensitive information on a set
schedule. See
https://cloud.google.com/dlp/docs/creating-job-triggers
to learn more.
Returns:
Callable[[~.CreateJobTriggerRequest],
Awaitable[~.JobTrigger]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_job_trigger" not in self._stubs:
self._stubs["create_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateJobTrigger",
request_serializer=dlp.CreateJobTriggerRequest.serialize,
response_deserializer=dlp.JobTrigger.deserialize,
)
return self._stubs["create_job_trigger"]
@property
def update_job_trigger(
self,
) -> Callable[[dlp.UpdateJobTriggerRequest], Awaitable[dlp.JobTrigger]]:
r"""Return a callable for the update job trigger method over gRPC.
Updates a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.UpdateJobTriggerRequest],
Awaitable[~.JobTrigger]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_job_trigger" not in self._stubs:
self._stubs["update_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateJobTrigger",
request_serializer=dlp.UpdateJobTriggerRequest.serialize,
response_deserializer=dlp.JobTrigger.deserialize,
)
return self._stubs["update_job_trigger"]
@property
def hybrid_inspect_job_trigger(
self,
) -> Callable[
[dlp.HybridInspectJobTriggerRequest], Awaitable[dlp.HybridInspectResponse]
]:
r"""Return a callable for the hybrid inspect job trigger method over gRPC.
Inspect hybrid content and store findings to a
trigger. The inspection will be processed
        asynchronously. To review the findings, monitor the jobs
within the trigger.
Returns:
Callable[[~.HybridInspectJobTriggerRequest],
Awaitable[~.HybridInspectResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "hybrid_inspect_job_trigger" not in self._stubs:
self._stubs["hybrid_inspect_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/HybridInspectJobTrigger",
request_serializer=dlp.HybridInspectJobTriggerRequest.serialize,
response_deserializer=dlp.HybridInspectResponse.deserialize,
)
return self._stubs["hybrid_inspect_job_trigger"]
@property
def get_job_trigger(
self,
) -> Callable[[dlp.GetJobTriggerRequest], Awaitable[dlp.JobTrigger]]:
r"""Return a callable for the get job trigger method over gRPC.
Gets a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.GetJobTriggerRequest],
Awaitable[~.JobTrigger]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_job_trigger" not in self._stubs:
self._stubs["get_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetJobTrigger",
request_serializer=dlp.GetJobTriggerRequest.serialize,
response_deserializer=dlp.JobTrigger.deserialize,
)
return self._stubs["get_job_trigger"]
@property
def list_job_triggers(
self,
) -> Callable[[dlp.ListJobTriggersRequest], Awaitable[dlp.ListJobTriggersResponse]]:
r"""Return a callable for the list job triggers method over gRPC.
Lists job triggers.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.ListJobTriggersRequest],
Awaitable[~.ListJobTriggersResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_job_triggers" not in self._stubs:
self._stubs["list_job_triggers"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListJobTriggers",
request_serializer=dlp.ListJobTriggersRequest.serialize,
response_deserializer=dlp.ListJobTriggersResponse.deserialize,
)
return self._stubs["list_job_triggers"]
@property
def delete_job_trigger(
self,
) -> Callable[[dlp.DeleteJobTriggerRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete job trigger method over gRPC.
Deletes a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.DeleteJobTriggerRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_job_trigger" not in self._stubs:
self._stubs["delete_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteJobTrigger",
request_serializer=dlp.DeleteJobTriggerRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_job_trigger"]
@property
def activate_job_trigger(
self,
) -> Callable[[dlp.ActivateJobTriggerRequest], Awaitable[dlp.DlpJob]]:
r"""Return a callable for the activate job trigger method over gRPC.
        Activate a job trigger. Causes the immediate execution
of a trigger instead of waiting on the trigger event to
occur.
Returns:
Callable[[~.ActivateJobTriggerRequest],
Awaitable[~.DlpJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "activate_job_trigger" not in self._stubs:
self._stubs["activate_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ActivateJobTrigger",
request_serializer=dlp.ActivateJobTriggerRequest.serialize,
response_deserializer=dlp.DlpJob.deserialize,
)
return self._stubs["activate_job_trigger"]
@property
def create_dlp_job(
self,
) -> Callable[[dlp.CreateDlpJobRequest], Awaitable[dlp.DlpJob]]:
r"""Return a callable for the create dlp job method over gRPC.
Creates a new job to inspect storage or calculate
risk metrics. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
When no InfoTypes or CustomInfoTypes are specified in
inspect jobs, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Returns:
Callable[[~.CreateDlpJobRequest],
Awaitable[~.DlpJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_dlp_job" not in self._stubs:
self._stubs["create_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateDlpJob",
request_serializer=dlp.CreateDlpJobRequest.serialize,
response_deserializer=dlp.DlpJob.deserialize,
)
return self._stubs["create_dlp_job"]
@property
def list_dlp_jobs(
self,
) -> Callable[[dlp.ListDlpJobsRequest], Awaitable[dlp.ListDlpJobsResponse]]:
r"""Return a callable for the list dlp jobs method over gRPC.
Lists DlpJobs that match the specified filter in the
request. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
Returns:
Callable[[~.ListDlpJobsRequest],
Awaitable[~.ListDlpJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_dlp_jobs" not in self._stubs:
self._stubs["list_dlp_jobs"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListDlpJobs",
request_serializer=dlp.ListDlpJobsRequest.serialize,
response_deserializer=dlp.ListDlpJobsResponse.deserialize,
)
return self._stubs["list_dlp_jobs"]
@property
def get_dlp_job(self) -> Callable[[dlp.GetDlpJobRequest], Awaitable[dlp.DlpJob]]:
r"""Return a callable for the get dlp job method over gRPC.
Gets the latest state of a long-running DlpJob.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Returns:
Callable[[~.GetDlpJobRequest],
Awaitable[~.DlpJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_dlp_job" not in self._stubs:
self._stubs["get_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetDlpJob",
request_serializer=dlp.GetDlpJobRequest.serialize,
response_deserializer=dlp.DlpJob.deserialize,
)
return self._stubs["get_dlp_job"]
@property
def delete_dlp_job(
self,
) -> Callable[[dlp.DeleteDlpJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete dlp job method over gRPC.
Deletes a long-running DlpJob. This method indicates
that the client is no longer interested in the DlpJob
result. The job will be cancelled if possible.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Returns:
Callable[[~.DeleteDlpJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_dlp_job" not in self._stubs:
self._stubs["delete_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteDlpJob",
request_serializer=dlp.DeleteDlpJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_dlp_job"]
@property
def cancel_dlp_job(
self,
) -> Callable[[dlp.CancelDlpJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the cancel dlp job method over gRPC.
Starts asynchronous cancellation on a long-running
DlpJob. The server makes a best effort to cancel the
DlpJob, but success is not guaranteed.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Returns:
Callable[[~.CancelDlpJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_dlp_job" not in self._stubs:
self._stubs["cancel_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CancelDlpJob",
request_serializer=dlp.CancelDlpJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_dlp_job"]
@property
def create_stored_info_type(
self,
) -> Callable[[dlp.CreateStoredInfoTypeRequest], Awaitable[dlp.StoredInfoType]]:
r"""Return a callable for the create stored info type method over gRPC.
Creates a pre-built stored infoType to be used for
inspection. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.CreateStoredInfoTypeRequest],
Awaitable[~.StoredInfoType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_stored_info_type" not in self._stubs:
self._stubs["create_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateStoredInfoType",
request_serializer=dlp.CreateStoredInfoTypeRequest.serialize,
response_deserializer=dlp.StoredInfoType.deserialize,
)
return self._stubs["create_stored_info_type"]
@property
def update_stored_info_type(
self,
) -> Callable[[dlp.UpdateStoredInfoTypeRequest], Awaitable[dlp.StoredInfoType]]:
r"""Return a callable for the update stored info type method over gRPC.
Updates the stored infoType by creating a new
version. The existing version will continue to be used
until the new version is ready. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.UpdateStoredInfoTypeRequest],
Awaitable[~.StoredInfoType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_stored_info_type" not in self._stubs:
self._stubs["update_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateStoredInfoType",
request_serializer=dlp.UpdateStoredInfoTypeRequest.serialize,
response_deserializer=dlp.StoredInfoType.deserialize,
)
return self._stubs["update_stored_info_type"]
@property
def get_stored_info_type(
self,
) -> Callable[[dlp.GetStoredInfoTypeRequest], Awaitable[dlp.StoredInfoType]]:
r"""Return a callable for the get stored info type method over gRPC.
Gets a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.GetStoredInfoTypeRequest],
Awaitable[~.StoredInfoType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_stored_info_type" not in self._stubs:
self._stubs["get_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetStoredInfoType",
request_serializer=dlp.GetStoredInfoTypeRequest.serialize,
response_deserializer=dlp.StoredInfoType.deserialize,
)
return self._stubs["get_stored_info_type"]
@property
def list_stored_info_types(
self,
) -> Callable[
[dlp.ListStoredInfoTypesRequest], Awaitable[dlp.ListStoredInfoTypesResponse]
]:
r"""Return a callable for the list stored info types method over gRPC.
Lists stored infoTypes.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.ListStoredInfoTypesRequest],
Awaitable[~.ListStoredInfoTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_stored_info_types" not in self._stubs:
self._stubs["list_stored_info_types"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListStoredInfoTypes",
request_serializer=dlp.ListStoredInfoTypesRequest.serialize,
response_deserializer=dlp.ListStoredInfoTypesResponse.deserialize,
)
return self._stubs["list_stored_info_types"]
@property
def delete_stored_info_type(
self,
) -> Callable[[dlp.DeleteStoredInfoTypeRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete stored info type method over gRPC.
Deletes a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.DeleteStoredInfoTypeRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_stored_info_type" not in self._stubs:
self._stubs["delete_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteStoredInfoType",
request_serializer=dlp.DeleteStoredInfoTypeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_stored_info_type"]
@property
def hybrid_inspect_dlp_job(
self,
) -> Callable[
[dlp.HybridInspectDlpJobRequest], Awaitable[dlp.HybridInspectResponse]
]:
r"""Return a callable for the hybrid inspect dlp job method over gRPC.
Inspect hybrid content and store findings to a job.
To review the findings, inspect the job. Inspection will
occur asynchronously.
Returns:
Callable[[~.HybridInspectDlpJobRequest],
Awaitable[~.HybridInspectResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "hybrid_inspect_dlp_job" not in self._stubs:
self._stubs["hybrid_inspect_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/HybridInspectDlpJob",
request_serializer=dlp.HybridInspectDlpJobRequest.serialize,
response_deserializer=dlp.HybridInspectResponse.deserialize,
)
return self._stubs["hybrid_inspect_dlp_job"]
@property
def finish_dlp_job(
self,
) -> Callable[[dlp.FinishDlpJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the finish dlp job method over gRPC.
Finish a running hybrid DlpJob. Triggers the
finalization steps and running of any enabled actions
that have not yet run.
Returns:
Callable[[~.FinishDlpJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "finish_dlp_job" not in self._stubs:
self._stubs["finish_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/FinishDlpJob",
request_serializer=dlp.FinishDlpJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["finish_dlp_job"]
def close(self):
return self.grpc_channel.close()
__all__ = ("DlpServiceGrpcAsyncIOTransport",)
| 43.814433 | 102 | 0.639982 | 54,002 | 0.977412 | 0 | 0 | 46,324 | 0.838443 | 0 | 0 | 34,164 | 0.618353 |
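The transport above is normally selected automatically by the asynchronous client rather than constructed by hand. A minimal usage sketch, assuming google-cloud-dlp is installed, application-default credentials are configured, and "my-project" is only a placeholder project ID:

# Sketch: calling InspectContent through the async client, which picks the
# gRPC AsyncIO transport by default. The project ID below is a placeholder.
import asyncio

from google.cloud import dlp_v2


async def main():
    client = dlp_v2.DlpServiceAsyncClient()
    response = await client.inspect_content(
        request={
            "parent": "projects/my-project",
            "item": {"value": "My email is test@example.com"},
        }
    )
    for finding in response.result.findings:
        print(finding.info_type.name, finding.likelihood)


asyncio.run(main())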
73e9a8245d7f2b954b01c47bce5f6ddf87248068 | 781 | py | Python | tym.py | tsyogesh40/Finger_recognition-Python- | 4c1597cd246be1248bbfbb6cfc1ce1cbf5c4ecac | ["MIT"] | null | null | null | tym.py | tsyogesh40/Finger_recognition-Python- | 4c1597cd246be1248bbfbb6cfc1ce1cbf5c4ecac | ["MIT"] | null | null | null | tym.py | tsyogesh40/Finger_recognition-Python- | 4c1597cd246be1248bbfbb6cfc1ce1cbf5c4ecac | ["MIT"] | null | null | null |
import datetime
t=datetime.datetime.now()
#date format
weekday=t.strftime("%a") # abbreviated name; use %A for the full weekday name
day=t.strftime("%d")
month=t.strftime("%b") # abbreviated name; use %B for the full month name
month_num=t.strftime("%m")
year=t.strftime("%Y")
date=t.strftime("%Y-%m-%d")
print(date)
#time format
hour_12=t.strftime("%I")
hour_24=t.strftime("%H")
minutes=t.strftime("%M")
seconds=t.strftime("%S")
am_pm=t.strftime("%p")
time_12=t.strftime("%I:%M:%S %p") #12hrs time AM/PM
time_24=t.strftime("%H:%M:%S") #24 Hrs time
print(time_12)
print(time_24)
def sem_calc(month):
    # Months 1-5 fall in the odd semester, months 6-12 in the even one.
    if 1 <= month < 6:
        return "odd"
    else:
        return "even"
def current_time_12h():
    # Renamed from date() so it does not shadow the `date` string assigned above.
    t=datetime.datetime.now()
    time_12=t.strftime("%I:%M:%S %p") #12hrs time AM/PM
    return time_12
print(sem_calc(int(month_num)))
print(current_time_12h())
| 17.75 | 55 | 0.641485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.245839 |
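The script above re-parses strftime output in several places; the same fields can be read directly from the datetime object. A stdlib-only sketch, not part of the original file:

# Sketch: equivalent values without extra strftime round-trips.
import datetime

now = datetime.datetime.now()
semester = "odd" if 1 <= now.month < 6 else "even"
print(now.strftime("%Y-%m-%d"), now.strftime("%I:%M:%S %p"), semester)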
73eb8bdab00daf7ae249b9e5cfe3937c7c3470b5 | 92 | py | Python | parameters_8001.py | sanket0211/courier-portal | 6b35aa006813f710db9c3e61da4a718aff20881d | ["BSD-3-Clause"] | null | null | null | parameters_8001.py | sanket0211/courier-portal | 6b35aa006813f710db9c3e61da4a718aff20881d | ["BSD-3-Clause"] | null | null | null | parameters_8001.py | sanket0211/courier-portal | 6b35aa006813f710db9c3e61da4a718aff20881d | ["BSD-3-Clause"] | null | null | null |
password="pbkdf2(1000,20,sha512)$8a062c206755a51e$df13c5122a621a9de3a64d39f26460f175076ca0"
| 46 | 91 | 0.891304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.891304 |
73ec5cfa22b958735251f6bd136ed85eba9a7172 | 562 | py | Python | TheKinozal/custom_storages/async_s3_video.py | R-Mielamud/TheKinozal | 62cb79faae58b23f0ef0175593ed9b5746229b5b | ["MIT"] | 1 | 2020-10-16T19:15:32.000Z | 2020-10-16T19:15:32.000Z | TheKinozal/custom_storages/async_s3_video.py | R-Mielamud/TheKinozal | 62cb79faae58b23f0ef0175593ed9b5746229b5b | ["MIT"] | null | null | null | TheKinozal/custom_storages/async_s3_video.py | R-Mielamud/TheKinozal | 62cb79faae58b23f0ef0175593ed9b5746229b5b | ["MIT"] | null | null | null |
import os
from TheKinozal import settings
from storages.backends.s3boto3 import S3Boto3Storage
from helpers.random_string import generate_random_string
from helpers.chunked_upload import ChunkedS3VideoUploader
class AsyncS3VideoStorage(S3Boto3Storage):
    def _save(self, name, content):
        # Append a random suffix to the base name to avoid S3 key collisions.
        filename, ext = os.path.splitext(name)
        name = filename + "_" + generate_random_string() + ext
        # Upload the video in chunks, then return the key that gets stored.
        uploader = ChunkedS3VideoUploader(content, settings.AWS_VIDEOS_KEY, name)
        uploader.upload()
        return settings.AWS_VIDEOS_KEY + "/" + name
| 35.125 | 81 | 0.756228 | 350 | 0.622776 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.010676 |
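A storage backend like the one above is usually attached to a model field. A sketch of that wiring; the Video model and its fields are hypothetical and not part of the original project:

# Hypothetical model using AsyncS3VideoStorage (illustrative names only).
from django.db import models

from custom_storages.async_s3_video import AsyncS3VideoStorage


class Video(models.Model):
    title = models.CharField(max_length=200)
    file = models.FileField(storage=AsyncS3VideoStorage(), upload_to="videos/")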
73ed247eb28b6b5d48aa9d6331bcb389807b9a5d | 1,098 | py | Python | bh_tsne/prep_result.py | mr4jay/numerai | a07b2dcafe9f078df8578d150d585f239fe73c51 | ["MIT"] | 306 | 2016-09-18T07:32:33.000Z | 2022-03-22T16:30:26.000Z | bh_tsne/prep_result.py | mikekosk/numerai | 2a09c648c66143ee101cd80de4827108aaf218fc | ["MIT"] | 2 | 2017-01-04T02:17:20.000Z | 2017-09-18T11:43:59.000Z | bh_tsne/prep_result.py | mikekosk/numerai | 2a09c648c66143ee101cd80de4827108aaf218fc | ["MIT"] | 94 | 2016-09-17T03:48:55.000Z | 2022-01-05T11:54:25.000Z |
import struct
import numpy as np
import pandas as pd
df_train = pd.read_csv('../data/train_data.csv')
df_valid = pd.read_csv('../data/valid_data.csv')
df_test = pd.read_csv('../data/test_data.csv')
with open('result.dat', 'rb') as f:
    # bh_tsne's result.dat starts with int32 N and int32 no_dims, followed by
    # N * no_dims float64 coordinates (landmarks and costs follow, unused here).
    N, = struct.unpack('i', f.read(4))
    no_dims, = struct.unpack('i', f.read(4))
print(N, no_dims)
mappedX = struct.unpack('{}d'.format(N * no_dims), f.read(8 * N * no_dims))
mappedX = np.array(mappedX).reshape((N, no_dims))
print(mappedX)
tsne_train = mappedX[:len(df_train)]
tsne_valid = mappedX[len(df_train):len(df_train)+len(df_valid)]
tsne_test = mappedX[len(df_train)+len(df_valid):]
assert(len(tsne_train) == len(df_train))
assert(len(tsne_valid) == len(df_valid))
assert(len(tsne_test) == len(df_test))
save_path = '../data/tsne_{}d_30p.npz'.format(no_dims)
np.savez(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)
print('Saved: {}'.format(save_path))
# landmarks, = struct.unpack('{}i'.format(N), f.read(4 * N))
# costs, = struct.unpack('{}d'.format(N), f.read(8 * N))
| 34.3125 | 79 | 0.653916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.228597 |
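The arrays written by the script can be read back with numpy.load. A short sketch, assuming a two-dimensional embedding so the file name resolves to tsne_2d_30p.npz:

# Sketch: loading the arrays saved by prep_result.py (2-D embedding assumed).
import numpy as np

data = np.load('../data/tsne_2d_30p.npz')
tsne_train, tsne_valid, tsne_test = data['train'], data['valid'], data['test']
print(tsne_train.shape, tsne_valid.shape, tsne_test.shape)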
73eec10a12c7ce55e197ae8c7928050831069eb9 | 623 | py | Python | moca/urls.py | satvikdhandhania/vit-11 | e599f2b82a9194658c67bbd5c7e45f3b50d016da | ["BSD-3-Clause"] | 1 | 2016-09-20T20:36:53.000Z | 2016-09-20T20:36:53.000Z | moca/urls.py | satvikdhandhania/vit-11 | e599f2b82a9194658c67bbd5c7e45f3b50d016da | ["BSD-3-Clause"] | null | null | null | moca/urls.py | satvikdhandhania/vit-11 | e599f2b82a9194658c67bbd5c7e45f3b50d016da | ["BSD-3-Clause"] | null | null | null |
from django.conf.urls.defaults import patterns, url, include
# Enable the Django admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^log/', include('requestlog.urls')),
(r'^admin/', include(admin.site.urls)),
# Pass anything that doesn't match on to the mrs app
url(r'^',
include('moca.mrs.urls')),
)
from django.conf import settings
if settings.DEBUG:
urlpatterns += patterns(
'',
(r'^static/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
| 23.074074 | 60 | 0.632424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.364366 |
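django.conf.urls.defaults and patterns() were removed in later Django releases. For reference, an equivalent URLconf in modern Django style; a sketch, not part of the original project:

# Sketch: the same routes for Django >= 2.0, where patterns() no longer exists.
from django.conf import settings
from django.contrib import admin
from django.urls import include, path, re_path
from django.views.static import serve

urlpatterns = [
    path('log/', include('requestlog.urls')),
    path('admin/', admin.site.urls),
    re_path(r'^', include('moca.mrs.urls')),
]

if settings.DEBUG:
    urlpatterns += [
        re_path(r'^static/(?P<path>.*)$', serve,
                {'document_root': settings.MEDIA_ROOT}),
    ]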