hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a047d0ac5a16d636bbec804560bb56282540b1b2
| 13,172 |
py
|
Python
|
remit/settings.py
|
naamara/blink
|
326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a
|
[
"Unlicense",
"MIT"
] | null | null | null |
remit/settings.py
|
naamara/blink
|
326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a
|
[
"Unlicense",
"MIT"
] | 10 |
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
remit/settings.py
|
naamara/blink
|
326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a
|
[
"Unlicense",
"MIT"
] | null | null | null |
''' settings for Django '''
import os
import django.conf.global_settings as DEFAULT_SETTINGS
# --- Deployment flags -------------------------------------------------------
LOCALHOST = False
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PAYMENTS = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR always ends with '/' so string concatenation below stays valid.
BASE_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir)) + '/'
LIVE = 1
ADMINS = (
    ('Madra David', '[email protected]'),
)
# Role addresses used for outgoing application mail.
APP_EMAILS = {
    'contact_us':'[email protected]',
    'about_us':'[email protected]',
    'info':'[email protected]',
    'support':'[email protected]',
}
# NOTE(review): this literal is a set, not a dict like APP_EMAILS above —
# confirm that consumers expect a set of addresses.
DEBUG_EMAILS = {
    '[email protected]' ,
}
APP_NAME = 'Useremit'
DOMAIN_NAME = 'Remit'
APP_TITLE = 'Remit | Send Money to Mobile Money in Uganda or Kenya | Pay utility bills online'
MANAGERS = ADMINS
USE_JUMIO = True
BASE_URL = 'https://useremit.com/'
# (A byte-identical duplicate assignment of BASE_DIR that followed here was
# removed; the value is unchanged.)
# Default Postgres connection.
# NOTE(review): live credentials are committed here — move them to
# local_settings / environment variables.
DATABASES = {
    'default': {
        # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        # Or path to database file if using sqlite3.
        'NAME': 'anenyuoe4',
        # The following settings are not used with sqlite3:
        'USER': 'dqebbquaa4iba',
        'PASSWORD': 'WMm8mq1ZYAOn',
        # Empty for localhost through domain sockets or '127.0.0.1' for
        # localhost through TCP. NOTE(review): 'LOCALHOST' is unconventional
        # (hostnames are case-insensitive, so it resolves) — confirm
        # 'localhost' or '' was not intended.
        'HOST': 'LOCALHOST',
        'PORT': '', # Set to empty string for default.
        'OPTIONS': {'autocommit': True, },
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# BUG FIX: ALLOWED_HOSTS entries must be bare hostnames. The original list
# contained scheme-prefixed URLs ('http://useremit.com', twice
# 'https://useremit.com'), which can never match an HTTP Host header, so
# requests to the apex domain were rejected with a 400.
ALLOWED_HOSTS = ['www.useremit.com', 'useremit.com']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
#TIME_ZONE = 'Africa/Nairobi'
TIME_ZONE ='UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = BASE_DIR + 'static/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = BASE_URL + 'static/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
#GEOIP_PATH = BASE_URL + 'geoip_data/'
# GeoIP data lives in a "geoip" directory next to this settings module.
geo_dir = os.path.dirname(__file__)
geo_rel_path = "geoip"
GEOIP_PATH = os.path.join(geo_dir, geo_rel_path)
# Template subdirectories for the different outgoing message channels.
EMAIL_TEMPLATE_DIR = BASE_DIR + 'templates/email/'
AJAX_TEMPLATE_DIR = BASE_DIR + 'templates/ajax/'
SMS_TEMPLATE_DIR = BASE_DIR + 'templates/sms/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): SECRET_KEY is committed to version control; rotate it and
# load it from the environment or local_settings instead.
SECRET_KEY = 'ksx8+lq!5pzx&)xuqp0sc-rdgtd14gmix-eglq(iz%3+7h)f52'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# Request/response middleware, outermost first. SSLify forces HTTPS before
# anything else runs.
MIDDLEWARE_CLASSES = (
    'sslify.middleware.SSLifyMiddleware',
    'django.middleware.common.CommonMiddleware',
    #'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    #'session_security.middleware.SessionSecurityMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'remit.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'remit.wsgi.application'
TEMPLATE_DIRS = (
    BASE_DIR + 'templates',
    BASE_DIR + 'remit_admin/templates/',
    BASE_DIR + 'remit_admin/templates/admin/',
)
INSTALLED_APPS = (
    #background tasks
    #'huey.djhuey',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'remit',
    'social_widgets',
    'accounts',
    #'south'
    'landingapp',
    'coverage',
    #'notification',
    'nexmo',
    'guardian',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    #'django_admin_bootstrapped.bootstrap3',
    #'django_admin_bootstrapped',
    # Uncomment the next line to enable the admin:
    'remit_admin',
    'session_security',
    'gravatar',
    'django_cron',
    'django.contrib.humanize',
    'django_extensions',
    #'django_bitcoin',
    'btc',
    'rest_framework',
    'rest_framework.authtoken',
    'api',
    'seo',
    'payments',
    'background_task',
    'django.contrib.admin',
    'ipn',
    'standard',
    'crispy_forms',
    'tinymce',
    #'django_twilio',
)
PAYPAL_RECEIVER_EMAIL = "[email protected]"
# Rest Framework
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
    # BUG FIX: the single entry below had no trailing comma, so the value
    # was a plain string instead of a 1-tuple; DRF iterates this setting,
    # which would have treated each character as a permission class path.
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAdminUser',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        #'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    # Use Django's standard `django.contrib.auth` permissions,
    'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S'
}
# JSON session serialization avoids the pickle-based default.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Only fires when DEBUG is False (i.e. in production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        # Mail ADMINS on request-handling errors (HTTP 500s).
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Custom template processors
# --- Third-party payment / identity / messaging integrations ----------------
# NOTE(review): every credential in this section is committed to version
# control; rotate them and load from the environment or local_settings.
# YOpay
YOPAY_USERNAME = '100224720137'
YOPAY_PASSWORD = 'jLQF-r1oa-OyIq-0zoQ-544O-7U1F-oGj5-YoyU'
YOPAY_ENDPOINT = 'https://paymentsapi1.yo.co.ug/ybs/task.php'
# Ipay
# (a redundant re-assignment of LIVE = 1, identical to the one near the top
# of this file, was removed here)
IPAY_CALLBACK_URL = '%stransaction/confirm_payment/' % BASE_URL
IPAY_USER = 'redcore'
IPAY_MERCHANT = 'RedCore'
IPAY_HASH_KEY = '0yiq0zoQ544O'
# uba
UBA_CALLBACK_URL = ''
UBA_MERCHANT_ID = ''
UBA_MERCHANT_KEY = ''
#jumio
JUMIO_URL="https://netverify.com/api/netverify/v2/initiateNetverify/"
JUMIO_TOKEN="fcf1eec3-728d-4f8a-8811-5b8e0e534597"
JUMIO_SECRET="9mnQyVj1ppiyVESYroDHZS23Z9OfQ9GS"
JUMIO_USER_AGENT="MyCompany MyApp/1.0.0"
# (a redundant re-assignment of USE_JUMIO = True, identical to the one near
# the top of this file, was removed here)
"""
JUMIO_SUCCESS_URL="https://simtransfer.com/jumiopass/"
JUMIO_ERROR_URL="https://simtransfer.com/jumiofail/"
"""
JUMIO_SUCCESS_URL="https://simtransfer.com/idscanned/"
JUMIO_ERROR_URL="https://simtransfer.com/idscanfailed/"
JUMIO_CALLBACK="https://simtransfer.com/jumiodata/"
# Mailgun
ANONYMOUS_USER_ID = -1
AUTH_PROFILE_MODULE = 'accounts.Profile'
LOGIN_URL = BASE_URL + 'login/'
SIGNUP_URL = BASE_URL + 'signup/'
LOGOUT_URL = BASE_URL + 'signout/'
# Tried in order: custom email-verification login, plain email auth, then
# guardian's object-level permission backend.
AUTHENTICATION_BACKENDS = (
    'accounts.backends.EmailVerificationBackend',
    'remit.backends.EmailAuthBackend',
    'guardian.backends.ObjectPermissionBackend',
)
ACTIVATION_LINK = BASE_URL + 'activate/'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
"""
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = ''
DEFAULT_TO_EMAIL = ''
"""
#EMAIL_PORT = 587
ADMIN_USER='admin_key_user'
ADMIN_USER_KEY='user_004_admin'
# Mailgun settings
DEFAULT_FROM_EMAIL = 'Remit.ug <[email protected]>'
#EMAIL_USE_TLS = True
#EMAIL_HOST = 'smtp.mailgun.org'
#EMAIL_HOST_USER = '[email protected]'
#EMAIL_HOST_PASSWORD = '25s0akinnuk8'
#EMAIL_PORT = 25
# Mailgun settings
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
#EMAIL_TEMPLATE_DIR = '%stemplates/email/' % (BASE_DIR)
# using sandbox account here , change later
"""
MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6'
MAILGUN_SERVER_NAME = 'remit.ug'
MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v2/remit.ug/messages'
"""
MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6'
MAILGUN_SERVER_NAME = 'useremit.com'
MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v3/useremit.com/messages'
CONTACT_NO = '+256783877133'
# Nexmo
NEXMO_USERNAME = '8cede62f'
NEXMO_PASSWORD = 'd4d43a29'
NEXMO_FROM = 'Remit'
#Nexmo App
NEXMO_API_KEY = '8cede62fSecret'
NEXMO_API_SECRET = 'd4d43a29'
NEXMO_DEFAULT_FROM = 'Remit'
#if set to zero we use twilio
USE_NEXMO = 0
USE_TWILIO = True
USE_SUKUMA = False
USE_AFRICA_SMS = True
TWILIO_ACCOUNT_SID='AC2a0de3ac9808d7bfa5c3d75853c073d6'
TWILIO_AUTH_TOKEN='82b2ab8535255c8fd8d96bad96103ae7'
TWILIO_DEFAULT_CALLERID = 'Remit'
# Session security
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# cron jobs
CRON_CLASSES = [
    "remit.cron.UpdateRates",
    # ...
]
# Paganation
# NOTE(review): "PAGNATION" is a typo for "PAGINATION", but the name cannot
# be changed here without updating every consumer of this setting.
PAGNATION_LIMIT = 10
# Avatar
GRAVATAR_URL = "https://www.gravatar.com/avatar.php?"
# Bitcoin
#BITCOIND_CONNECTION_STRING = "http://ubuntu:bitwa8bfede82llet@localhost:8332"
# NOTE(review): bitcoind RPC credentials are committed here; move them out.
BITCOIND_CONNECTION_STRING = "http://redcorebrpc:BKGyjwyNXzHumywcau3FubmyaJ8NypJtd1eSdTYCqSkJ@localhost:8332"
# How many bitcoin network confirmations are required until we consider the transaction
# as received
BITCOIN_MINIMUM_CONFIRMATIONS = 3
# Use Django signals to tell the system when new money has arrived to your
# wallets
BITCOIN_TRANSACTION_SIGNALING = True
from decimal import Decimal
MAIN_ADDRESS = '12oaMnJZZJRx59kWyAshzmogHERo8y54Et'
BITCOIN_PAYMENT_BUFFER_SIZE = 1
BITCOIN_ADDRESS_BUFFER_SIZE = 1
PAYMENT_VALID_HOURS = 1
BITCOIN_PRIVKEY_FEE = Decimal("0.0005")
BITCOIN_TRANSACTION_CACHING = 1
#admin who processed transactions
PROCESSED_BY = 1
#background tasks
#HUEY_CONFIG = {
#    'QUEUE': 'huey.backends.redis_backend.RedisBlockingQueue',
#    'QUEUE_NAME': 'test-queue',
#    'QUEUE_CONNECTION': {
#        'host': 'localhost',
#        'port': 6379,
#    },
#    'THREADS': 4,
#}
# Cookies only over HTTPS; CSRF cookie additionally hidden from JavaScript.
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_FAILURE_VIEW = 'remit.views.csrf_failure_view'
MTN_SDP = '172.25.48.43'
MTN_TEST_BED = 0
MTN_SDP_USERNAME = 'remitug.sp1'
MTN_SDP_PASS = 'Huawei2014'
MTN_SDP_SERVICEID = '2560110001380'
MTN_SDP_URL = 'http://172.25.48.43:8310/'
MTN_VENDOR_CODE = 'REMIT'
REVENUE_SHARE = 2.16
#disable email and sms sending
DISABLE_COMMS = False
#background tasks
MAX_ATTEMPTS = 5
#need this for generating reports from sqlite
IS_SQLITE = False
# (a duplicated OTHER_FEES = True assignment was removed here)
OTHER_FEES = True
SEND_KYC_SMS = True
# Pesapot
PESAPOT_URL = 'http://pesapot.com/api/'
PESAPOT_TOKEN = ''
PESAPOT_KEY = ''
#paybill
PAYBILL = False
DISABLE_MTN = True
ENABLE_TRADELANCE = True
ENABLE_YO = False
DISABLE_AIRTEL_MONEY = False
DISABLE_MTN_MOBILE_MONEY = False
#force Transaction id
FORCE_TRANSACTION_ID = True
# Localhost settings
# Crispy forms tags settings
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Optional machine-specific overrides; absence is not an error.
try:
    from local_settings import *
except ImportError:
    pass
STATIC_ROOT = BASE_DIR + 'static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = BASE_URL + 'static/'
| 24.994307 | 109 | 0.719708 |
a04c74859e147b481962d7389a2144e8a4b8236e
| 1,151 |
py
|
Python
|
tests/integration/test_issue_1447.py
|
alexey-tereshenkov-oxb/pex
|
2e2d1e50e604fdee48b0d51aea482ca255521ff0
|
[
"Apache-2.0"
] | 2,160 |
2015-01-06T17:57:39.000Z
|
2022-03-30T19:59:01.000Z
|
tests/integration/test_issue_1447.py
|
alexey-tereshenkov-oxb/pex
|
2e2d1e50e604fdee48b0d51aea482ca255521ff0
|
[
"Apache-2.0"
] | 1,242 |
2015-01-22T14:56:46.000Z
|
2022-03-31T18:02:38.000Z
|
tests/integration/test_issue_1447.py
|
Satertek/pex
|
64de1c4cf031118ef446ac98a8c164c91c23bb9b
|
[
"Apache-2.0"
] | 248 |
2015-01-15T13:34:50.000Z
|
2022-03-26T01:24:18.000Z
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import shutil
import subprocess
import sys
from pex.pex_info import PexInfo
from pex.testing import run_pex_command
from pex.typing import TYPE_CHECKING
from pex.variables import unzip_dir
if TYPE_CHECKING:
from typing import Any
| 29.512821 | 85 | 0.730669 |
a04cf7b68b006d07caae20b361bd4e847b1b78eb
| 13,900 |
py
|
Python
|
tests/gfp.py
|
mcepl/git-packaging-tools
|
de705a9ac2efd1752754e4feb093fe85821f9224
|
[
"MIT"
] | 8 |
2017-08-15T12:51:34.000Z
|
2020-10-07T09:58:34.000Z
|
tests/gfp.py
|
mcepl/git-packaging-tools
|
de705a9ac2efd1752754e4feb093fe85821f9224
|
[
"MIT"
] | 5 |
2017-02-04T12:32:16.000Z
|
2020-07-01T14:13:19.000Z
|
tests/gfp.py
|
mcepl/git-packaging-tools
|
de705a9ac2efd1752754e4feb093fe85821f9224
|
[
"MIT"
] | 6 |
2017-02-07T13:31:21.000Z
|
2021-02-10T23:14:03.000Z
|
#!/usr/bin/python3
#
# Copyright (c) 2017-2020, SUSE LLC
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer. Redistributions
# in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the SUSE Linux Products GmbH nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Author: Bo Maryniuk <[email protected]>
This tool helps to:
1. Format patches from Git the way it has a minimal impact on
the changes in the future
2. Update patches to the current package source
3. Detect content differences, if the filename is still the same
4. Generate include message for .changes logfile
'''
import os
import sys
import re
import argparse
import shutil
# Output filenames written next to the patches: ORDERING_FILE receives the
# generated "PatchN: <name>" spec snippets, CHANGES_FILE the .changes summary.
ORDERING_FILE = 'patches.orders.txt'
CHANGES_FILE = 'patches.changes.txt'
def remove_order(filename):
    '''
    Split the Git ordering prefix off a patch filename.

    Git formats patches as "XXXX-name.patch". Returns a tuple
    (order, name): order is the integer prefix (None when there is no
    purely numeric prefix), and name is the path with the prefix stripped
    and lower-cased. Without a prefix the filename is returned untouched.
    '''
    prefix = os.path.basename(filename).split('-')[0]
    # A valid prefix is non-empty and consists of digits only.
    if not prefix or re.sub(r'[0-9]', '', prefix):
        return None, filename
    stripped = filename.split('-', 1)[-1]
    renamed = os.path.join(os.path.dirname(filename), stripped).lower()
    return int(prefix), renamed
def remove_order_from_subject(src_file, dst_file, use_unique=False):
    '''
    Copy a patch file, normalising its Subject header.

    Git writes "Subject: [PATCH X/Y] ..." headers; this rewrites them to
    plain "Subject: [PATCH] ..." so regenerated patch sets diff cleanly.

    :param src_file: patch to read
    :param dst_file: destination path to write
    :param use_unique: if the destination exists, derive a unique name via
                       unique() instead of raising
    :raises IOError: when dst_file exists and use_unique is False
    '''
    if os.path.exists(dst_file) and not use_unique:
        raise IOError('the file {0} exists'.format(dst_file))
    if os.path.exists(dst_file) and use_unique:
        dst_file = unique(dst_file)
    # Use context managers so the handles are closed even on error
    # (the original leaked both the source and destination handles).
    with open(src_file) as src:
        content = src.read()
    with open(dst_file, 'w') as dst:
        for fline in content.split(os.linesep):
            fline_tk = re.split(r'\s+\[PATCH \d+/\d+\]\s+', fline)
            if len(fline_tk) == 2 and fline_tk[0] == 'Subject:':
                fline = ' [PATCH] '.join(fline_tk)
            dst.write('{0}\n'.format(fline))
def git_format_patch(tag):
    '''
    Run ``git format-patch`` for the given tag or commit range and report
    how many .patch files the command listed on stdout.
    '''
    output = os.popen('git format-patch {0}'.format(tag)).read()
    patches = sum(
        1 for line in output.split(os.linesep)
        if line.split('.')[-1] == 'patch'
    )
    print("Patches fetched: {0}".format(patches))
def get_diff_contents(data):
    '''
    Return only the hunk bodies of a patch, one entry per hunk.

    The trailing signature (everything after the last "--") is dropped,
    then the text is split on "@@ ... @@" hunk headers; each hunk is
    truncated at the next "diff --git" file header. Used to compare two
    patches by content while ignoring headers and metadata.
    '''
    # Yes, I know about library https://github.com/cscorley/whatthepatch
    # But for now we go ultra-primitive to keep no deps
    body = '--'.join(data.split("--")[:-1])
    return [
        hunk.split('diff --git')[0]
        for hunk in re.split(r'@@.*?@@.*?\n', body)[1:]
    ]
def unique(fname):
    '''
    Derive a fresh filename by bumping a "-N" counter on the stem.

    "name.patch" becomes "name-1.patch"; "name-3.patch" becomes
    "name-4.patch"; a stem whose last dash-segment is not a number
    (e.g. "a-b.patch") gets "-1" appended instead.

    :param fname: original filename
    :return: the renamed filename
    '''
    parts = fname.split('.')
    stem = parts[0]
    if '-' not in stem:
        parts[0] = '{0}-{1}'.format(stem, 1)
    else:
        prefix, _, tail = stem.rpartition('-')
        try:
            parts[0] = '{0}-{1}'.format(prefix, int(tail) + 1)
        except ValueError:
            # Last segment is not a counter ("str-str"); start one.
            parts[0] = '{0}-{1}'.format(stem, 1)
    return '.'.join(parts)
def extract_spec_source_patches(specfile):
    '''
    Extracts source patches from the .spec file to match existing
    comments, according to the
    https://en.opensuse.org/openSUSE:Packaging_Patches_guidelines

    Returns a dict mapping each patch filename (the value after "PatchN:")
    to the list of "#" comment lines that precede its declaration.
    NOTE(review): the comment lists are accumulated while iterating the
    section in reverse, so multi-line comments come out in reverse order —
    confirm whether callers expect original order.

    :param: specfile
    :return:
    '''
    # State machine over the file: head_buff collects everything before the
    # first PatchN: line, patch_section the contiguous PatchN:/comment run.
    patch_sec_start = False
    patch_sec_end = False
    head_buff = []
    patch_section = []
    for spec_line in open(specfile).read().split(os.linesep):
        if re.match(r'^[Pp]atch[0-9]+:', spec_line) and not patch_sec_start:
            patch_sec_start = True
        # Any line that is neither a comment nor a PatchN: declaration
        # terminates the patch section once it has started.
        if not spec_line.startswith('#') and \
           not re.match(r'^[Pp]atch[0-9]+:', spec_line) and \
           patch_sec_start and \
           not patch_sec_end:
            patch_sec_end = True
        if not patch_sec_start and not patch_sec_end:
            head_buff.append(spec_line)
        if patch_sec_start and not patch_sec_end:
            patch_section.append(spec_line)
    # The comment block immediately above the first PatchN: line belongs to
    # the first patch; prepend it to the section.
    first_comment = []
    for head_line in reversed(head_buff):
        if not head_line:
            break
        if head_line.startswith('#'):
            first_comment.append(head_line)
    patch_section.insert(0, os.linesep.join(first_comment))
    # Walk the section bottom-up: each PatchN: line is seen before the
    # comments that sit above it in the file.
    patchset = {}
    curr_key = None
    for line in reversed(patch_section):
        if re.match(r'^[Pp]atch[0-9]+:', line):
            curr_key = re.sub(r'^[Pp]atch[0-9]+:', '', line).strip()
            patchset[curr_key] = []
            continue
        if curr_key and line and line.startswith('#'):
            patchset[curr_key].append(line)
    return patchset
def do_remix_spec(args):
    '''
    Remix spec file.

    Prints the ordering file's PatchN: lines to stdout, each preceded by
    the comment block recovered from the existing spec file (or a
    "# Description N/A" placeholder when the patch had no comment).

    :param args: parsed CLI namespace; uses args.spec and args.ordering
    :return:
    :raises IOError: when the spec or ordering file cannot be found
    '''
    if not os.path.exists(args.spec or ''):
        raise IOError('Specfile {0} is not accessible or is somewhere else'.format(args.spec))
    if not os.path.exists(args.ordering or ''):
        # Fall back to the default ordering file in the current directory.
        args.ordering = './{0}'.format(ORDERING_FILE)
        if not os.path.exists(args.ordering):
            raise IOError('Ordering file is expected "./{0}" but is not visible'.format(ORDERING_FILE))
    patchset = extract_spec_source_patches(args.spec)
    for o_line in open(args.ordering).read().split(os.linesep):
        if re.match(r'^[Pp]atch[0-9]+:', o_line):
            # Split "PatchN:   <filename>" into its two non-empty tokens.
            ref, pname = [_f for _f in o_line.split(' ') if _f]
            print(os.linesep.join(patchset.get(pname) or ['# Description N/A']))
            print(ref.ljust(15), pname)
def do_create_patches(args):
    '''
    Create and reformat patches for the package.

    Either formats fresh patches from Git (args.format, requires an empty
    directory) or reworks already-formatted ones (args.existing). Each
    patch is renamed without its numeric prefix, its Subject header is
    normalised, and an ordering snippet is written to args.ordering (or
    ORDERING_FILE). Patches whose subject starts with args.skip_tag are
    deleted instead of processed.
    '''
    current_dir = os.path.abspath('.')
    if not args.existing:
        if os.listdir(current_dir):
            print("Error: this directory has to be empty!")
            sys.exit(1)
        git_format_patch(args.format)
    else:
        if not [fname for fname in os.listdir(current_dir) if fname.endswith('.patch')]:
            print("Error: can't find a single patch in {0} to work with!".format(current_dir))
            sys.exit(1)
    ord_fh = open(args.ordering or ORDERING_FILE, 'w')
    ord_fh.write('#\n#\n# This is pre-generated snippets of patch ordering\n#\n')
    ord_patches_p = []
    patches = 0
    for fname in os.listdir(current_dir):
        if fname.split('.')[-1] == 'patch':
            # Check if we should skip this patch in case subject starts with SKIP_TAG
            with open(fname) as patch_file:
                if any(re.match(r'^Subject: \[PATCH.*] {}'.format(re.escape(args.skip_tag)), i) for i in patch_file.readlines()):
                    print("Skipping {}".format(fname))
                    os.unlink(fname)
                    continue
            print("Preparing {}".format(fname))
            order, nfname = remove_order(fname)
            # NOTE(review): remove_order returns order=None for patches
            # without a numeric prefix; combined with -x/--index this line
            # would raise TypeError — confirm inputs are always prefixed.
            if args.index is not None:
                order += args.index
            remove_order_from_subject(fname, nfname, use_unique=args.increment)
            os.unlink(fname)
            ord_fh.write('{patch}{fname}\n'.format(patch='Patch{0}:'.format(order).ljust(15), fname=nfname))
            ord_patches_p.append(order)
            patches += 1
    if ord_patches_p:
        ord_fh.write('#\n#\n# Patch processing inclusion:\n')
        for order in ord_patches_p:
            ord_fh.write('%patch{num} -p1\n'.format(num=order))
    else:
        ord_fh.write('# Nothing here, folks... :-(\n')
    ord_fh.close()
    print("\nRe-formatted {0} patch{1}".format(patches, patches > 1 and 'es' or ''))
def do_update_patches(args):
    '''
    Update patches on the target package source.

    Synchronises the current directory with the patches found in
    args.update: new patches are copied in, patches that no longer exist
    upstream are removed, and patches whose diff content differs are
    either replaced (when args.changed is set) or reported as a warning.
    A summary snippet for the .changes log is written to CHANGES_FILE.
    '''
    print("Updating packages from {0} directory".format(args.update))
    added = []
    removed = []
    changed = []
    # Gather current patches. True means "not (yet) seen upstream"; any
    # entry still True after the scan below is an orphan to delete.
    current_patches = {}
    for fname in os.listdir(os.path.abspath(".")):
        if fname.endswith('.patch'):
            current_patches[os.path.basename(fname)] = True
    for fname in os.listdir(args.update):
        if fname.endswith('.patch'):
            fname = os.path.join(args.update, fname)
            if os.path.isfile(fname):
                current_patches[os.path.basename(fname)] = False
                n_fname = os.path.basename(fname)
                if not os.path.exists(n_fname):
                    print("Adding {0} patch".format(fname))
                    shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname))
                    added.append(n_fname)
                else:
                    # Same filename: compare hunk contents only, ignoring
                    # headers/metadata.
                    if get_diff_contents(open(fname).read()) != get_diff_contents(open(n_fname).read()):
                        if args.changed:
                            print("Replacing {0} patch".format(n_fname))
                            os.unlink(n_fname)
                            shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname))
                            changed.append(n_fname)
                        else:
                            print("WARNING: Patches {0} and {1} are different!".format(fname, n_fname))
    for fname in sorted([patch_name for patch_name, is_dead in list(current_patches.items()) if is_dead]):
        print("Removing {0} patch".format(fname))
        os.unlink(fname)
        removed.append(fname)
    # Generate an include for spec changes
    with open(CHANGES_FILE, "w") as changes:
        for title, data in [('Changed', changed), ('Added', added),
                            ('Removed', removed)]:
            if not data:
                continue
            print("- {}:".format(title), file=changes)
            for fname in sorted(data):
                print(" * {}".format(fname), file=changes)
            print(file=changes)
    # BUG FIX: the original tested `not changes` — the (closed, always
    # truthy) file object from the with-block above — so this message could
    # never print. The intended check is the `changed` list.
    if not removed and not added and not changed:
        print("No files has been changed")
def main():
    '''
    Main app.

    Parses the CLI flags and dispatches to exactly one mode: version
    banner, spec remix (-s), patch update (-u), or patch creation
    (-f / -e). With no usable combination the help text is printed and
    the process exits with status 1.
    '''
    VERSION = '0.2'
    parser = argparse.ArgumentParser(description='Git patch formatter for RPM packages')
    parser.add_argument('-u', '--update', action='store', const=None,
                        help='update current patches with the destination path')
    parser.add_argument('-f', '--format', action='store', const=None,
                        help='specify tag or range of commits for patches to be formatted')
    parser.add_argument('-o', '--ordering', action='store', const=None,
                        help='specify ordering spec inclusion file. Default: {0}'.format(ORDERING_FILE))
    parser.add_argument('-x', '--index', action='store', const=None,
                        help='specify start ordering index. Default: 0')
    parser.add_argument('-s', '--spec', action='store', const=None,
                        help='remix spec file and extract sources with their comments to match new patch ordering')
    parser.add_argument('-i', '--increment', action='store_const', const=True,
                        help='use increments for unique names when patch commits repeated')
    parser.add_argument('-c', '--changed', action='store_const', const=True,
                        help='update also changed files with the content')
    parser.add_argument('-e', '--existing', action='store_const', const=True,
                        help='work with already formatted patches from Git')
    parser.add_argument('-k', '--skip-tag', action='store', const=None, default='[skip]',
                        help='skip commits starting with this tag. Default: [skip]')
    parser.add_argument('-v', '--version', action='store_const', const=True,
                        help='show version')
    args = parser.parse_args()
    try:
        # argparse stores -x as a string; validate and convert up front.
        if args.index:
            try:
                args.index = int(args.index)
            except ValueError:
                raise Exception('Value "{0}" should be a digit'.format(args.index))
        if args.version:
            print("Version: {0}".format(VERSION))
        elif args.spec:
            do_remix_spec(args)
        elif args.update and not args.format:
            do_update_patches(args)
        elif (args.format and not args.update) or args.existing:
            do_create_patches(args)
        else:
            parser.print_help()
            sys.exit(1)
    except Exception as ex:
        # NOTE(review): the error is reported but the process still exits
        # with status 0 — confirm whether a non-zero exit is wanted here.
        print("Critical error:", ex, file=sys.stderr)
if __name__ == '__main__':
    main()
| 36.197917 | 129 | 0.606475 |
a04efbad847960e56a6c9a8e43d4465164fb4801
| 5,455 |
py
|
Python
|
modules/dispatch.py
|
kex5n/Vehicles-Dispatch-Simulator
|
d0cca03fbf56e4b0ceeef8dafc59de105c1d4507
|
[
"MIT"
] | null | null | null |
modules/dispatch.py
|
kex5n/Vehicles-Dispatch-Simulator
|
d0cca03fbf56e4b0ceeef8dafc59de105c1d4507
|
[
"MIT"
] | null | null | null |
modules/dispatch.py
|
kex5n/Vehicles-Dispatch-Simulator
|
d0cca03fbf56e4b0ceeef8dafc59de105c1d4507
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
import random
from typing import List
import numpy as np
import torch
from config import Config
from domain import DispatchMode
from models import DQN
from modules.state import FeatureManager
from objects import Area, Vehicle
from objects.area import AreaManager
from objects.vehicle import VehicleManager
# Seed every RNG source used by the simulator (python, numpy, torch CPU and
# all CUDA devices) and force deterministic cuDNN kernels so runs are
# reproducible.
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
torch.backends.cudnn.deterministic = True
def load_dispatch_component(dispatch_mode: DispatchMode, config: Config, is_train=False) -> DispatchModuleInterface:
    """
    Build the dispatch module matching the requested mode.

    Returns None for NOT_DISPATCH (dispatching disabled) and raises
    NotImplementedError for any unrecognised mode.
    """
    if dispatch_mode == DispatchMode.NOT_DISPATCH:
        return None
    if dispatch_mode == DispatchMode.DQN:
        return DQNDispatch(config=config, is_train=is_train)
    if dispatch_mode == DispatchMode.RANDOM:
        return RandomDispatch()
    raise NotImplementedError
| 40.110294 | 148 | 0.700642 |
a04f7f2d5934d5148efee6d0cd9e612d55de51c8
| 4,716 |
py
|
Python
|
dice_vtk/geometries/geometry_base.py
|
dicehub/dice_vtk
|
ab8d9f34ae359461db5687d05bf38548bbaca6ea
|
[
"MIT"
] | null | null | null |
dice_vtk/geometries/geometry_base.py
|
dicehub/dice_vtk
|
ab8d9f34ae359461db5687d05bf38548bbaca6ea
|
[
"MIT"
] | null | null | null |
dice_vtk/geometries/geometry_base.py
|
dicehub/dice_vtk
|
ab8d9f34ae359461db5687d05bf38548bbaca6ea
|
[
"MIT"
] | null | null | null |
# Standard Python modules
# =======================
import weakref
from abc import ABCMeta, abstractmethod, abstractproperty
# External modules
# ================
from vtk import vtkActor
from vtk import vtkMapper
from vtk import vtkPolyDataAlgorithm
from vtk import vtkBoundingBox
# DICE modules
# ============
from dice_tools import wizard
def set_selected(self, enable):
    """
    Toggle this geometry object's selection state.

    Selecting saves the current colour (when the object exposes one) and
    repaints the object red; deselecting restores the saved colour. Each
    transition is broadcast through the wizard event bus. Calls that do
    not change the state are no-ops.
    """
    if enable and not self.__selected:
        color = getattr(self, 'color', None)
        # Idiom fix: identity comparison with None instead of `!= None`.
        if color is not None:
            self.__saved_color = color
            self.set_color([0.9, 0, 0])
        self.__selected = True
        wizard.w_geometry_object_selection_state(self, True)
    elif not enable and self.__selected:
        self.__selected = False
        color = getattr(self, 'color', None)
        if color is not None:
            self.set_color(self.__saved_color)
            self.__saved_color = None
        wizard.w_geometry_object_selection_state(self, False)
wizard.subscribe(GeometryBase, 'w_geometry_objects_select')
| 23.117647 | 71 | 0.601569 |
a05379809d542906a1e8b3ecab8d346bf1a2d752
| 2,272 |
py
|
Python
|
tests/integration/sts/replayer_integration_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | 5 |
2016-03-18T15:12:04.000Z
|
2019-01-28T20:18:24.000Z
|
tests/integration/sts/replayer_integration_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/sts/replayer_integration_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | 1 |
2019-11-02T22:04:48.000Z
|
2019-11-02T22:04:48.000Z
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
# Make the repository root importable when running this test directly.
sys.path.append(os.path.dirname(__file__) + "/../../..")
# Minimal STS config template; "%s" is filled with a trace path at use time.
simple_cfg = '''
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
simulation_config = SimulationConfig()
control_flow = Replayer(simulation_config, "%s")
'''
if __name__ == '__main__':
    unittest.main()
| 33.910448 | 116 | 0.677377 |
a054dcd6697f8d246a99e2b87deb291ef103d4ce
| 101 |
py
|
Python
|
add.py
|
Harshillab/python
|
877d5fa6769ce7bcc28ca75c247df42ed7375e55
|
[
"MIT"
] | null | null | null |
add.py
|
Harshillab/python
|
877d5fa6769ce7bcc28ca75c247df42ed7375e55
|
[
"MIT"
] | null | null | null |
add.py
|
Harshillab/python
|
877d5fa6769ce7bcc28ca75c247df42ed7375e55
|
[
"MIT"
] | null | null | null |
import os
a=input("enter username:")
if a.isalpha():
os.system("useradd "+a)
os.system("passwd a")
| 16.833333 | 26 | 0.673267 |
a055c0b6a8a397cfaf7bde8f028637510c8a76bc
| 3,733 |
py
|
Python
|
responsive_dashboard/views.py
|
rhooper/django-responsive-dashboard
|
039d634cbefb87be610334c01bda1a790cf5cd71
|
[
"BSD-3-Clause"
] | 28 |
2015-07-08T01:03:17.000Z
|
2022-03-11T13:30:49.000Z
|
responsive_dashboard/views.py
|
burke-software/django-responsive-dashboard
|
e08d7a12155d87d78cb3928bcc58f2701d326b69
|
[
"BSD-3-Clause"
] | 4 |
2018-09-03T14:15:42.000Z
|
2021-06-10T17:24:09.000Z
|
responsive_dashboard/views.py
|
rhooper/django-responsive-dashboard
|
039d634cbefb87be610334c01bda1a790cf5cd71
|
[
"BSD-3-Clause"
] | 13 |
2015-01-15T14:33:30.000Z
|
2021-08-23T02:39:38.000Z
|
"""Views."""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from responsive_dashboard.dashboard import dashboards
from responsive_dashboard.models import UserDashboard, UserDashlet
# pylint: disable=no-member
| 34.88785 | 114 | 0.698902 |
a05679a1770f12c767a08a09a8c1456749cc03d4
| 769 |
py
|
Python
|
app/config.py
|
tomakado/markovscope-api
|
3dd60439d980e3b77429850f1b43cb37ffd02f99
|
[
"BSD-3-Clause"
] | 1 |
2021-03-06T06:36:25.000Z
|
2021-03-06T06:36:25.000Z
|
app/config.py
|
tomakado/markovscope-api
|
3dd60439d980e3b77429850f1b43cb37ffd02f99
|
[
"BSD-3-Clause"
] | null | null | null |
app/config.py
|
tomakado/markovscope-api
|
3dd60439d980e3b77429850f1b43cb37ffd02f99
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from dataclasses import dataclass
CONFIG = Config.create_from_env()
| 25.633333 | 80 | 0.595579 |
a0578bb9313ba5a000fe92b2495ddd4a94b1be7e
| 15,486 |
py
|
Python
|
atlas/atlas.py
|
pythseq/atlas
|
6fd8d9e8ad05d234fc408aef8e0989da199f3b48
|
[
"BSD-3-Clause"
] | 1 |
2020-12-31T14:54:49.000Z
|
2020-12-31T14:54:49.000Z
|
atlas/atlas.py
|
pythseq/atlas
|
6fd8d9e8ad05d234fc408aef8e0989da199f3b48
|
[
"BSD-3-Clause"
] | null | null | null |
atlas/atlas.py
|
pythseq/atlas
|
6fd8d9e8ad05d234fc408aef8e0989da199f3b48
|
[
"BSD-3-Clause"
] | null | null | null |
import click
import logging
import multiprocessing
import os
import sys
from atlas import __version__
from atlas.conf import make_config
from atlas.parsers import refseq_parser
from atlas.tables import merge_tables
from atlas.workflows import download, run_workflow
logging.basicConfig(level=logging.INFO, datefmt="%Y-%m-%d %H:%M", format="[%(asctime)s %(levelname)s] %(message)s")
if __name__ == "__main__":
cli()
| 54.336842 | 323 | 0.694692 |
a0590b43efec682503f6e281362973bb8f85de85
| 1,101 |
py
|
Python
|
tests/unit/core/types/test_relationships.py
|
jeffsawatzky/python-jsonapi
|
8f181d6764b525f58d06517c65b1f0d24f3c2282
|
[
"MIT"
] | null | null | null |
tests/unit/core/types/test_relationships.py
|
jeffsawatzky/python-jsonapi
|
8f181d6764b525f58d06517c65b1f0d24f3c2282
|
[
"MIT"
] | 237 |
2020-07-23T05:53:22.000Z
|
2022-03-30T23:02:35.000Z
|
tests/unit/core/types/test_relationships.py
|
jeffsawatzky/python-jsonapi
|
8f181d6764b525f58d06517c65b1f0d24f3c2282
|
[
"MIT"
] | null | null | null |
"""Test cases for the python_jsonapi.core.types.relationships module."""
from python_jsonapi.core.types.relationships import Relationship
from python_jsonapi.core.types.relationships import RelationshipsMixin
def test_relationship_init() -> None:
"""Can init a new relationships."""
sut = Relationship()
assert sut is not None
def test_mixin_init() -> None:
"""Can init a new mixin."""
sut = RelationshipsMixin()
assert sut is not None
relationship = Relationship()
sut = RelationshipsMixin(relationships={"self": relationship})
assert sut is not None
assert sut.relationships is not None
assert sut.relationships["self"] == relationship
def test_mixin_add_relationship() -> None:
"""Can add a new entry."""
sut = RelationshipsMixin()
sut.add_relationship(key="relationship1", relationship=Relationship())
sut.add_relationship(key="relationship2", relationship=Relationship())
assert sut.relationships is not None
assert sut.relationships["relationship1"] is not None
assert sut.relationships["relationship2"] is not None
| 34.40625 | 74 | 0.735695 |
a0595a142eaf248d183d94e735f0ba479dc117a7
| 48 |
py
|
Python
|
needlestack/indices/__init__.py
|
needlehaystack/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 3 |
2019-10-03T22:15:21.000Z
|
2022-02-08T09:05:41.000Z
|
needlestack/indices/__init__.py
|
cungtv/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 1 |
2021-04-30T21:08:47.000Z
|
2021-04-30T21:08:47.000Z
|
needlestack/indices/__init__.py
|
cungtv/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 2 |
2019-08-02T19:13:09.000Z
|
2019-10-25T01:47:17.000Z
|
from needlestack.indices.index import BaseIndex
| 24 | 47 | 0.875 |
a05a419a9ddf5084b706e695f35bb68b2e11e8f7
| 698 |
py
|
Python
|
app/accounts/utilities.py
|
porowns/Krypted-Auth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 6 |
2017-12-13T21:53:05.000Z
|
2018-10-04T02:47:05.000Z
|
app/accounts/utilities.py
|
porowns/Krypted-Auth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 106 |
2019-08-11T23:00:39.000Z
|
2021-06-10T19:45:54.000Z
|
app/accounts/utilities.py
|
KryptedGaming/kryptedauth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 10 |
2020-01-18T11:28:44.000Z
|
2022-02-21T06:08:39.000Z
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
| 33.238095 | 123 | 0.684814 |
a05b0d63377d071367b35f0034a3b68acdab2c2d
| 245 |
py
|
Python
|
run/lemmatize.py
|
osmanbaskaya/mapping-impact
|
8024dd3b916ac2dfc336221dd32faba4c0a98442
|
[
"MIT"
] | 1 |
2016-03-14T15:28:22.000Z
|
2016-03-14T15:28:22.000Z
|
run/lemmatize.py
|
osmanbaskaya/mapping-impact
|
8024dd3b916ac2dfc336221dd32faba4c0a98442
|
[
"MIT"
] | null | null | null |
run/lemmatize.py
|
osmanbaskaya/mapping-impact
|
8024dd3b916ac2dfc336221dd32faba4c0a98442
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
from nltk.stem.wordnet import WordNetLemmatizer
import sys
lmtzr = WordNetLemmatizer()
for line in sys.stdin:
print ' '.join(map(lmtzr.lemmatize, line.split()))
| 14.411765 | 54 | 0.689796 |
a05cc4dfec88f66003f8b833c676b2a3c02c79c3
| 4,066 |
py
|
Python
|
markov.py
|
garybake/markov_lyrics
|
816043acd849b77097aa5bd504b123c6b306b801
|
[
"MIT"
] | null | null | null |
markov.py
|
garybake/markov_lyrics
|
816043acd849b77097aa5bd504b123c6b306b801
|
[
"MIT"
] | null | null | null |
markov.py
|
garybake/markov_lyrics
|
816043acd849b77097aa5bd504b123c6b306b801
|
[
"MIT"
] | null | null | null |
# https://realpython.com/blog/python/lyricize-a-flask-app-to-create-lyrics-using-markov-chains/
from random import choice
import sys
text = "some sample text"
text = """
An old man turned ninety-eight
He won the lottery and died the next day
It's a black fly in your Chardonnay
It's a death row pardon two minutes too late
And isn't it ironic... don't you think
It's like rain on your wedding day
It's a free ride when you've already paid
It's the good advice that you just didn't take
Who would've thought... it figures
Mr. Play It Safe was afraid to fly
He packed his suitcase and kissed his kids goodbye
He waited his whole damn life to take that flight
And as the plane crashed down he thought
"Well isn't this nice..."
And isn't it ironic... don't you think
It's like rain on your wedding day
It's a free ride when you've already paid
It's the good advice that you just didn't take
Who would've thought... it figures
Well life has a funny way of sneaking up on you
When you think everything's okay and everything's going right
And life has a funny way of helping you out when
You think everything's gone wrong and everything blows up
In your face
A traffic jam when you're already late
A no-smoking sign on your cigarette break
It's like ten thousand spoons when all you need is a knife
It's meeting the man of my dreams
And then meeting his beautiful wife
And isn't it ironic...don't you think
A little too ironic...and, yeah, I really do think...
It's like rain on your wedding day
It's a free ride when you've already paid
It's the good advice that you just didn't take
Who would've thought... it figures
Life has a funny way of sneaking up on you
Life has a funny, funny way of helping you out
Helping you out
I recommend getting your heart trampled on to anyone
I recommend walking around naked in your living room
Swallow it down (what a jagged little pill)
It feels so good (swimming in your stomach)
Wait until the dust settles
You live you learn
You love you learn
You cry you learn
You lose you learn
You bleed you learn
You scream you learn
I recommend biting off more then you can chew to anyone
I certainly do
I recommend sticking your foot in your mouth at any time
Feel free
Throw it down (the caution blocks you from the wind)
Hold it up (to the rays)
You wait and see when the smoke clears
You live you learn
You love you learn
You cry you learn
You lose you learn
You bleed you learn
You scream you learn
Wear it out (the way a three-year-old would do)
Melt it down (you're gonna have to eventually anyway)
The fire trucks are coming up around the bend
You live you learn
You love you learn
You cry you learn
You lose you learn
You bleed you learn
You scream you learn
You grieve you learn
You choke you learn
You laugh you learn
You choose you learn
You pray you learn
You ask you learn
You live you learn
"""
# text = "For now, well generate sample text via the very scientific method of throwing a string directly into the code based on some copied & pasted Alanis Morisette lyrics."
if __name__ == "__main__":
generateText(text, int(sys.argv[1]), int(sys.argv[2]))
| 30.571429 | 175 | 0.734629 |
a05d8bd7f43816678e051cbb74ff64ee556b6710
| 1,545 |
py
|
Python
|
curriculum_tools/curriculum_tools/NamedEnv.py
|
darpa-l2m/meta-arcade
|
9c9539c1feef89e9d1d55507bf4f75c965a25038
|
[
"MIT"
] | 2 |
2021-12-17T19:54:41.000Z
|
2021-12-20T06:08:31.000Z
|
curriculum_tools/curriculum_tools/NamedEnv.py
|
darpa-l2m/meta-arcade
|
9c9539c1feef89e9d1d55507bf4f75c965a25038
|
[
"MIT"
] | 1 |
2021-12-17T20:45:07.000Z
|
2021-12-21T16:30:24.000Z
|
curriculum_tools/curriculum_tools/NamedEnv.py
|
darpa-l2m/meta-arcade
|
9c9539c1feef89e9d1d55507bf4f75c965a25038
|
[
"MIT"
] | null | null | null |
"""
Copyright 2021 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the Software), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import gym
import copy
| 33.586957 | 82 | 0.741748 |
a05f0813319812f0e9d30a1ddfef3dd56345c333
| 9,265 |
py
|
Python
|
scirex/metrics/paired_bootstrap.py
|
viswavi/SciREX
|
8e4b402e95d438c92eeecee315d389903a963b8d
|
[
"Apache-2.0"
] | null | null | null |
scirex/metrics/paired_bootstrap.py
|
viswavi/SciREX
|
8e4b402e95d438c92eeecee315d389903a963b8d
|
[
"Apache-2.0"
] | null | null | null |
scirex/metrics/paired_bootstrap.py
|
viswavi/SciREX
|
8e4b402e95d438c92eeecee315d389903a963b8d
|
[
"Apache-2.0"
] | null | null | null |
# Adapted from Graham Neubig's Paired Bootstrap script
# https://github.com/neubig/util-scripts/blob/master/paired-bootstrap.py
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score
from tqdm import tqdm
EVAL_TYPE_ACC = "acc"
EVAL_TYPE_BLEU = "bleu"
EVAL_TYPE_BLEU_DETOK = "bleu_detok"
EVAL_TYPE_PEARSON = "pearson"
EVAL_TYPE_F1 = "f1"
EVAL_TYPE_MACRO_F1 = "macro-f1"
EVAL_TYPE_PREC = "precision"
EVAL_TYPE_REC = "recall"
EVAL_TYPE_AVG = "avg"
EVAL_TYPES = [EVAL_TYPE_ACC,
EVAL_TYPE_BLEU,
EVAL_TYPE_BLEU_DETOK,
EVAL_TYPE_PEARSON,
EVAL_TYPE_F1,
EVAL_TYPE_AVG,
EVAL_TYPE_PREC,
EVAL_TYPE_REC]
def eval_preproc(data, eval_type='acc'):
''' Preprocess into the appropriate format for a particular evaluation type '''
if type(data) == str:
data = data.strip()
if eval_type == EVAL_TYPE_BLEU:
data = data.split()
elif eval_type == EVAL_TYPE_PEARSON:
data = float(data)
elif eval_type in [EVAL_TYPE_F1, EVAL_TYPE_MACRO_F1, EVAL_TYPE_PREC, EVAL_TYPE_REC]:
data = float(data)
elif eval_type == EVAL_TYPE_AVG:
data = float(data)
return data
def eval_measure(gold, sys, eval_type='acc'):
''' Evaluation measure
This takes in gold labels and system outputs and evaluates their
accuracy. It currently supports:
* Accuracy (acc), percentage of labels that match
* Pearson's correlation coefficient (pearson)
* BLEU score (bleu)
* BLEU_detok, on detokenized references and translations, with internal tokenization
:param gold: the correct labels
:param sys: the system outputs
:param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
'''
if eval_type == EVAL_TYPE_ACC:
return sum([1 if g == s else 0 for g, s in zip(gold, sys)]) / float(len(gold))
elif eval_type == EVAL_TYPE_BLEU:
import nltk
gold_wrap = [[x] for x in gold]
return nltk.translate.bleu_score.corpus_bleu(gold_wrap, sys)
elif eval_type == EVAL_TYPE_PEARSON:
return np.corrcoef([gold, sys])[0,1]
elif eval_type == EVAL_TYPE_BLEU_DETOK:
import sacrebleu
# make sure score is 0-based instead of 100-based
return sacrebleu.corpus_bleu(sys, [gold]).score / 100.
elif eval_type == EVAL_TYPE_F1:
return f1_score(gold, sys)
elif eval_type == EVAL_TYPE_MACRO_F1:
return f1_score(gold, sys, average="macro")
elif eval_type == EVAL_TYPE_PREC:
return precision_score(gold, sys)
elif eval_type == EVAL_TYPE_REC:
return recall_score(gold, sys)
elif eval_type == EVAL_TYPE_AVG:
return np.mean(sys)
else:
raise NotImplementedError('Unknown eval type in eval_measure: %s' % eval_type)
def eval_with_paired_bootstrap(gold, sys1, sys2,
num_samples=10000, sample_ratio=0.5,
eval_type='acc',
return_results=False):
''' Evaluate with paired boostrap
This compares two systems, performing a significance tests with
paired bootstrap resampling to compare the accuracy of the two systems.
:param gold: The correct labels
:param sys1: The output of system 1
:param sys2: The output of system 2
:param num_samples: The number of bootstrap samples to take
:param sample_ratio: The ratio of samples to take every time
:param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
'''
assert(len(gold) == len(sys1))
assert(len(gold) == len(sys2))
# Preprocess the data appropriately for they type of eval
gold = [eval_preproc(x, eval_type) for x in gold]
sys1 = [eval_preproc(x, eval_type) for x in sys1]
sys2 = [eval_preproc(x, eval_type) for x in sys2]
sys1_scores = []
sys2_scores = []
wins = [0, 0, 0]
n = len(gold)
ids = list(range(n))
for _ in tqdm(range(num_samples)):
# Subsample the gold and system outputs
np.random.shuffle(ids)
reduced_ids = ids[:int(len(ids)*sample_ratio)]
reduced_gold = [gold[i] for i in reduced_ids]
reduced_sys1 = [sys1[i] for i in reduced_ids]
reduced_sys2 = [sys2[i] for i in reduced_ids]
# Calculate accuracy on the reduced sample and save stats
sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type)
sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type)
if sys1_score > sys2_score:
wins[0] += 1
elif sys1_score < sys2_score:
wins[1] += 1
else:
wins[2] += 1
sys1_scores.append(sys1_score)
sys2_scores.append(sys2_score)
# Print win stats
wins = [x/float(num_samples) for x in wins]
print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))
if wins[0] > wins[1]:
print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0]))
elif wins[1] > wins[0]:
print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1]))
# Print system stats
sys1_scores.sort()
sys2_scores.sort()
print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
if return_results:
sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
p_value_lose = 1-wins[0]
p_value_win = 1-wins[1]
return sys1_summary, sys2_summary, p_value_lose, p_value_win
def eval_with_hierarchical_paired_bootstrap(gold, sys1_list, sys2_list,
num_samples=10000, sample_ratio=0.5,
eval_type='acc',
return_results=False):
''' Evaluate with a hierarchical paired boostrap
This compares two systems, performing a significance tests with
paired bootstrap resampling to compare the accuracy of the two systems, with
two-level sampling: first we sample a model, then we sample data to evaluate
it on.
:param gold: The correct labels
:param sys1: The output of system 1
:param sys2: The output of system 2
:param num_samples: The number of bootstrap samples to take
:param sample_ratio: The ratio of samples to take every time
:param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
'''
for sys1 in sys1_list:
assert(len(gold) == len(sys1))
for sys2 in sys2_list:
assert(len(gold) == len(sys2))
# Preprocess the data appropriately for they type of eval
gold = [eval_preproc(x, eval_type) for x in gold]
sys1_list = [[eval_preproc(x, eval_type) for x in sys1] for sys1 in sys1_list]
sys2_list = [[eval_preproc(x, eval_type) for x in sys2] for sys2 in sys2_list]
sys1_scores = []
sys2_scores = []
wins = [0, 0, 0]
n = len(gold)
ids = list(range(n))
for _ in tqdm(range(num_samples)):
# Subsample the gold and system outputs
np.random.shuffle(ids)
reduced_ids = ids[:int(len(ids)*sample_ratio)]
sys1_idx = np.random.choice(list(range(len(sys1_list))))
sys1 = sys1_list[sys1_idx]
sys2_idx = np.random.choice(list(range(len(sys2_list))))
sys2 = sys2_list[sys2_idx]
reduced_gold = [gold[i] for i in reduced_ids]
reduced_sys1 = [sys1[i] for i in reduced_ids]
reduced_sys2 = [sys2[i] for i in reduced_ids]
# Calculate accuracy on the reduced sample and save stats
sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type)
sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type)
if sys1_score > sys2_score:
wins[0] += 1
elif sys1_score < sys2_score:
wins[1] += 1
else:
wins[2] += 1
sys1_scores.append(sys1_score)
sys2_scores.append(sys2_score)
# Print win stats
wins = [x/float(num_samples) for x in wins]
print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))
if wins[0] > wins[1]:
print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0]))
elif wins[1] > wins[0]:
print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1]))
# Print system stats
sys1_scores.sort()
sys2_scores.sort()
print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
if return_results:
sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
p_value_lose = 1-wins[0]
p_value_win = 1-wins[1]
return sys1_summary, sys2_summary, p_value_lose, p_value_win
| 40.814978 | 135 | 0.681705 |
a05f5a0fc89824667b995e5851cdb833729517df
| 970 |
py
|
Python
|
mypage/paginator.py
|
kirill-ivanov-a/mypage-flask
|
b803dfdf3d38d32879d81b8682d51e387c8f709f
|
[
"MIT"
] | null | null | null |
mypage/paginator.py
|
kirill-ivanov-a/mypage-flask
|
b803dfdf3d38d32879d81b8682d51e387c8f709f
|
[
"MIT"
] | null | null | null |
mypage/paginator.py
|
kirill-ivanov-a/mypage-flask
|
b803dfdf3d38d32879d81b8682d51e387c8f709f
|
[
"MIT"
] | null | null | null |
from paginate_sqlalchemy import SqlalchemyOrmPage
| 34.642857 | 88 | 0.609278 |
a060c7c4400126644e1d48eb927d2de5fe556729
| 4,915 |
py
|
Python
|
models/deeplab_v2.py
|
iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer
|
57904544c6d6b43dcd5937afeb474c0a47456d98
|
[
"MIT"
] | null | null | null |
models/deeplab_v2.py
|
iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer
|
57904544c6d6b43dcd5937afeb474c0a47456d98
|
[
"MIT"
] | null | null | null |
models/deeplab_v2.py
|
iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer
|
57904544c6d6b43dcd5937afeb474c0a47456d98
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
# import slim
# conv layers
layers = tf.contrib.layers
arg_scope = tf.contrib.framework.arg_scope
| 56.494253 | 99 | 0.585554 |
a06437850e2dae1448abd64b704f6b42218ef386
| 968 |
py
|
Python
|
Python/logging.py
|
saurabhcommand/Hello-world
|
647bad9da901a52d455f05ecc37c6823c22dc77e
|
[
"MIT"
] | 1,428 |
2018-10-03T15:15:17.000Z
|
2019-03-31T18:38:36.000Z
|
Python/logging.py
|
saurabhcommand/Hello-world
|
647bad9da901a52d455f05ecc37c6823c22dc77e
|
[
"MIT"
] | 1,162 |
2018-10-03T15:05:49.000Z
|
2018-10-18T14:17:52.000Z
|
Python/logging.py
|
saurabhcommand/Hello-world
|
647bad9da901a52d455f05ecc37c6823c22dc77e
|
[
"MIT"
] | 3,909 |
2018-10-03T15:07:19.000Z
|
2019-03-31T18:39:08.000Z
|
import datetime
# Log parm(File_name)
| 31.225806 | 86 | 0.572314 |
a064737d7eb5496d755ad0d39ca50e2c9279c4d9
| 10,541 |
py
|
Python
|
tfep/utils/cli/tool.py
|
andrrizzi/tfep
|
a98ec870007a2ceb72cab147d9e0dfffb7dc8849
|
[
"MIT"
] | 5 |
2021-07-30T16:01:46.000Z
|
2021-12-14T15:24:29.000Z
|
tfep/utils/cli/tool.py
|
andrrizzi/tfep
|
a98ec870007a2ceb72cab147d9e0dfffb7dc8849
|
[
"MIT"
] | 2 |
2021-08-13T12:19:13.000Z
|
2021-10-06T08:04:18.000Z
|
tfep/utils/cli/tool.py
|
andrrizzi/tfep
|
a98ec870007a2ceb72cab147d9e0dfffb7dc8849
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Utility classes to wrap command line tools.
The module provides a class :class:`.CLITool` that provides boilerplate code to
wrap command line tools and make them compatible to :class:`~tfep.utils.cli.Launcher`.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import abc
import inspect
import os
# =============================================================================
# CLITOOL
# =============================================================================
# =============================================================================
# CLI OPTIONS
# =============================================================================
| 36.223368 | 103 | 0.626411 |
a06853a9eca27d640f292fe2b2ffaac04fbafad7
| 1,128 |
py
|
Python
|
invite2app/lib/facebook_auth.py
|
andresgz/invite2app
|
3531db131c4f0646ae01b511971d6642128361e0
|
[
"BSD-3-Clause"
] | null | null | null |
invite2app/lib/facebook_auth.py
|
andresgz/invite2app
|
3531db131c4f0646ae01b511971d6642128361e0
|
[
"BSD-3-Clause"
] | null | null | null |
invite2app/lib/facebook_auth.py
|
andresgz/invite2app
|
3531db131c4f0646ae01b511971d6642128361e0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import facebook
from allauth.socialaccount.models import SocialToken
from django.core.exceptions import ObjectDoesNotExist
| 27.512195 | 71 | 0.638298 |
a068b901b478d011dc44a977f7e4cc0f17632eaf
| 11,386 |
py
|
Python
|
visualize_high_LOO/visualize_high_LOO_cifar_norb.py
|
mkuchnik/Efficient_Augmentation
|
a82190c02509682c34f2df782fb58f8ffd3b11da
|
[
"MIT"
] | 11 |
2019-05-09T22:43:29.000Z
|
2021-01-13T22:26:48.000Z
|
visualize_high_LOO/visualize_high_LOO_cifar_norb.py
|
mkuchnik/Efficient_Augmentation
|
a82190c02509682c34f2df782fb58f8ffd3b11da
|
[
"MIT"
] | 1 |
2020-10-07T14:03:47.000Z
|
2020-10-07T14:03:47.000Z
|
visualize_high_LOO/visualize_high_LOO_cifar_norb.py
|
mkuchnik/Efficient_Augmentation
|
a82190c02509682c34f2df782fb58f8ffd3b11da
|
[
"MIT"
] | 6 |
2019-03-05T02:26:01.000Z
|
2021-05-11T14:35:41.000Z
|
import pprint
import time
import keras
import numpy as np
import joblib
import dataset_loaders
import selection_policy
import augmentations
import experiments
import experiments_util
import featurized_classifiers
import visualization_util
import matplotlib.pyplot as plt
mem = joblib.Memory(cachedir="./cache", verbose=1)
if __name__ == "__main__":
main()
| 36.031646 | 84 | 0.584138 |
a06ffffb39b0434296021e5eee8841761190d6b0
| 370 |
py
|
Python
|
backend/main_app/serializers.py
|
RTUITLab/Avia-Hack-2021-RealityX
|
ca700492d314a28e23fa837cd2dfa04dd67c167c
|
[
"Apache-2.0"
] | null | null | null |
backend/main_app/serializers.py
|
RTUITLab/Avia-Hack-2021-RealityX
|
ca700492d314a28e23fa837cd2dfa04dd67c167c
|
[
"Apache-2.0"
] | null | null | null |
backend/main_app/serializers.py
|
RTUITLab/Avia-Hack-2021-RealityX
|
ca700492d314a28e23fa837cd2dfa04dd67c167c
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers
from .models import *
| 30.833333 | 125 | 0.672973 |
a0767541c421c26c6de084316db254c59c03c5d0
| 17,875 |
py
|
Python
|
web_site/wx/lib.py
|
Fixdq/dj-deep
|
6712a722c7f620b76f21b1ebf0b618f42eb4a58a
|
[
"MIT"
] | null | null | null |
web_site/wx/lib.py
|
Fixdq/dj-deep
|
6712a722c7f620b76f21b1ebf0b618f42eb4a58a
|
[
"MIT"
] | null | null | null |
web_site/wx/lib.py
|
Fixdq/dj-deep
|
6712a722c7f620b76f21b1ebf0b618f42eb4a58a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2014-5-13
@author: skycrab
"""
import json
import time
import random
import string
import urllib
import hashlib
import threading
import traceback
import xml.etree.ElementTree as ET
import logging
from urllib import request as urllib2
from functools import wraps
from .config import WxPayConf, WxPayConf_shop
try:
import pycurl
from cStringIO import StringIO
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
logger = logging.getLogger('control')
| 38.690476 | 190 | 0.658238 |
a076da17c1234915c44f55110c45dfe832f020a4
| 4,723 |
py
|
Python
|
argo_dsl/tasks.py
|
zen-xu/argo-dsl
|
76b18073c8dd850b212ccaee2a0c95f718c67db6
|
[
"Apache-2.0"
] | null | null | null |
argo_dsl/tasks.py
|
zen-xu/argo-dsl
|
76b18073c8dd850b212ccaee2a0c95f718c67db6
|
[
"Apache-2.0"
] | null | null | null |
argo_dsl/tasks.py
|
zen-xu/argo-dsl
|
76b18073c8dd850b212ccaee2a0c95f718c67db6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from argo_dsl.api.io.argoproj.workflow import v1alpha1
if TYPE_CHECKING:
from .template import Template
Item = _Item()
SERIALIZE_ARGUMENT_FUNCTION = Callable[[Any], str]
SERIALIZE_ARGUMENT_METHOD = Callable[["Template", Any], str]
| 28.79878 | 106 | 0.645988 |
a07710f0bb90f929f1fc7e78cba178a4fc0fa117
| 1,007 |
py
|
Python
|
tests/article_test.py
|
Kabu1/flashnews
|
30852077c465ce828452125ec5e2b21115609c38
|
[
"Unlicense"
] | null | null | null |
tests/article_test.py
|
Kabu1/flashnews
|
30852077c465ce828452125ec5e2b21115609c38
|
[
"Unlicense"
] | null | null | null |
tests/article_test.py
|
Kabu1/flashnews
|
30852077c465ce828452125ec5e2b21115609c38
|
[
"Unlicense"
] | null | null | null |
import unittest
from app.models import Article
| 45.772727 | 569 | 0.715988 |
a0779d0acb5d0ce28e46508caa76d16adb915bd8
| 474 |
py
|
Python
|
intern/conv_java_import.py
|
zaqwes8811/smart-vocabulary-cards
|
abeab5c86b1c6f68d8796475cba80c4f2c6055ff
|
[
"Apache-2.0"
] | null | null | null |
intern/conv_java_import.py
|
zaqwes8811/smart-vocabulary-cards
|
abeab5c86b1c6f68d8796475cba80c4f2c6055ff
|
[
"Apache-2.0"
] | 11 |
2015-01-25T14:22:52.000Z
|
2015-09-08T09:59:38.000Z
|
intern/conv_java_import.py
|
zaqwes8811/vocabulary-cards
|
abeab5c86b1c6f68d8796475cba80c4f2c6055ff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
'''
Created on 18.04.2013
@author:
'''
import dals.os_io.io_wrapper as dal
if __name__=='__main__':
sets = dal.get_utf8_template()
sets['name'] = 'test_import_to_jy.txt'
readed = dal.file2list(sets)
map(convert_one_line, readed)
print 'Done'
| 19.75 | 43 | 0.594937 |
a078916cfe94a2866d1c06904964969d62a237ec
| 25 |
py
|
Python
|
relatives/__init__.py
|
treyhunner/django-relatives
|
a578ab135f865df2835957cedfd00476c4b65e18
|
[
"MIT"
] | 10 |
2015-08-14T00:22:52.000Z
|
2021-09-16T08:15:14.000Z
|
relatives/__init__.py
|
treyhunner/django-relatives
|
a578ab135f865df2835957cedfd00476c4b65e18
|
[
"MIT"
] | 12 |
2015-03-09T20:17:16.000Z
|
2021-09-30T18:46:11.000Z
|
relatives/__init__.py
|
treyhunner/django-relatives
|
a578ab135f865df2835957cedfd00476c4b65e18
|
[
"MIT"
] | 3 |
2016-01-05T15:20:10.000Z
|
2018-08-03T10:51:23.000Z
|
__version__ = '1.3.0.a3'
| 12.5 | 24 | 0.64 |
a07a7419dbe104e7dbe0af27f725918587fdc9f2
| 4,596 |
py
|
Python
|
handlers/class_handler.py
|
Hargre/faltometro-bot
|
271772dc52c9d0454d96ef3c43e3a0da32075743
|
[
"MIT"
] | null | null | null |
handlers/class_handler.py
|
Hargre/faltometro-bot
|
271772dc52c9d0454d96ef3c43e3a0da32075743
|
[
"MIT"
] | 2 |
2019-04-02T13:18:23.000Z
|
2019-04-11T14:00:06.000Z
|
handlers/class_handler.py
|
Hargre/faltometro-bot
|
271772dc52c9d0454d96ef3c43e3a0da32075743
|
[
"MIT"
] | null | null | null |
import logging
import math
from emoji import emojize
from peewee import DoesNotExist
from telegram import ParseMode
from telegram.ext import CommandHandler
from telegram.ext import ConversationHandler
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import RegexHandler
from constants import limit_status
from handlers.shared import cancel_handler
from handlers.shared import select_class_keyboard
from models.class_model import ClassModel
ASK_NAME, ASK_LIMIT = range(2)
DELETING_CLASS = range(1)
| 27.035294 | 110 | 0.647302 |
a07ca3f53342a4c5c568050fc58fa24424a4bf96
| 1,137 |
py
|
Python
|
jupyter/aBasic/a_datatype_class/Ex01_valuable.py
|
WoolinChoi/test
|
a0f9c8ecc63443acaae61d744eecec6c943d3a26
|
[
"MIT"
] | null | null | null |
jupyter/aBasic/a_datatype_class/Ex01_valuable.py
|
WoolinChoi/test
|
a0f9c8ecc63443acaae61d744eecec6c943d3a26
|
[
"MIT"
] | 1 |
2021-03-30T09:01:47.000Z
|
2021-03-30T09:01:47.000Z
|
jupyter/aBasic/a_datatype_class/Ex01_valuable.py
|
WoolinChoi/test
|
a0f9c8ecc63443acaae61d744eecec6c943d3a26
|
[
"MIT"
] | 1 |
2019-12-06T18:21:10.000Z
|
2019-12-06T18:21:10.000Z
|
"""
-
`
` ,
` ,
[]
[] Run shift + alt + F10
[] ctrl + q
"""
""" """
#
# print("")
# print('hello')
# print("""""")
# print('''''')
# : ctrl + shift + F10
# '' "" .
'''
a = 7
7 a. ( )
a 7 .
7 .
[ ]
- + + _
-
-
-
-
'''
'''
import keyword
print(keyword.kwlist)
'''
'''
a = 7 # 7 a
b = 7 # 7 b
print(type(a)) # int
print(a is 7) # true
print(b is 7) # true
print(a is b) # true
print(id(a))
print(id(b))
print(id(7))
# id , a b 7 id
'''
#
a, b = 5, 10
print('a+b=', a+b)
# (swapping)
a, b = b, a
print('a=', a, 'b=', b)
#
del b
print(b)
| 15.364865 | 50 | 0.504837 |
a07d57857f23110458e28cf9b4145b1716e6f940
| 2,502 |
py
|
Python
|
convert.py
|
povle/SP-SR2-converter
|
7a675204e15b340deac2b98634805cdf75e6fd4a
|
[
"MIT"
] | 3 |
2021-01-09T20:11:31.000Z
|
2022-03-31T02:05:52.000Z
|
convert.py
|
povle/SP-SR2-converter
|
7a675204e15b340deac2b98634805cdf75e6fd4a
|
[
"MIT"
] | null | null | null |
convert.py
|
povle/SP-SR2-converter
|
7a675204e15b340deac2b98634805cdf75e6fd4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import sys
import io
import os.path
import shutil
import requests
from convert_file import convert_file
from gooey import Gooey, GooeyParser
if len(sys.argv) >= 2:
if '--ignore-gooey' not in sys.argv:
sys.argv.append('--ignore-gooey')
if __name__ == '__main__':
main()
| 43.137931 | 133 | 0.688649 |
a07db8162c85985e5fa4859871927e9c03a5f877
| 5,289 |
py
|
Python
|
bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py
|
dlab-berkeley/collaboratool-archive
|
fa474e05737f78e628d6b9398c58cf7c966a7bba
|
[
"Apache-2.0"
] | 1 |
2016-01-20T14:36:02.000Z
|
2016-01-20T14:36:02.000Z
|
bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py
|
dlab-berkeley/collaboratool-archive
|
fa474e05737f78e628d6b9398c58cf7c966a7bba
|
[
"Apache-2.0"
] | null | null | null |
bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py
|
dlab-berkeley/collaboratool-archive
|
fa474e05737f78e628d6b9398c58cf7c966a7bba
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import pwd
import random
import traceback
import tempfile
import base64
import ansible.constants as C
from ansible import utils
from ansible import errors
from ansible import module_common
from ansible.runner.return_data import ReturnData
| 42.653226 | 135 | 0.626962 |
a07ebd61f61d120e3815b7fb4a6cf2eeafd36431
| 4,563 |
py
|
Python
|
src/plot_by_genome.py
|
MaaT-Pharma/AMBER
|
76aa10e2295265b16337b7bfab769d67d3bea66a
|
[
"Apache-2.0"
] | null | null | null |
src/plot_by_genome.py
|
MaaT-Pharma/AMBER
|
76aa10e2295265b16337b7bfab769d67d3bea66a
|
[
"Apache-2.0"
] | null | null | null |
src/plot_by_genome.py
|
MaaT-Pharma/AMBER
|
76aa10e2295265b16337b7bfab769d67d3bea66a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import argparse
import os
import sys
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from src import plots
from src.utils import load_data
from src.utils import argparse_parents
# def main():
# parser = argparse.ArgumentParser(description="Plot purity and completeness per genome. Genomes can be sorted by completeness (default) or purity")
# parser.add_argument('file', nargs='?', type=argparse.FileType('r'), help=argparse_parents.HELP_FILE)
# parser.add_argument('-s','--sort_by', help='Sort by either purity or completeness (default: completeness)', choices=set(['purity','completeness']))
# parser.add_argument('-o','--out_file', help='Path to store image (default: only show image)')
# args = parser.parse_args()
# if not args.file and sys.stdin.isatty():
# parser.print_help()
# parser.exit(1)
# metrics = load_data.load_tsv_table(sys.stdin if not sys.stdin.isatty() else args.file)
# if args.sort_by is not None:
# plot_by_genome(metrics, args.out_file, args.sort_by)
# else:
# plot_by_genome(metrics, args.out_file)
if __name__ == "__main__":
main()
| 38.669492 | 153 | 0.67368 |
a080d1b200263a36cd31a3e857bf790cbd1e3259
| 16,548 |
py
|
Python
|
tests/test_references.py
|
isprojects/djangorestframework-inclusions
|
c6669f404a8a80f2c524a8adfb6548b2eef235c7
|
[
"MIT"
] | null | null | null |
tests/test_references.py
|
isprojects/djangorestframework-inclusions
|
c6669f404a8a80f2c524a8adfb6548b2eef235c7
|
[
"MIT"
] | 4 |
2019-11-15T10:21:20.000Z
|
2021-04-22T13:37:32.000Z
|
tests/test_references.py
|
isprojects/djangorestframework-inclusions
|
c6669f404a8a80f2c524a8adfb6548b2eef235c7
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from rest_framework.test import APITestCase
from testapp.models import (
A,
B,
C,
Child,
ChildProps,
Container,
Entry,
MainObject,
Parent,
Tag,
)
from .mixins import InclusionsMixin
| 32.383562 | 87 | 0.435098 |
a08113f70d1b07cff7761da1bc92b7750832a572
| 1,508 |
py
|
Python
|
q2_fondue/get_all.py
|
misialq/q2-fondue
|
a7a541ee017381b34d38ef766de39d5d62588465
|
[
"BSD-3-Clause"
] | 10 |
2022-03-21T16:07:22.000Z
|
2022-03-31T09:33:48.000Z
|
q2_fondue/get_all.py
|
misialq/q2-fondue
|
a7a541ee017381b34d38ef766de39d5d62588465
|
[
"BSD-3-Clause"
] | null | null | null |
q2_fondue/get_all.py
|
misialq/q2-fondue
|
a7a541ee017381b34d38ef766de39d5d62588465
|
[
"BSD-3-Clause"
] | 4 |
2022-03-21T06:51:44.000Z
|
2022-03-29T15:56:14.000Z
|
# ----------------------------------------------------------------------------
# Copyright (c) 2022, Bokulich Laboratories.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import qiime2 as q2
import pandas as pd
import threading
from q2_fondue.utils import handle_threaded_exception
from qiime2 import Artifact
threading.excepthook = handle_threaded_exception
| 32.085106 | 78 | 0.657825 |
a08175a3e80e168fe04fe33684d0de9087ed3e33
| 2,652 |
py
|
Python
|
markups/restructuredtext.py
|
LukeC8/pymarkups
|
eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a
|
[
"BSD-3-Clause"
] | null | null | null |
markups/restructuredtext.py
|
LukeC8/pymarkups
|
eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a
|
[
"BSD-3-Clause"
] | null | null | null |
markups/restructuredtext.py
|
LukeC8/pymarkups
|
eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a
|
[
"BSD-3-Clause"
] | null | null | null |
# vim: ts=8:sts=8:sw=8:noexpandtab
# This file is part of python-markups module
# License: 3-clause BSD, see LICENSE file
# Copyright: (C) Dmitry Shachnev, 2012-2018
import markups.common as common
from markups.abstract import AbstractMarkup, ConvertedMarkup
| 30.837209 | 99 | 0.735294 |
a0827c33ad3c6db021a834ac073ebf6c9ba882a7
| 8,025 |
py
|
Python
|
Intelligent Systems and Decision Support Systems/pm-test1.py
|
johnpras/Uni_work
|
1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6
|
[
"MIT"
] | null | null | null |
Intelligent Systems and Decision Support Systems/pm-test1.py
|
johnpras/Uni_work
|
1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6
|
[
"MIT"
] | null | null | null |
Intelligent Systems and Decision Support Systems/pm-test1.py
|
johnpras/Uni_work
|
1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6
|
[
"MIT"
] | null | null | null |
import pandas as pd
from pm4py.objects.log.importer.xes import importer as xes_import
from pm4py.objects.log.util import log as utils
from pm4py.statistics.start_activities.log.get import get_start_activities
from pm4py.statistics.end_activities.log.get import get_end_activities
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.visualization.petrinet import factory as vis_factory
from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.algo.discovery.heuristics import factory as heuristics_miner
from pm4py.algo.discovery.inductive import factory as inductive_miner
from pm4py.evaluation import factory as evaluation_factory
from pm4py.algo.conformance.tokenreplay import factory as token_replay
# logs traces
# process model
# 1. event log
log = xes_import.apply('edited_hh110_labour.xes')
trace_key_list = []
event_key_list = []
event_count = 0 # Counter event
for trace in log:
# keys trace key
# trace_key_list .
for trace_key in trace.attributes.keys():
if trace_key not in trace_key_list:
trace_key_list.append(trace_key)
for event in trace:
# keys events
for event_key in event.keys():
if event_key not in event_key_list:
event_key_list.append(event_key)
event_count += 1 # for events counter 1
# 2. trace event
print("Trace keys : " + str(trace_key_list))
print("Event keys : " + str(event_key_list))
# 3. traces
print("Number of traces : " + str(len(log)))
# 4. events
print("Number of events : " + str(event_count))
# 5. events event log
unique_events = utils.get_event_labels(log,'concept:name')
print("Events of log : " + str(unique_events))
# 6.
# traces
#
start_activities = get_start_activities(log)
print("Starting activities: " + str(start_activities))
#
# traces
end_activities = get_end_activities(log)
print("End activities" + str(end_activities))
# 7. case id, activity name, transition (start
# complete), timestamp
# DataFrame
log_df = pd.DataFrame(columns = ["Case ID" , "Activity Name" , "Transition" , "Timestamp"])
for trace_id, trace in enumerate(log):
for event_index, event in enumerate(trace):
# DataFrame
# event,
#
row = pd.DataFrame({
"Case ID" : [trace.attributes["concept:name"]],
"Activity Name" : [event["concept:name"]],
"Transition" : [event["lifecycle:transition"]],
"Timestamp" : [event["time:timestamp"]]
})
# append DataFrame
#
log_df = log_df.append(row, ignore_index = True)
print("Printing log table : \n")
print(log_df)
# dataframe
#
#print(log_df.to_string(index=False))
# log_df csv
log_df.to_csv('log_table.csv', index = False)
# 8. event log traces
# "end"
filtered_log = end_activities_filter.apply(log,["End"])
print("New log : \n " + str(filtered_log))
# size filtered_log
# "End"
print("Size of filtered log : " + str(len(filtered_log)))
# -
# filtered_log csv
# 2 comments
#filt_log_df = pd.DataFrame(filtered_log)
#filt_log_df.to_csv('filtered_log.csv')
# 9.
# Alpha Miner
# log
net, initial_marking, final_marking = alpha_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('alpha_miner_log_evaluation.csv')
# filtered log
net, initial_marking, final_marking = alpha_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('alpha_miner_filtered_log_evaluation.csv')
# Heuristics Miner
# log
net, initial_marking, final_marking = heuristics_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('heuristic_miner_log_evaluation.csv')
#alignments = alignment.apply_log(log, net, initial_marking, final_marking)
#pretty_print_alignments(alignments)
# filtered log
net, initial_marking, final_marking = heuristics_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('heuristic_miner_filtered_log_evaluation.csv')
# Inductive Miner
# log
net, initial_marking, final_marking = inductive_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('inductive_miner_log_evaluation.csv')
# filtered log
net, initial_marking, final_marking = inductive_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('inductive_miner_filtered_log_evaluation.csv')
| 42.68617 | 102 | 0.761994 |
a083ee0517e616b51836bbd85b01482cd453d3cf
| 9,480 |
py
|
Python
|
utils/job_placement/nonuniformRandom/routers.py
|
scalability-llnl/damselfly
|
394e39b3165388e262a90da415dc3338d0f44734
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 5 |
2016-02-25T04:50:00.000Z
|
2020-06-11T03:00:45.000Z
|
utils/job_placement/nonuniformRandom/routers.py
|
scalability-llnl/damselfly
|
394e39b3165388e262a90da415dc3338d0f44734
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null |
utils/job_placement/nonuniformRandom/routers.py
|
scalability-llnl/damselfly
|
394e39b3165388e262a90da415dc3338d0f44734
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 1 |
2017-02-07T05:43:53.000Z
|
2017-02-07T05:43:53.000Z
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2014, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# Written by:
# Nikhil Jain <[email protected]>
# Abhinav Bhatele <[email protected]>
# Peer-Timo Bremer <[email protected]>
#
# LLNL-CODE-678961. All rights reserved.
#
# This file is part of Damselfly. For details, see:
# https://github.com/LLNL/damselfly
# Please also read the LICENSE file for our notice and the LGPL.
##############################################################################
#
# Define k random distrobutions centered around random positions
# Keep track of empty cells
# For each set
# Until you have placed everything
# Randomly pull an empty cell
# Compute the current PDF value of this cell for this distribution
# sum-up the probability for all already occupied cells and then scale your
# current p with 1 / (1-sum)
# Pull uniform random number [0,1]
# Accept or reject sample
#
from sys import argv,exit
import numpy as np
import struct
from math import *
import random
from __builtin__ import True
symbol = ["ro","g^","bs","yo","cs"]
colors = ["r","g","b","y","c"]
# Base class for are probability distribution
def rank_to_coords(rank,groups,rows,columns,nodes_per_router,cores_per_node):
dims = [0,0,0,0,rank]
dims[4] = rank % cores_per_node;
rank /= cores_per_node;
dims[3] = rank % nodes_per_router;
rank /= nodes_per_router;
dims[2] = rank % columns;
rank /= columns;
dims[1] = rank % rows;
rank /= rows;
dims[0] = rank % groups;
return dims
if len(argv) < 10:
print "Usage: %s <numGroups> <numRows> <numColumns> <numNodesPerRouter> <numCoresPerNode> [Binomial|Geometric] <p> <output filename> <#cores task 1> .... <#cores task N>"
exit(0)
# Parse the command line
groups = int(argv[1])
rows = int(argv[2])
columns = int(argv[3])
nodes_per_router = int(argv[4])
cores_per_node = int(argv[5])
dist = argv[6]
p = float(argv[7])
fileprefix = argv[8]
# Compute the system size
router_count = groups * rows *columns
node_count = router_count * nodes_per_router
cores_per_router = nodes_per_router * cores_per_node
core_count = router_count * nodes_per_router * cores_per_node
task_sizes = [int(arg) for arg in argv[9:]]
# Create a list of tasks
tasks = range(0,len(task_sizes))
# Shuffle the tasks to give everyone the opportunity to have an "empty" machine
np.random.shuffle(tasks)
# Adjust the order of sizes
task_sizes = [task_sizes[i] for i in tasks]
# Create random array of centers
task_centers = np.random.random_integers(0,router_count-1,len(tasks))
# Create the corresponding distributions
if dist == "Binomial":
task_distributions = [Binomial(router_count,c,p) for c in task_centers]
elif dist == "Geometric":
task_distributions = [Geometric(router_count,c,p) for c in task_centers]
# Slots
cores = np.zeros(core_count)
# List of empty router slots
empty = list(xrange(0, router_count))
# List of empty nodes
empty_nodes = list(xrange(0,node_count))
# Create scale down the task_sizes to leave some stragglers
task_sizes_tight = list(task_sizes)
for i,t in enumerate(task_sizes_tight):
# How many routers would this job fill
nr_rounters = t / cores_per_router
if nr_rounters * cores_per_router < t:
nr_rounters += 1
# Pick no more than about 3% of the routers to be left out
task_sizes_tight[i] = (97*nr_rounters) / 100 * cores_per_router
# For all tasks
for t,size,dist in zip(tasks,task_sizes_tight,task_distributions):
count = 0
while count < size:
# Choose a random node
elem = random.choice(empty)
# Get a uniform random number
test = np.random.uniform()
# Get the current pmf value for the distribution
current = dist.adjustedPMF(elem)
if current < 0:
print "Current ", current, " of ", elem, " tested against ", test
print dist.pmf(elem), dist.fill_sum
exit(0)
# If we pass the test
if test < current:
#print "Picked node", elem, " ", (size-count)/cores_per_node, " left to pick"
#print "Current ", current, dist.pmf(elem)," of ", elem, " tested against ", test
# Now fill up all the cores as long as
# we have tasks
i = 0
while i<cores_per_node*nodes_per_router and count<size:
cores[elem*cores_per_node*nodes_per_router + i] = t+1
i += 1
count += 1
# Remove the router from the empty list
empty.remove(elem)
# Remove the corresponding nodes (This assumine the sizes for this
# loop are multiples of the core_per_router
for i in xrange(0,nodes_per_router):
empty_nodes.remove(elem*nodes_per_router + i)
# Adjust all distributions to include another filled element
for d in task_distributions:
d.fillSlot(elem)
# Now place the remaining cores of the tasks by uniformly picking
# empty nodes
for t,full,tight in zip(tasks,task_sizes,task_sizes_tight):
size = full - tight
count = 0
while count < size:
# Choose a random node
elem = random.choice(empty_nodes)
i = 0
while i<cores_per_node and count<size:
cores[elem*cores_per_node + i] = t+1
i += 1
count += 1
# Remove the router from the empty list
empty_nodes.remove(elem)
if False:
pmfs = []
scale = 0
for d in task_distributions:
pmfs.append([d.pmf(i) for i in xrange(0,router_count)])
scale = max(scale,max(pmfs[-1]))
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for pmf,t in zip(pmfs,tasks):
#print "Colors ", colors[t]
ax.plot(xrange(0,cores_per_node*nodes_per_router*router_count,cores_per_node),pmf,colors[t])
#print ""
for t in tasks:
#print "Colors ", symbol[t]
x = np.where(cores == t+1)
ax.plot(x,[(t+1)*scale/len(tasks) ]*len(x),symbol[t])
#print x
plt.show()
# set up text and binary files
csvfileall = open(fileprefix + ".csv", "w")
binfileall = open(fileprefix + ".bin", "wb")
csvfileall.write("g,r,c,n,core,jobid\n")
for taskid in xrange(0,len(tasks)):
x = np.where(cores == taskid+1)
# Now find the size of the t's job
loc = 0
while tasks[loc] != taskid:
loc += 1
if x[0].shape[0] != task_sizes[loc]:
print "Task assignment inconsistent for task ", taskid, ": found ", x[0].shape[0], " assigned cores but needed ", task_sizes[loc]
exit(0)
csvfile = open("%s-%d.csv" % (fileprefix, taskid), "w")
binfile = open("%s-%d.bin" % (fileprefix, taskid), "wb")
csvfile.write("g,r,c,n,core,jobid\n")
# print x
for rank in x[0]:
dims = rank_to_coords(rank, groups, rows, columns, nodes_per_router, cores_per_node)
csvfile.write("%d,%d,%d,%d,%d,0\n" % (dims[0],dims[1],dims[2],dims[3],dims[4]))
csvfileall.write("%d,%d,%d,%d,%d,%d\n" % (dims[0],dims[1],dims[2],dims[3],dims[4],taskid))
binfile.write(struct.pack('6i', dims[0], dims[1], dims[2], dims[3], dims[4], 0))
binfileall.write(struct.pack('6i', dims[0], dims[1], dims[2], dims[3], dims[4], taskid))
csvfile.close()
binfile.close()
csvfileall.close()
binfileall.close()
| 28.902439 | 175 | 0.59673 |
a0853c6f068e5b0ba0007116f943ea7455d91729
| 46,894 |
py
|
Python
|
src/scml_vis/presenter.py
|
yasserfarouk/scml-vis
|
a8daff36bb29867a67c9a36bcdca9ceef9350e53
|
[
"Apache-2.0"
] | null | null | null |
src/scml_vis/presenter.py
|
yasserfarouk/scml-vis
|
a8daff36bb29867a67c9a36bcdca9ceef9350e53
|
[
"Apache-2.0"
] | 2 |
2021-05-07T22:45:42.000Z
|
2021-09-22T04:35:15.000Z
|
src/scml_vis/presenter.py
|
yasserfarouk/scml-vis
|
a8daff36bb29867a67c9a36bcdca9ceef9350e53
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import shutil
import itertools
import random
import sys
import traceback
from pathlib import Path
import altair as alt
import pandas as pd
import plotly as plotly
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from pandas.api.types import is_numeric_dtype
from plotly.validators.scatter.marker import SymbolValidator
from streamlit import cli as stcli
import scml_vis.compiler as compiler
from scml_vis.compiler import VISDATA_FOLDER
from scml_vis.utils import (
add_selector,
add_stats_display,
add_stats_selector,
load_data,
plot_network,
score_distribution,
score_factors,
)
__all__ = ["main"]
MARKERS = SymbolValidator().values[2::3]
MARKERS = [_ for _ in MARKERS if not any(_.startswith(x) for x in ("star", "circle", "square"))]
random.shuffle(MARKERS)
MARKERS = ["circle", "square"] + MARKERS
DB_FOLDER = Path.home() / "negmas" / "runsdb"
DB_NAME = "rundb.csv"
BASE_FOLDERS = [
Path.home() / "negmas" / "logs" / "scml" / "scml2020",
Path.home() / "negmas" / "logs" / "scml" / "scml2020oneshot",
Path.home() / "negmas" / "logs" / "scml" / "scml2021oneshot",
Path.home() / "negmas" / "logs" / "scml" / "scml2021",
Path.home() / "negmas" / "logs" / "tournaments",
Path.home() / "negmas" / "tournaments",
]
WORLD_INDEX = 0
if __name__ == "__main__":
import sys
from streamlit import cli as stcli
folder = None
if len(sys.argv) > 1:
folder = Path(sys.argv[1])
if st._is_running_with_streamlit:
main(folder)
else:
sys.argv = ["streamlit", "run"] + sys.argv
sys.exit(stcli.main())
| 35.338357 | 233 | 0.565403 |
a08cb54701ee8d7129f53895ca2daa2a379bad89
| 4,431 |
py
|
Python
|
QFA/MO_1QFA.py
|
gustawlippa/QFA
|
7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be
|
[
"MIT"
] | 2 |
2021-01-30T23:14:36.000Z
|
2021-02-17T01:41:56.000Z
|
QFA/MO_1QFA.py
|
gustawlippa/QFA
|
7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be
|
[
"MIT"
] | null | null | null |
QFA/MO_1QFA.py
|
gustawlippa/QFA
|
7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import List
from math import sqrt
from QFA.Automaton import Automaton
from math import cos, sin, pi
if __name__ == "__main__":
example()
| 31.425532 | 120 | 0.643421 |
a08cc063efc183d6784f567bb7e999cbddbf1bbf
| 2,292 |
py
|
Python
|
whats_in_the_cupboard/search/views.py
|
brandonholderman/whats_in_the_cupboard
|
8f8b0abe8b94547fa488db689261a4f475a24779
|
[
"MIT"
] | null | null | null |
whats_in_the_cupboard/search/views.py
|
brandonholderman/whats_in_the_cupboard
|
8f8b0abe8b94547fa488db689261a4f475a24779
|
[
"MIT"
] | 10 |
2020-02-11T23:36:20.000Z
|
2022-03-11T23:57:52.000Z
|
whats_in_the_cupboard/search/views.py
|
brandonholderman/whats_in_the_cupboard
|
8f8b0abe8b94547fa488db689261a4f475a24779
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.views.generic import TemplateView, ListView
from django.shortcuts import render
from rest_framework.authentication import TokenAuthentication
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from rest_framework import generics, status
from rest_framework.views import APIView
from .serializers import SearchSerializer
from .sample_data import MOCK_DATA
from .models import Search
import requests
import os
# class SearchView(mixins.ListAPIMixin):
# serializer_class = SearchSerializer
# def get(self, request):
# response = requests.get(MOCK_DATA)
# if response.ok:
# return response
# else:
# return None
# class PostCollection(ListModelMixin,
# CreateModelMixin,
# GenericAPIView):
# queryset = Post.objects.all()
# serializer_class = PostSerializer
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
# return context
# def home(request):
# ip_address = request.META.get('HTTP_X_FORWARDED_FOR', '')
# response = requests.get(
# 'https://nasaapidimasv1.p.rapidapi.com/getAsteroidStats')
# nasadata = response.json()
# return render(request, 'home.html', {
# 'ip': nasadata['ip'],
# 'country': nasadata['country_name'],
# 'latitude': nasadata['latitude'],
# 'longitude': nasadata['longitude'],
# 'api_key': os.environ.get('API_KEY', '')
# })
# Create your views here.
| 27.614458 | 67 | 0.654887 |
a08ec5f751e5c0ed745a1196c05685644187a34f
| 591 |
py
|
Python
|
scripts/freq_shecker.py
|
Fumiya-K/ros_myo
|
dac160aae5d0cd75211c60261bd1232ef089e530
|
[
"MIT"
] | null | null | null |
scripts/freq_shecker.py
|
Fumiya-K/ros_myo
|
dac160aae5d0cd75211c60261bd1232ef089e530
|
[
"MIT"
] | null | null | null |
scripts/freq_shecker.py
|
Fumiya-K/ros_myo
|
dac160aae5d0cd75211c60261bd1232ef089e530
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Imu
import numpy as np
c_imu, c_angle = 0, 0
if __name__ == "__main__":
rospy.init_node("freq_checker")
imu_sub = rospy.Subscriber("/myo_raw/myo_ori", Vector3, cb_angle)
ang_sub = rospy.Subscriber("/myo_raw/myo_imu", Imu, cb_imu)
rospy.spin()
| 21.107143 | 66 | 0.717428 |
a0921c92865225de5297219ccf69a9133b387063
| 2,431 |
py
|
Python
|
dist/snippets/woosmap_http_zones_collection_request/woosmap_http_zones_collection_request.py
|
woosmap/openapi-specification
|
7f934628a75695884db2fa29dd1d04efd1fb20de
|
[
"MIT"
] | null | null | null |
dist/snippets/woosmap_http_zones_collection_request/woosmap_http_zones_collection_request.py
|
woosmap/openapi-specification
|
7f934628a75695884db2fa29dd1d04efd1fb20de
|
[
"MIT"
] | 3 |
2021-12-20T16:15:13.000Z
|
2022-02-15T00:44:19.000Z
|
dist/snippets/woosmap_http_zones_collection_request/woosmap_http_zones_collection_request.py
|
woosmap/openapi-specification
|
7f934628a75695884db2fa29dd1d04efd1fb20de
|
[
"MIT"
] | null | null | null |
# [START woosmap_http_zones_collection_request]
import requests
import json
url = "https://api.woosmap.com/zones?private_key=YOUR_PRIVATE_API_KEY"
payload = json.dumps({
"zones": [
{
"zone_id": "ZoneA",
"description": "Delivery Zone for Store A",
"store_id": "STORE_ID_123456",
"polygon": "POLYGON ((-122.496116 37.7648181,-122.4954079 37.751518,-122.4635648 37.7530788,-122.4618481 37.7514501,-122.4601315 37.7521288,-122.4565266 37.7513144,-122.4540375 37.7566755,-122.4528359 37.7583041,-122.4515485 37.7595934,-122.4546384 37.774656,-122.4718903 37.7731635,-122.472577 37.772485,-122.4755811 37.7725529,-122.4791001 37.7723493,-122.4793576 37.7713995,-122.4784993 37.769839,-122.4783276 37.7680071,-122.4774693 37.766718,-122.4772118 37.7652931,-122.496116 37.7648181))",
"types": [
"delivery"
]
},
{
"zone_id": "ZoneB",
"description": "Delivery Zone for Store B",
"store_id": "STORE_ID_123456",
"polygon": "POLYGON ((-122.4546384 37.774656,-122.4515485 37.7595934,-122.4354306 37.7602172,-122.4333707 37.7512596,-122.423071 37.7511239,-122.4242726 37.7687665,-122.4259893 37.7691736,-122.4289075 37.7732444,-122.4306241 37.7850483,-122.4472753 37.7830133,-122.445902 37.7759581,-122.4546384 37.774656))",
"types": [
"delivery"
]
},
{
"zone_id": "ZoneC",
"description": "Delivery Zone for Store C",
"store_id": "STORE_ID_45678",
"polygon": "POLYGON ((-122.4758889 37.7524995,-122.4751594 37.7321718,-122.4688079 37.7299995,-122.4648597 37.7261979,-122.4519851 37.7228035,-122.4483802 37.7215815,-122.4458053 37.726741,-122.4365356 37.7310857,-122.4315574 37.7324433,-122.4246909 37.7312214,-122.4219444 37.731493,-122.423071 37.7511239,-122.4333707 37.7512596,-122.4354306 37.7602172,-122.4515485 37.7595934,-122.4528628 37.7582744,-122.4540375 37.7566755,-122.4565266 37.7513144,-122.4601315 37.7521288,-122.4618481 37.7514501,-122.4635648 37.7530788,-122.4758889 37.7524995))",
"types": [
"delivery"
]
}
]
})
headers = {
'content-type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
# [END woosmap_http_zones_collection_request]
| 52.847826 | 562 | 0.65035 |
a0963df40f1df1fa608416915de9bf22beecf414
| 1,692 |
py
|
Python
|
src/CyPhyMasterInterpreter/run_master_interpreter_sample.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
src/CyPhyMasterInterpreter/run_master_interpreter_sample.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
src/CyPhyMasterInterpreter/run_master_interpreter_sample.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
import win32com.client
# Disable early binding: full of race conditions writing the cache files,
# and changes the semantics since inheritance isn't handled correctly
import win32com.client.gencache
_savedGetClassForCLSID = win32com.client.gencache.GetClassForCLSID
win32com.client.gencache.GetClassForCLSID = lambda x: None
project = win32com.client.DispatchEx("Mga.MgaProject")
project.Open("MGA=" + r'D:\Projects\META\development\models\DynamicsTeam\MasterInterpreter\MasterInterpreter.mga')
# config_light = win32com.client.DispatchEx("CyPhyMasterInterpreter.ConfigurationSelectionLight")
# # GME id, or guid, or abs path or path to Test bench or SoT or PET
# config_light.ContextId = '{6d24a596-ec4f-4910-895b-d03a507878c3}'
# print config_light.SelectedConfigurationIds
# config_light.SetSelectedConfigurationIds(['id-0065-000000f1'])
# #config_light.KeepTemporaryModels = True
# #config_light.PostToJobManager = True
# master = win32com.client.DispatchEx("CyPhyMasterInterpreter.CyPhyMasterInterpreterAPI")
# master.Initialize(project)
# results = master.RunInTransactionWithConfigLight(config_light)
# It works only this way and does not worth the time to figure out the other way.
# will run ALL configurations.
focusobj = None
try:
project.BeginTransactionInNewTerr()
focusobj = project.GetObjectByID('id-0065-00000635')
finally:
project.AbortTransaction()
selectedobj=win32com.client.DispatchEx("Mga.MgaFCOs")
interpreter = "MGA.Interpreter.CyPhyMasterInterpreter"
launcher = win32com.client.DispatchEx("Mga.MgaLauncher")
launcher.RunComponent(interpreter, project, focusobj, selectedobj, 128)
project.Close()
| 38.454545 | 115 | 0.785461 |
a097f2e9cca87b9c4ab3fbfbe7eb9b74f83ce331
| 4,051 |
py
|
Python
|
image_utils.py
|
datascisteven/Flictionary-Flask
|
9437f0b6377b11cecfa37c8a94eb68cc4e7018f8
|
[
"MIT"
] | null | null | null |
image_utils.py
|
datascisteven/Flictionary-Flask
|
9437f0b6377b11cecfa37c8a94eb68cc4e7018f8
|
[
"MIT"
] | null | null | null |
image_utils.py
|
datascisteven/Flictionary-Flask
|
9437f0b6377b11cecfa37c8a94eb68cc4e7018f8
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageOps
| 28.935714 | 132 | 0.621328 |
a098e971e8b1b7172d8860ca8ed8514362a25eea
| 360 |
py
|
Python
|
src/lqc/generate/web_page/ui_tools/create.py
|
tysmith/layout-quickcheck
|
c5ba9431a40f650a594140541e32af7c8ff21695
|
[
"MIT"
] | null | null | null |
src/lqc/generate/web_page/ui_tools/create.py
|
tysmith/layout-quickcheck
|
c5ba9431a40f650a594140541e32af7c8ff21695
|
[
"MIT"
] | null | null | null |
src/lqc/generate/web_page/ui_tools/create.py
|
tysmith/layout-quickcheck
|
c5ba9431a40f650a594140541e32af7c8ff21695
|
[
"MIT"
] | null | null | null |
import os
| 25.714286 | 82 | 0.633333 |
a09ac1675173bac590c8d099736901eb4fe0b015
| 886 |
py
|
Python
|
students/K33422/laboratory_works/Moruga_Elina/lr_2/simple_django_web_projects(1)/django_project_Moruga/project_first_app/views.py
|
Elyavor/ITMO_ICT_WebDevelopment_2021-2022
|
63fad07bcdc0a9a6b85d46eacf97182162262181
|
[
"MIT"
] | null | null | null |
students/K33422/laboratory_works/Moruga_Elina/lr_2/simple_django_web_projects(1)/django_project_Moruga/project_first_app/views.py
|
Elyavor/ITMO_ICT_WebDevelopment_2021-2022
|
63fad07bcdc0a9a6b85d46eacf97182162262181
|
[
"MIT"
] | null | null | null |
students/K33422/laboratory_works/Moruga_Elina/lr_2/simple_django_web_projects(1)/django_project_Moruga/project_first_app/views.py
|
Elyavor/ITMO_ICT_WebDevelopment_2021-2022
|
63fad07bcdc0a9a6b85d46eacf97182162262181
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import Http404
from .models import CarOwner
| 59.066667 | 192 | 0.767494 |
a09b9af2b847bf39b063ead0a72aab28cd93427e
| 126 |
py
|
Python
|
wtpy/apps/__init__.py
|
Huijun-Cui/wtpy
|
9a8243a20b944fbb37aa33d81215b7b36ac7b1e2
|
[
"MIT"
] | null | null | null |
wtpy/apps/__init__.py
|
Huijun-Cui/wtpy
|
9a8243a20b944fbb37aa33d81215b7b36ac7b1e2
|
[
"MIT"
] | null | null | null |
wtpy/apps/__init__.py
|
Huijun-Cui/wtpy
|
9a8243a20b944fbb37aa33d81215b7b36ac7b1e2
|
[
"MIT"
] | null | null | null |
from .WtBtAnalyst import WtBtAnalyst
from .WtCtaOptimizer import WtCtaOptimizer
__all__ = ["WtBtAnalyst","WtCtaOptimizer"]
| 31.5 | 43 | 0.809524 |
a09c1cbcccf7a63039a5587fbbf109f0b5dc595c
| 608 |
py
|
Python
|
grove_potentiometer.py
|
cpmpercussion/empi_controller
|
178d3952994d7e13067674cbcd261d945e6b4799
|
[
"MIT"
] | null | null | null |
grove_potentiometer.py
|
cpmpercussion/empi_controller
|
178d3952994d7e13067674cbcd261d945e6b4799
|
[
"MIT"
] | null | null | null |
grove_potentiometer.py
|
cpmpercussion/empi_controller
|
178d3952994d7e13067674cbcd261d945e6b4799
|
[
"MIT"
] | null | null | null |
import math
import sys
import time
from grove.adc import ADC
Grove = GroveRotaryAngleSensor
if __name__ == '__main__':
main()
| 17.882353 | 58 | 0.626645 |
a09c77edcb165aec8e2b0d92f741bba565b1c3ad
| 627 |
py
|
Python
|
ad2web/api/forms.py
|
billfor/alarmdecoder-webapp
|
43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc
|
[
"BSD-3-Clause",
"MIT"
] | 46 |
2015-06-14T02:19:16.000Z
|
2022-03-24T03:11:19.000Z
|
ad2web/api/forms.py
|
billfor/alarmdecoder-webapp
|
43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc
|
[
"BSD-3-Clause",
"MIT"
] | 66 |
2015-03-14T16:30:43.000Z
|
2021-08-28T22:20:01.000Z
|
ad2web/api/forms.py
|
billfor/alarmdecoder-webapp
|
43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc
|
[
"BSD-3-Clause",
"MIT"
] | 44 |
2015-02-13T19:23:37.000Z
|
2021-12-30T04:17:21.000Z
|
# -*- coding: utf-8 -*-
import string
from flask_wtf import FlaskForm as Form
from wtforms.fields.html5 import URLField, EmailField, TelField
from wtforms import (ValidationError, HiddenField, TextField, HiddenField,
PasswordField, SubmitField, TextAreaField, IntegerField, RadioField,
FileField, DecimalField, BooleanField, SelectField, FormField, FieldList)
from wtforms.validators import (Required, Length, EqualTo, Email, NumberRange,
URL, AnyOf, Optional, IPAddress)
from flask_login import current_user
from ..user import User
from ..widgets import ButtonField
| 34.833333 | 81 | 0.773525 |
a09e18ee423836b7c3ce0e61bbbd9d1885bd9f19
| 73 |
py
|
Python
|
modelator_py/util/tla/__init__.py
|
informalsystems/modelator-py
|
d66464096c022799e680e6201590a2ead69be32d
|
[
"Apache-2.0"
] | null | null | null |
modelator_py/util/tla/__init__.py
|
informalsystems/modelator-py
|
d66464096c022799e680e6201590a2ead69be32d
|
[
"Apache-2.0"
] | 3 |
2022-03-30T16:01:49.000Z
|
2022-03-31T13:40:03.000Z
|
modelator_py/util/tla/__init__.py
|
informalsystems/modelator-py
|
d66464096c022799e680e6201590a2ead69be32d
|
[
"Apache-2.0"
] | null | null | null |
"""TLA+ parser and syntax tree."""
from .parser import parse, parse_expr
| 24.333333 | 37 | 0.726027 |
a0a2f155643acffd9a5b5d44e3b912311ab75ced
| 2,084 |
py
|
Python
|
runai/mp/keras/layers/core.py
|
bamps53/runai
|
0c868160f64e1e063c6eb6f660d42917322d40c5
|
[
"MIT"
] | 86 |
2020-01-23T18:56:41.000Z
|
2022-02-14T22:32:08.000Z
|
runai/mp/keras/layers/core.py
|
bamps53/runai
|
0c868160f64e1e063c6eb6f660d42917322d40c5
|
[
"MIT"
] | 18 |
2020-01-24T17:55:18.000Z
|
2021-12-01T01:01:32.000Z
|
runai/mp/keras/layers/core.py
|
bamps53/runai
|
0c868160f64e1e063c6eb6f660d42917322d40c5
|
[
"MIT"
] | 12 |
2020-02-03T14:30:44.000Z
|
2022-01-08T16:06:59.000Z
|
import keras.backend as K
import keras.layers
import runai.mp
from .keep import Keep
from .parallelised import Parallelised
Activation = Keep.create('Activation')
Dropout = Keep.create('Dropout')
Flatten = Keep.create('Flatten')
| 31.575758 | 151 | 0.600768 |
a0a7abf53aec9f31e9e5488c61a3e3d5fb017c5d
| 461 |
py
|
Python
|
graphics.py
|
Nemo20k/lactose_multistability_model
|
e50d68bb508e243d0a775d1d562bd8e8b88b3b30
|
[
"MIT"
] | null | null | null |
graphics.py
|
Nemo20k/lactose_multistability_model
|
e50d68bb508e243d0a775d1d562bd8e8b88b3b30
|
[
"MIT"
] | null | null | null |
graphics.py
|
Nemo20k/lactose_multistability_model
|
e50d68bb508e243d0a775d1d562bd8e8b88b3b30
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
import numpy as np
| 27.117647 | 109 | 0.724512 |
a0a9d841059677b45b6f09a062af0ebdbc1dceea
| 5,394 |
py
|
Python
|
webapp/element43/apps/common/util.py
|
Ososope/eve_online
|
b368f77aaff403e5f1523a1a0e01d105fed0ada9
|
[
"BSD-3-Clause"
] | null | null | null |
webapp/element43/apps/common/util.py
|
Ososope/eve_online
|
b368f77aaff403e5f1523a1a0e01d105fed0ada9
|
[
"BSD-3-Clause"
] | null | null | null |
webapp/element43/apps/common/util.py
|
Ososope/eve_online
|
b368f77aaff403e5f1523a1a0e01d105fed0ada9
|
[
"BSD-3-Clause"
] | null | null | null |
# utility functions
import ast
import urllib
import datetime
import pytz
import pylibmc
# Import settings
from django.conf import settings
# API Models
from apps.api.models import APIKey, Character, APITimer
# Eve_DB Models
from eve_db.models import MapSolarSystem
# API Access Masks
# Bitmask value for each character-related API sheet. Masks are combined by
# summation elsewhere in this module (see calculate_character_access_mask and
# manage_character_api_timers).
# NOTE(review): the last few values (CharacterInfo 25165824, AccountStatus
# 33554432, Contracts, Locations) are multi-bit or pre-combined — presumably
# taken from the EVE Online API access-mask table; verify against upstream.
CHARACTER_API_ACCESS_MASKS = {'AccountBalance': 1,
                              'AssetList': 2,
                              'CalendarEventAttendees': 4,
                              'CharacterSheet': 8,
                              'ContactList': 16,
                              'ContactNotifications': 32,
                              'FacWarStats': 64,
                              'IndustryJobs': 128,
                              'KillLog': 256,
                              'MailBodies': 512,
                              'MailingLists': 1024,
                              'MailMessages': 2048,
                              'MarketOrders': 4096,
                              'Medals': 8192,
                              'Notifications': 16384,
                              'NotificationTexts': 32768,
                              'Research': 65536,
                              'SkillInTraining': 131072,
                              'SkillQueue': 262144,
                              'Standings': 524288,
                              'UpcomingCalendarEvents': 1048576,
                              'WalletJournal': 2097152,
                              'WalletTransactions': 4194304,
                              'CharacterInfo': 25165824,
                              'AccountStatus': 33554432,
                              'Contracts': 67108864,
                              'Locations': 134217728}
def get_memcache_client():
    """
    Returns a ready-to-use memcache client.

    Connection parameters (server list, binary-protocol flag and client
    behaviours) come from the Django settings module.

    :return: a configured ``pylibmc.Client`` instance
    """
    return pylibmc.Client(settings.MEMCACHE_SERVER,
                          binary=settings.MEMCACHE_BINARY,
                          behaviors=settings.MEMCACHE_BEHAVIOUR)
def dictfetchall(cursor):
    """
    Returns all rows from a cursor as a list of dicts keyed by column name.

    :param cursor: DB-API cursor that has already executed a query
    :return: list of ``{column_name: value}`` dicts, one per fetched row
    """
    # Column names are the first element of each entry in cursor.description.
    columns = [column[0] for column in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
def cast_empty_string_to_int(string):
    """
    Casts a value to int, mapping empty/blank strings (and other falsy
    values) to 0.

    :param string: string or numeric value to convert
    :return: ``int(value)``, or 0 when the (stripped) value is falsy
    """
    # Only strings get whitespace-stripped; other types pass through as-is.
    value = string.strip() if isinstance(string, str) else string
    return int(value) if value else 0
def cast_empty_string_to_float(string):
    """
    Casts a value to float, mapping empty/blank strings (and other falsy
    values) to 0.

    :param string: string or numeric value to convert
    :return: ``float(value)``, or 0 when the (stripped) value is falsy
    """
    # Only strings get whitespace-stripped; other types pass through as-is.
    value = string.strip() if isinstance(string, str) else string
    return float(value) if value else 0
def calculate_character_access_mask(sheets):
    """
    Returns combined access mask for a list of API sheets.

    :param sheets: iterable of sheet names (keys of CHARACTER_API_ACCESS_MASKS)
    :return: sum of the individual sheet mask values
    """
    # Masks are summed (not OR-ed), matching the original behavior; the
    # per-sheet values are distinct bits so the result is equivalent for
    # duplicate-free input.
    return sum(CHARACTER_API_ACCESS_MASKS[sheet] for sheet in sheets)
def manage_character_api_timers(character):
    """
    Adds and removes character APITimers for a given character depending on
    the character's key permissions.

    For every sheet whose access bits are granted by the character's API key,
    an APITimer is created if missing (with an immediate next-update time);
    for sheets the key does not permit, any existing timer is deleted.

    When we add more functions, we need to add them to the masks dictionary.

    :param character: Character model instance; ``character.apikey.accessmask``
        drives timer creation/removal.
    """
    key_mask = character.apikey.accessmask
    for sheet in CHARACTER_API_ACCESS_MASKS:
        mask = CHARACTER_API_ACCESS_MASKS[sheet]
        if ((mask & key_mask) == mask):
            # Permission granted: create the timer only if not already present.
            try:
                APITimer.objects.get(character=character, apisheet=sheet)
            except APITimer.DoesNotExist:
                new_timer = APITimer(character=character,
                                     corporation=None,
                                     apisheet=sheet,
                                     nextupdate=pytz.utc.localize(datetime.datetime.utcnow()))
                new_timer.save()
        else:
            # No permission: remove an existing timer if present.
            try:
                # BUG FIX: the original accessed `.delete` without calling it,
                # so stale timers were never actually removed.
                APITimer.objects.get(character=character, apisheet=sheet).delete()
            except APITimer.DoesNotExist:
                pass
def validate_characters(user, access_mask):
    """
    Returns characters of a user that match a given minimum access mask.

    :param user: user whose API keys are inspected
    :param access_mask: minimum combined access mask the key must grant
    :return: list of Character objects belonging to sufficiently-permissioned keys
    """
    matching = []
    for key in APIKey.objects.filter(user=user):
        # Bitwise check: every bit of access_mask must be set in the key's mask.
        if (access_mask & key.accessmask) == access_mask:
            # Collect all characters attached to this key.
            matching.extend(Character.objects.filter(apikey=key))
    return matching
def find_path(start, finish, security=5, invert=0):
    """
    Returns a list of system objects which represent the path.

    Queries a local routing service over HTTP and resolves each returned
    waypoint id to a MapSolarSystem object.

    start: system_id of first system
    finish: system_id of last system
    security: sec level of system * 10
    invert: if true (1), use security as highest seclevel you want to enter, default (0) seclevel is the lowest you want to try to use
    """
    # Set params
    # NOTE(review): urllib.urlencode / urllib.urlopen are the Python 2 API;
    # under Python 3 these live in urllib.parse / urllib.request — confirm
    # the target interpreter version for this file.
    params = urllib.urlencode({'start': start, 'finish': finish, 'seclevel': security, 'invert': invert})
    response = urllib.urlopen('http://localhost:3455/path', params)
    # literal_eval safely parses the service's Python-literal response
    # (a list of waypoint ids) without executing arbitrary code.
    path_list = ast.literal_eval(response.read())
    path = []
    for waypoint in path_list:
        path.append(MapSolarSystem.objects.get(id=waypoint))
    return path
| 31 | 134 | 0.561735 |
a0ade23395d3069649385af2893a0f1454cfd97f
| 349 |
py
|
Python
|
forms/views/field.py
|
alphagov/submit-forms
|
6339b40debbab668263246162ab33c68391ef744
|
[
"MIT"
] | 3 |
2017-11-20T18:17:47.000Z
|
2019-08-09T14:59:36.000Z
|
forms/views/field.py
|
alphagov/submit-forms
|
6339b40debbab668263246162ab33c68391ef744
|
[
"MIT"
] | null | null | null |
forms/views/field.py
|
alphagov/submit-forms
|
6339b40debbab668263246162ab33c68391ef744
|
[
"MIT"
] | 3 |
2019-08-29T11:55:16.000Z
|
2021-04-10T19:52:14.000Z
|
from django.shortcuts import render
from ..models import Field
| 20.529412 | 61 | 0.670487 |
a0af5afc99a71406be5ffead3cb66d5a5fbdf490
| 2,608 |
py
|
Python
|
crafting/CraftingHandler.py
|
uuk0/mcpython-4
|
1ece49257b3067027cc43b452a2fc44908d3514c
|
[
"MIT"
] | 2 |
2019-08-21T08:23:45.000Z
|
2019-09-25T13:20:28.000Z
|
crafting/CraftingHandler.py
|
uuk0/mcpython-4
|
1ece49257b3067027cc43b452a2fc44908d3514c
|
[
"MIT"
] | 11 |
2019-08-21T08:46:01.000Z
|
2021-09-08T01:18:04.000Z
|
crafting/CraftingHandler.py
|
uuk0/mcpython-4
|
1ece49257b3067027cc43b452a2fc44908d3514c
|
[
"MIT"
] | 5 |
2019-08-30T08:19:57.000Z
|
2019-10-26T03:31:16.000Z
|
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import globals as G
import crafting.IRecipeType
import json
import ResourceLocator
import item.ItemHandler
import traceback
import mod.ModMcpython
# Register the global crafting handler and hook recipe loading into the mod
# loading pipeline: recipe groups are loaded first, then the recipes
# themselves. NOTE(review): CraftingHandler and load_recipe_providers are not
# visible in this chunk — presumably defined earlier in the file; confirm.
G.craftinghandler = CraftingHandler()
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipe:groups", load_recipe_providers,
                                            info="loading crafting recipe groups")
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipes", G.craftinghandler.load, "minecraft",
                                            info="loading crafting recipes")
| 35.243243 | 113 | 0.657209 |
a0b0788c0fdd53bb74359f134c5cbbe7dd53cb63
| 1,625 |
py
|
Python
|
xcache.py
|
ATLAS-Analytics/AlarmAndAlertService
|
a167439b0c3f3c9594af52bd21fe8713b5f47bf1
|
[
"MIT"
] | null | null | null |
xcache.py
|
ATLAS-Analytics/AlarmAndAlertService
|
a167439b0c3f3c9594af52bd21fe8713b5f47bf1
|
[
"MIT"
] | 1 |
2021-05-26T02:21:42.000Z
|
2021-05-26T02:21:42.000Z
|
xcache.py
|
ATLAS-Analytics/AlarmAndAlertService
|
a167439b0c3f3c9594af52bd21fe8713b5f47bf1
|
[
"MIT"
] | null | null | null |
# Checks number of concurrent connections from XCaches to MWT2 dCache.
# Creates alarm if more than 200 from any server.
# ====
# It is run every 30 min from a cron job.
import json
from datetime import datetime
import requests
from alerts import alarms
config_path = '/config/config.json'
with open(config_path) as json_data:
    config = json.load(json_data,)
print('current time', datetime.now())
# Query Graphite for per-server connection counts over the last 2 minutes.
res = requests.get(
    'http://graphite.mwt2.org/render?target=dcache.xrootd.*&format=json&from=now-2min')
if (res.status_code == 200):
    data = res.json()
    print(data)
    print('recieved data on {} servers'.format(len(data)))
else:
    print('problem in receiving connections!')
    # BUG FIX: without this, `data` is undefined below and the script dies
    # with a NameError whenever Graphite is unreachable.
    data = []
ALARM = alarms('Virtual Placement', 'XCache', 'large number of connections')
for server in data:
    # Graphite target names encode the server IP with '_' instead of '.'.
    serverIP = server['target'].replace('dcache.xrootd.', '').replace('_', '.')
    # Use the most recent datapoint: [value, unix_timestamp].
    connections = server['datapoints'][-1][0]
    timestamp = server['datapoints'][-1][1]
    timestamp = datetime.fromtimestamp(timestamp)
    timestamp = timestamp.strftime("%Y-%m-%d %H:%M:%S")
    if not connections:
        print('n connections not retrieved... skipping.')
        continue
    if connections < 200:
        print('server {} has {} connections.'.format(serverIP, connections))
    else:
        # Threshold exceeded: raise an alarm tagged with the server IP.
        source = {
            "xcache": serverIP,
            "n_connections": connections,
            "timestamp": timestamp
        }
        print(source)
        ALARM.addAlarm(
            body='too many connections.',
            tags=[serverIP],
            source=source
        )
a0b0d03bf62e28fff9360da39608230424f15bea
| 769 |
py
|
Python
|
Question3_Competetive_Programming/solution.py
|
Robotrek-TechTatva/big-pp
|
5790075638aa7f39d787dfc390f43da1cdb4ed56
|
[
"MIT"
] | null | null | null |
Question3_Competetive_Programming/solution.py
|
Robotrek-TechTatva/big-pp
|
5790075638aa7f39d787dfc390f43da1cdb4ed56
|
[
"MIT"
] | null | null | null |
Question3_Competetive_Programming/solution.py
|
Robotrek-TechTatva/big-pp
|
5790075638aa7f39d787dfc390f43da1cdb4ed56
|
[
"MIT"
] | null | null | null |
import csv
# NOTE(review): "traingles.csv" looks like a typo for "triangles.csv" — confirm
# the actual data-file name before renaming either side.
filename = "traingles.csv"
with open(filename, 'r') as csv_file:
    csv_reader = csv.reader(csv_file)
    next(csv_reader)  # skip the header row
    for line in csv_reader:
        if line:  # ignore blank rows
            # `isInside` is not defined in this chunk — presumably defined or
            # imported elsewhere in the file; verify.
            print(isInside(line))
| 21.361111 | 49 | 0.470741 |
a0b1f6e65ee6e7176da940ac100c95bce2eaea30
| 238 |
py
|
Python
|
tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py
|
vasudev-sharma/course-content
|
46fb9be49da52acb5df252dda43f11b6d1fe827f
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 1 |
2021-06-09T09:56:21.000Z
|
2021-06-09T09:56:21.000Z
|
tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py
|
macasal/course-content
|
0fc5e1a0d736c6b0391eeab587012ed0ab01e462
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 1 |
2021-06-16T05:41:08.000Z
|
2021-06-16T05:41:08.000Z
|
tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py
|
macasal/course-content
|
0fc5e1a0d736c6b0391eeab587012ed0ab01e462
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
t = np.arange(0, 10, 0.1)  # Time from 0 to 10 years in 0.1 steps
with plt.xkcd():
    # Exponential growth with continuous rate 0.3 per year: p(t) = e^(0.3 t)
    p = np.exp(0.3 * t)
    fig = plt.figure(figsize=(6, 4))
    plt.plot(t, p)
    plt.ylabel('Population (millions)')
    plt.xlabel('time (years)')
    plt.show()
| 21.636364 | 64 | 0.60084 |
a0b4e94b3b4a3e4439a5b84940a160611b866816
| 1,063 |
py
|
Python
|
test/python/squarepants_test/plugins/test_link_resources_jars.py
|
ericzundel/mvn2pants
|
59776864939515bc0cae28e1b89944ce55b98b21
|
[
"Apache-2.0"
] | 8 |
2015-04-14T22:37:56.000Z
|
2021-01-20T19:46:40.000Z
|
test/python/squarepants_test/plugins/test_link_resources_jars.py
|
ericzundel/mvn2pants
|
59776864939515bc0cae28e1b89944ce55b98b21
|
[
"Apache-2.0"
] | 1 |
2016-01-13T23:19:14.000Z
|
2016-01-22T22:47:48.000Z
|
test/python/squarepants_test/plugins/test_link_resources_jars.py
|
ericzundel/mvn2pants
|
59776864939515bc0cae28e1b89944ce55b98b21
|
[
"Apache-2.0"
] | 3 |
2015-12-13T08:35:34.000Z
|
2018-08-01T17:44:59.000Z
|
# Tests for code in squarepants/src/main/python/squarepants/plugins/copy_resources/tasks/copy_resource_jars
#
# Run with:
# ./pants test squarepants/src/test/python/squarepants_test/plugins:copy_resources
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants_test.tasks.task_test_base import TaskTestBase
from squarepants.plugins.link_resources_jars.targets.resources_jar import ResourcesJar
from squarepants.plugins.link_resources_jars.tasks.link_resources_jars import LinkResourcesJars
| 40.884615 | 107 | 0.804327 |
a0b646cbb8b05a36f6c66a8ee0acf369718630ee
| 2,339 |
py
|
Python
|
src/binwalk/__main__.py
|
puppywang/binwalk
|
fa0c0bd59b8588814756942fe4cb5452e76c1dcd
|
[
"MIT"
] | 5,504 |
2017-11-30T21:25:07.000Z
|
2022-03-31T17:00:58.000Z
|
src/binwalk/__main__.py
|
puppywang/binwalk
|
fa0c0bd59b8588814756942fe4cb5452e76c1dcd
|
[
"MIT"
] | 247 |
2017-12-07T06:09:56.000Z
|
2022-03-23T05:34:47.000Z
|
src/binwalk/__main__.py
|
puppywang/binwalk
|
fa0c0bd59b8588814756942fe4cb5452e76c1dcd
|
[
"MIT"
] | 953 |
2017-12-01T17:05:17.000Z
|
2022-03-26T13:15:33.000Z
|
import os
import sys
# If installed to a custom prefix directory, binwalk may not be in
# the default module search path(s). Try to resolve the prefix module
# path and make it the first entry in sys.path.
# Ensure that 'src/binwalk' becomes '.' instead of an empty string
_parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Candidate package locations relative to this script: source tree,
# build directory, and a custom install prefix.
for _module_path in [
    # from repo: src/scripts/ -> src/
    _parent_dir,
    # from build dir: build/scripts-3.4/ -> build/lib/
    os.path.join(_parent_dir, "lib"),
    # installed in non-default path: bin/ -> lib/python3.4/site-packages/
    os.path.join(_parent_dir,
                 "lib",
                 "python%d.%d" % (sys.version_info[0], sys.version_info[1]),
                 "site-packages")
]:
    # Prepend so a prefix-installed binwalk wins over any system-wide copy.
    if os.path.exists(_module_path) and _module_path not in sys.path:
        sys.path = [_module_path] + sys.path
import binwalk
import binwalk.modules
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — presumably defined
    # later in the file; confirm before relying on this entry point.
    main()
| 35.984615 | 97 | 0.595554 |
a0b73f136f5ae88a402fa6be43272da9242cdedc
| 642 |
py
|
Python
|
MINI_WEB/mini_web/framework/mini_frame_4.py
|
GalphaXie/LaoX
|
b7e8f9744292dc052c870e4d873052e9bfec19ee
|
[
"MIT"
] | null | null | null |
MINI_WEB/mini_web/framework/mini_frame_4.py
|
GalphaXie/LaoX
|
b7e8f9744292dc052c870e4d873052e9bfec19ee
|
[
"MIT"
] | 12 |
2020-03-24T17:39:25.000Z
|
2022-03-12T00:01:24.000Z
|
MINI_WEB/mini_web/framework/mini_frame_4.py
|
GalphaXie/LaoX
|
b7e8f9744292dc052c870e4d873052e9bfec19ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# file: mini_frame.py
# Created by Guang at 19-7-19
# description:
# *-* coding:utf8 *-*
import time
| 21.4 | 106 | 0.619938 |
a0b7ca82a2ce39606a44ac65893f26c1b02da5d3
| 3,174 |
py
|
Python
|
server.py
|
AndrewB330/BinanceTerminal
|
3699a295d2b2af810d30ff692bab4e106ec44392
|
[
"MIT"
] | 14 |
2020-03-09T04:08:03.000Z
|
2021-12-29T14:53:32.000Z
|
server.py
|
AndrewB330/BinanceTerminal
|
3699a295d2b2af810d30ff692bab4e106ec44392
|
[
"MIT"
] | null | null | null |
server.py
|
AndrewB330/BinanceTerminal
|
3699a295d2b2af810d30ff692bab4e106ec44392
|
[
"MIT"
] | null | null | null |
import time
import pymongo
import schedule
from order import *
from utils import *
# MONGODB
# Connect to a local MongoDB instance and select the terminal's database;
# the "orders" collection is created lazily on first write.
db = pymongo.MongoClient("mongodb://localhost:27017/")["ShaurmaBinanceTerminal"]
order_db = db["orders"]
JOB_INTERVAL = 10.0  # interval of updating (seconds between update runs)
jobs_pool = {}  # NOTE(review): usage not visible in this chunk — presumably maps order ids to scheduled jobs; confirm.
if __name__ == '__main__':
    # initialize_test_db()
    # `run_server` is not visible in this chunk — likely defined later in the
    # file or star-imported from order/utils.
    run_server()
| 31.74 | 116 | 0.598614 |
a0b8186276c361f655fc43a3b80aba5c60bd0210
| 4,979 |
py
|
Python
|
sarenka/backend/api_searcher/searcher_full.py
|
adolabsnet/sarenka
|
2032aa6ddebfc69b0db551b7793080d17282ced2
|
[
"MIT"
] | 380 |
2019-12-05T09:37:47.000Z
|
2022-03-31T09:37:27.000Z
|
sarenka/backend/api_searcher/searcher_full.py
|
watchmen-coder/sarenka
|
d7fc0928e4992de3dbb1546137ca6a158e930ba8
|
[
"MIT"
] | 14 |
2020-09-26T17:49:42.000Z
|
2022-02-04T18:16:16.000Z
|
sarenka/backend/api_searcher/searcher_full.py
|
watchmen-coder/sarenka
|
d7fc0928e4992de3dbb1546137ca6a158e930ba8
|
[
"MIT"
] | 60 |
2021-01-01T16:25:30.000Z
|
2022-03-26T18:48:03.000Z
|
"""
Modu spiajcy wszystkie wyszukiwania w jedn klas - wszystkei dane dla adresu ip/domeny.
Klasa bezporednio uywana w widoku Django.
"""
from rest_framework.reverse import reverse
from typing import List, Dict
import whois
import socket
from connectors.credential import CredentialsNotFoundError
from api_searcher.search_engines.censys_engine.censys_host_search import CensysHostSearch
from api_searcher.search_engines.shodan_engine.shodan_host_search import ShodanHostSearch
from .dns.dns_searcher import DNSSearcher, DNSSearcherError
| 40.811475 | 126 | 0.589677 |
a0ba9ed937616e6ee4572b155cab4164464097a6
| 75 |
py
|
Python
|
Codewars/8kyu/get-the-mean-of-an-array/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7 |
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/get-the-mean-of-an-array/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/get-the-mean-of-an-array/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
def get_average(marks):
    """Return the mean of *marks*, truncated toward zero to an int."""
    return int(sum(marks) / len(marks))
| 18.75 | 56 | 0.666667 |
a0bb420692799a6a79988f6528e8182e5954185a
| 3,234 |
py
|
Python
|
cifar10/train.py
|
ashawkey/hawtorch
|
a6e28422da9258458b6268f5981c68d60623e12f
|
[
"MIT"
] | 1 |
2019-12-01T05:48:00.000Z
|
2019-12-01T05:48:00.000Z
|
cifar10/train.py
|
ashawkey/hawtorch
|
a6e28422da9258458b6268f5981c68d60623e12f
|
[
"MIT"
] | null | null | null |
cifar10/train.py
|
ashawkey/hawtorch
|
a6e28422da9258458b6268f5981c68d60623e12f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import hawtorch
import hawtorch.io as io
from hawtorch import Trainer
from hawtorch.metrics import ClassificationMeter
from hawtorch.utils import backup
import models
import argparse
# CLI: a single --config option pointing at the JSON run configuration.
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='configs.json')
parser = parser.parse_args()  # NOTE: rebinds `parser` to the parsed namespace
config_file = parser.config
args = io.load_json(config_file)  # run configuration loaded as a dict
logger = io.logger(args["workspace_path"])
# CIFAR-10 class names, indexed by label id.
names = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
if __name__ == "__main__":
    backup(args["workspace_path"])
    # `create_trainer` is not visible in this chunk — presumably defined
    # elsewhere in the file; confirm.
    trainer = create_trainer()
    trainer.train(args["epochs"])
    trainer.evaluate()
| 34.042105 | 119 | 0.62987 |
a0bbcdf05486aa95d06b89b25ca7866a985c51bb
| 718 |
py
|
Python
|
examples/scatterplot.py
|
ajduberstein/slayer
|
e4f2b6e0277ac38fe71ec99eaf3ee4769057b0ea
|
[
"MIT"
] | 2 |
2019-02-26T23:55:06.000Z
|
2019-02-26T23:56:09.000Z
|
examples/scatterplot.py
|
ajduberstein/slayer
|
e4f2b6e0277ac38fe71ec99eaf3ee4769057b0ea
|
[
"MIT"
] | 1 |
2019-02-10T07:00:39.000Z
|
2019-02-10T07:00:39.000Z
|
examples/scatterplot.py
|
ajduberstein/slayer
|
e4f2b6e0277ac38fe71ec99eaf3ee4769057b0ea
|
[
"MIT"
] | null | null | null |
"""
Example of how to make a Scatterplot with a time component
"""
import slayer as sly
import pandas as pd
DATA_URL = 'https://raw.githubusercontent.com/ajduberstein/sf_growth/master/public/data/business.csv'
businesses = pd.read_csv(DATA_URL)
FUCHSIA_RGBA = [255, 0, 255, 140]
color_scale = sly.ColorScale(
palette='random',
variable_name='neighborhood',
scale_type='categorical_random')
s = sly.Slayer(sly.Viewport(longitude=-122.43, latitude=37.76, zoom=11)) +\
sly.Timer(tick_rate=0.75) + \
sly.Scatterplot(
businesses,
position=['lng', 'lat'],
color=color_scale,
radius=50,
time_field='start_date')
s.to_html('scatterplot.html', interactive=True)
| 26.592593 | 101 | 0.693593 |
a0bc99badd8c414f8e67c165139e1e1864acd087
| 3,699 |
py
|
Python
|
test_dictondisk.py
|
MKuranowski/dictondisk
|
ca25f8fed2f60d8ee63d6c5eaa9e620555581383
|
[
"MIT"
] | null | null | null |
test_dictondisk.py
|
MKuranowski/dictondisk
|
ca25f8fed2f60d8ee63d6c5eaa9e620555581383
|
[
"MIT"
] | null | null | null |
test_dictondisk.py
|
MKuranowski/dictondisk
|
ca25f8fed2f60d8ee63d6c5eaa9e620555581383
|
[
"MIT"
] | null | null | null |
import dictondisk
import random
import pytest
import os
# Keys that the tests will remove from the dictionary fixtures below.
remove_keys = {0, (33, 12.23), "c", ""}
# Reference in-memory dict covering the key/value shapes dictondisk must
# handle: int/str/tuple keys (incl. the empty string) and mixed value types.
vanilla_dict = {
    0: 1337, 1: 3.14, 2: 2.71, 3: 1.61,
    "a": "", "b": "!", "c": "", "": "",
    (1, .5): "lorem", (33, 12.23): "ipsum",
    -1: ["one", "two", "three"]
}
| 20.324176 | 58 | 0.593674 |
a0bd4d5ee3152479bb0efe0eaded5fd65042adf4
| 1,904 |
py
|
Python
|
backend/src/users/models.py
|
moatom/alistice
|
222217928d9634b14e3c192abedc8c7d419ab868
|
[
"MIT"
] | null | null | null |
backend/src/users/models.py
|
moatom/alistice
|
222217928d9634b14e3c192abedc8c7d419ab868
|
[
"MIT"
] | null | null | null |
backend/src/users/models.py
|
moatom/alistice
|
222217928d9634b14e3c192abedc8c7d419ab868
|
[
"MIT"
] | null | null | null |
from src.extentions import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import datetime as dt
# https://help.twitter.com/en/managing-your-account/twitter-username-rules
# https://office-hack.com/gmail/password/
| 46.439024 | 87 | 0.628151 |
a0bead6599200d03855aef8174ff835ecca2f74f
| 76,496 |
py
|
Python
|
commonroad/scenario/lanelet.py
|
CommonRoad/commonroad-io
|
93824961da9c41eb7768b5cf1acbed9a07446dc2
|
[
"BSD-3-Clause"
] | 3 |
2022-01-05T09:10:18.000Z
|
2022-03-22T15:09:43.000Z
|
commonroad/scenario/lanelet.py
|
CommonRoad/commonroad-io
|
93824961da9c41eb7768b5cf1acbed9a07446dc2
|
[
"BSD-3-Clause"
] | null | null | null |
commonroad/scenario/lanelet.py
|
CommonRoad/commonroad-io
|
93824961da9c41eb7768b5cf1acbed9a07446dc2
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import enum
from typing import *
import numpy as np
from shapely.geometry import MultiPolygon as ShapelyMultiPolygon
from shapely.geometry import Point as ShapelyPoint
from shapely.geometry import Polygon as ShapelyPolygon
from shapely.strtree import STRtree
import commonroad.geometry.transform
from commonroad.common.validity import *
from commonroad.geometry.shape import Polygon, ShapeGroup, Circle, Rectangle, Shape
from commonroad.scenario.intersection import Intersection
from commonroad.scenario.obstacle import Obstacle
from commonroad.scenario.traffic_sign import TrafficSign, TrafficLight
from commonroad.visualization.drawable import IDrawable
from commonroad.visualization.param_server import ParamServer
from commonroad.visualization.renderer import IRenderer
__author__ = "Christian Pek, Sebastian Maierhofer"
__copyright__ = "TUM Cyber-Physical Systems Group"
__credits__ = ["BMW CAR@TUM"]
__version__ = "2022.1"
__maintainer__ = "Sebastian Maierhofer"
__email__ = "[email protected]"
__status__ = "released"
class Lanelet:
"""
Class which describes a Lanelet entity according to the CommonRoad specification. Each lanelet is described by a
left and right boundary (polylines). Furthermore, lanelets have relations to other lanelets, e.g. an adjacent left
neighbor or a predecessor.
"""
    def __init__(self, left_vertices: np.ndarray, center_vertices: np.ndarray, right_vertices: np.ndarray,
                 lanelet_id: int, predecessor=None, successor=None, adjacent_left=None,
                 adjacent_left_same_direction=None, adjacent_right=None, adjacent_right_same_direction=None,
                 line_marking_left_vertices=LineMarking.NO_MARKING, line_marking_right_vertices=LineMarking.NO_MARKING,
                 stop_line=None, lanelet_type=None, user_one_way=None, user_bidirectional=None, traffic_signs=None,
                 traffic_lights=None, ):
        """
        Constructor of a Lanelet object

        :param left_vertices: The vertices of the left boundary of the Lanelet described as a
        polyline [[x0,y0],[x1,y1],...,[xn,yn]]
        :param center_vertices: The vertices of the center line of the Lanelet described as a
        polyline [[x0,y0],[x1,y1],...,[xn,yn]]
        :param right_vertices: The vertices of the right boundary of the Lanelet described as a
        polyline [[x0,y0],[x1,y1],...,[xn,yn]]
        :param lanelet_id: The unique id (natural number) of the lanelet
        :param predecessor: The list of predecessor lanelets (None if not existing)
        :param successor: The list of successor lanelets (None if not existing)
        :param adjacent_left: The adjacent left lanelet (None if not existing)
        :param adjacent_left_same_direction: True if the adjacent left lanelet has the same driving direction,
        false otherwise (None if no left adjacent lanelet exists)
        :param adjacent_right: The adjacent right lanelet (None if not existing)
        :param adjacent_right_same_direction: True if the adjacent right lanelet has the same driving direction,
        false otherwise (None if no right adjacent lanelet exists)
        :param line_marking_left_vertices: The type of line marking of the left boundary
        :param line_marking_right_vertices: The type of line marking of the right boundary
        :param stop_line: The stop line of the lanelet
        :param lanelet_type: The types of lanelet applicable here
        :param user_one_way: type of users that will use the lanelet as one-way
        :param user_bidirectional: type of users that will use the lanelet as bidirectional way
        :param traffic_signs: Traffic signs to be applied
        :param traffic_lights: Traffic lights to follow
        """
        # Set required properties
        # Backing fields are initialized to None first; the public-name
        # assignments below go through property setters — presumably for
        # validation (setters are defined outside this excerpt; confirm).
        self._left_vertices = None
        self._right_vertices = None
        self._center_vertices = None
        self._lanelet_id = None
        self.lanelet_id = lanelet_id
        self.left_vertices = left_vertices
        self.right_vertices = right_vertices
        self.center_vertices = center_vertices
        # check if length of each polyline is the same
        assert len(left_vertices[0]) == len(center_vertices[0]) == len(
            right_vertices[0]), '<Lanelet/init>: Provided polylines do not share the same length! {}/{}/{}'.format(
            len(left_vertices[0]), len(center_vertices[0]), len(right_vertices[0]))
        # Set lane markings
        self._line_marking_left_vertices = line_marking_left_vertices
        self._line_marking_right_vertices = line_marking_right_vertices
        # Set predecessors and successors
        self._predecessor = None
        if predecessor is None:
            self._predecessor = []
        else:
            self.predecessor = predecessor
        self._successor = None
        if successor is None:
            self._successor = []
        else:
            self.successor = successor
        # Set adjacent lanelets
        self._adj_left = None
        self._adj_left_same_direction = None
        if adjacent_left is not None:
            self.adj_left = adjacent_left
            self.adj_left_same_direction = adjacent_left_same_direction
        self._adj_right = None
        self._adj_right_same_direction = None
        if adjacent_right is not None:
            self.adj_right = adjacent_right
            self.adj_right_same_direction = adjacent_right_same_direction
        self._distance = None
        self._inner_distance = None
        # create empty polygon
        # Lanelet outline: right boundary forward, then left boundary reversed,
        # forming a closed ring.
        self._polygon = Polygon(np.concatenate((self.right_vertices, np.flip(self.left_vertices, 0))))
        self._dynamic_obstacles_on_lanelet = {}
        self._static_obstacles_on_lanelet = set()
        self._stop_line = None
        if stop_line:
            self.stop_line = stop_line
        self._lanelet_type = None
        if lanelet_type is None:
            self._lanelet_type = set()
        else:
            self.lanelet_type = lanelet_type
        self._user_one_way = None
        if user_one_way is None:
            self._user_one_way = set()
        else:
            self.user_one_way = user_one_way
        self._user_bidirectional = None
        if user_bidirectional is None:
            self._user_bidirectional = set()
        else:
            self.user_bidirectional = user_bidirectional
        # Set Traffic Rules
        self._traffic_signs = None
        if traffic_signs is None:
            self._traffic_signs = set()
        else:
            self.traffic_signs = traffic_signs
        self._traffic_lights = None
        if traffic_lights is None:
            self._traffic_lights = set()
        else:
            self.traffic_lights = traffic_lights
def add_predecessor(self, lanelet: int):
"""
Adds the ID of a predecessor lanelet to the list of predecessors.
:param lanelet: Predecessor lanelet ID.
"""
if lanelet not in self.predecessor:
self.predecessor.append(lanelet)
def remove_predecessor(self, lanelet: int):
"""
Removes the ID of a predecessor lanelet from the list of predecessors.
:param lanelet: Predecessor lanelet ID.
"""
if lanelet in self.predecessor:
self.predecessor.remove(lanelet)
def add_successor(self, lanelet: int):
"""
Adds the ID of a successor lanelet to the list of successors.
:param lanelet: Successor lanelet ID.
"""
if lanelet not in self.successor:
self.successor.append(lanelet)
def remove_successor(self, lanelet: int):
"""
Removes the ID of a successor lanelet from the list of successors.
:param lanelet: Successor lanelet ID.
"""
if lanelet in self.successor:
self.successor.remove(lanelet)
    def translate_rotate(self, translation: np.ndarray, angle: float):
        """
        This method translates and rotates a lanelet

        :param translation: The translation given as [x_off,y_off] for the x and y translation
        :param angle: The rotation angle in radian (counter-clockwise defined)
        """
        assert is_real_number_vector(translation, 2), '<Lanelet/translate_rotate>: provided translation ' \
                                                      'is not valid! translation = {}'.format(translation)
        assert is_valid_orientation(
            angle), '<Lanelet/translate_rotate>: provided angle is not valid! angle = {}'.format(angle)
        # create transformation matrix
        t_m = commonroad.geometry.transform.translation_rotation_matrix(translation, angle)
        # transform center vertices: stack vertices as homogeneous columns ([x, y, 1]^T)
        # so translation and rotation are applied in a single matrix product
        tmp = t_m.dot(np.vstack((self.center_vertices.transpose(), np.ones((1, self.center_vertices.shape[0])))))
        tmp = tmp[0:2, :]  # drop the homogeneous coordinate row again
        self._center_vertices = tmp.transpose()
        # transform left vertices
        tmp = t_m.dot(np.vstack((self.left_vertices.transpose(), np.ones((1, self.left_vertices.shape[0])))))
        tmp = tmp[0:2, :]
        self._left_vertices = tmp.transpose()
        # transform right vertices
        tmp = t_m.dot(np.vstack((self.right_vertices.transpose(), np.ones((1, self.right_vertices.shape[0])))))
        tmp = tmp[0:2, :]
        self._right_vertices = tmp.transpose()
        # transform the stop line
        if self._stop_line is not None:
            self._stop_line.translate_rotate(translation, angle)
        # recreate polygon in case it existed (vertices changed, so the cached polygon is stale)
        self._polygon = Polygon(np.concatenate((self.right_vertices, np.flip(self.left_vertices, 0))))
    def interpolate_position(self, distance: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, int]:
        """
        Computes the interpolated positions on the center/right/left polyline of the lanelet for a given distance
        along the lanelet

        :param distance: The distance for the interpolation; must lie in [0, total lanelet length]
        :return: The interpolated positions on the center/right/left polyline and the segment id of the polyline where
        the interpolation takes place in the form ([x_c,y_c],[x_r,y_r],[x_l,y_l], segment_id)
        """
        assert is_real_number(distance) and np.greater_equal(self.distance[-1], distance) and np.greater_equal(distance,
                                                                                                               0), \
            '<Lanelet/interpolate_position>: provided distance is not valid! distance = {}'.format(
                distance)
        # locate the segment whose start distance is <= the queried distance; searchsorted
        # can land one off (e.g. idx == -1 for distance == 0), the while loop corrects that
        idx = np.searchsorted(self.distance, distance) - 1
        while not self.distance[idx] <= distance:
            idx += 1
        # linear interpolation factor within segment [idx, idx + 1]
        r = (distance - self.distance[idx]) / (self.distance[idx + 1] - self.distance[idx])
        return ((1 - r) * self._center_vertices[idx] + r * self._center_vertices[idx + 1],
                (1 - r) * self._right_vertices[idx] + r * self._right_vertices[idx + 1],
                (1 - r) * self._left_vertices[idx] + r * self._left_vertices[idx + 1], idx)
def convert_to_polygon(self) -> Polygon:
"""
Converts the given lanelet to a polygon representation
:return: The polygon of the lanelet
"""
warnings.warn("Use the lanelet property <polygon> instead", DeprecationWarning)
return self._polygon
    def contains_points(self, point_list: np.ndarray) -> List[bool]:
        """
        Checks if a list of points is enclosed in the lanelet

        :param point_list: The list of points in the form [[px1,py1],[px2,py2,],...]
        :return: List of Boolean values with True indicating point is enclosed and False otherwise
        """
        assert isinstance(point_list,
                          ValidTypes.ARRAY), '<Lanelet/contains_points>: provided list of points is not a list! type ' \
                                             '= {}'.format(type(point_list))
        # NOTE(review): the message below is missing its leading '<' ('Lanelet/...') - cosmetic typo
        assert is_valid_polyline(
            point_list), 'Lanelet/contains_points>: provided list of points is malformed! points = {}'.format(
            point_list)
        # point-in-polygon test against the precomputed lanelet polygon, one point at a time
        return [self._polygon.contains_point(p) for p in point_list]
def get_obstacles(self, obstacles: List[Obstacle], time_step: int = 0) -> List[Obstacle]:
"""
Returns the subset of obstacles, which are located in the lanelet, of a given candidate set
:param obstacles: The set of obstacle candidates
:param time_step: The time step for the occupancy to check
:return:
"""
assert isinstance(obstacles, list) and all(
isinstance(o, Obstacle) for o in obstacles), '<Lanelet/get_obstacles>: Provided list of obstacles' \
' is malformed! obstacles = {}'.format(obstacles)
# output list
res = list()
lanelet_shapely_obj = self._polygon.shapely_object
# look at each obstacle
for o in obstacles:
o_shape = o.occupancy_at_time(time_step).shape
# vertices to check
shape_shapely_objects = list()
# distinguish between shape and shape group and extract vertices
if isinstance(o_shape, ShapeGroup):
shape_shapely_objects.extend([sh.shapely_object for sh in o_shape.shapes])
else:
shape_shapely_objects.append(o_shape.shapely_object)
# check if obstacle is in lane
for shapely_obj in shape_shapely_objects:
if lanelet_shapely_obj.intersects(shapely_obj):
res.append(o)
break
return res
def find_lanelet_successors_in_range(self, lanelet_network: "LaneletNetwork", max_length=50.0) -> List[List[int]]:
"""
Finds all possible successor paths (id sequences) within max_length.
:param lanelet_network: lanelet network
:param max_length: abort once length of path is reached
:return: list of lanelet IDs
"""
paths = [[s] for s in self.successor]
paths_final = []
lengths = [lanelet_network.find_lanelet_by_id(s).distance[-1] for s in self.successor]
while paths:
paths_next = []
lengths_next = []
for p, le in zip(paths, lengths):
successors = lanelet_network.find_lanelet_by_id(p[-1]).successor
if not successors:
paths_final.append(p)
else:
for s in successors:
if s in p or s == self.lanelet_id or le >= max_length:
# prevent loops and consider length of first successor
paths_final.append(p)
continue
l_next = le + lanelet_network.find_lanelet_by_id(s).distance[-1]
if l_next < max_length:
paths_next.append(p + [s])
lengths_next.append(l_next)
else:
paths_final.append(p + [s])
paths = paths_next
lengths = lengths_next
return paths_final
def add_dynamic_obstacle_to_lanelet(self, obstacle_id: int, time_step: int):
"""
Adds a dynamic obstacle ID to lanelet
:param obstacle_id: obstacle ID to add
:param time_step: time step at which the obstacle should be added
"""
if self.dynamic_obstacles_on_lanelet.get(time_step) is None:
self.dynamic_obstacles_on_lanelet[time_step] = set()
self.dynamic_obstacles_on_lanelet[time_step].add(obstacle_id)
    def add_static_obstacle_to_lanelet(self, obstacle_id: int):
        """
        Adds a static obstacle ID to lanelet

        :param obstacle_id: obstacle ID to add
        """
        # set semantics: adding the same ID twice is a harmless no-op
        self.static_obstacles_on_lanelet.add(obstacle_id)
    def add_traffic_sign_to_lanelet(self, traffic_sign_id: int):
        """
        Adds a traffic sign ID to lanelet

        :param traffic_sign_id: traffic sign ID to add
        """
        # set semantics: re-adding an existing ID is a harmless no-op
        self.traffic_signs.add(traffic_sign_id)
    def add_traffic_light_to_lanelet(self, traffic_light_id: int):
        """
        Adds a traffic light ID to lanelet

        :param traffic_light_id: traffic light ID to add
        """
        # set semantics: re-adding an existing ID is a harmless no-op
        self.traffic_lights.add(traffic_light_id)
def dynamic_obstacle_by_time_step(self, time_step) -> Set[int]:
"""
Returns all dynamic obstacles on lanelet at specific time step
:param time_step: time step of interest
:returns: list of obstacle IDs
"""
if self.dynamic_obstacles_on_lanelet.get(time_step) is not None:
return self.dynamic_obstacles_on_lanelet.get(time_step)
else:
return set()
class LaneletNetwork(IDrawable):
"""
Class which represents a network of connected lanelets
"""
def __init__(self):
"""
Constructor for LaneletNetwork
"""
self._lanelets: Dict[int, Lanelet] = {}
# lanelet_id, shapely_polygon
self._buffered_polygons: Dict[int, ShapelyPolygon] = {}
self._strtee = None
# id(shapely_polygon), lanelet_id
self._lanelet_id_index_by_id: Dict[int, int] = {}
self._intersections: Dict[int, Intersection] = {}
self._traffic_signs: Dict[int, TrafficSign] = {}
self._traffic_lights: Dict[int, TrafficLight] = {}
    # pickling of STRtree is not supported by shapely at the moment
    # use this workaround described in this issue:
    # https://github.com/Toblerity/Shapely/issues/1033
    def _create_strtree(self):
        """
        Creates spatial index for lanelets for faster querying the lanelets by position.
        Since it is an immutable object, it has to be recreated after every lanelet addition or it should be done
        once after all lanelets are added.
        """
        # validate buffered polygons; entries failing assert_shapely_polygon are dropped
        # from the index (presumably invalid geometries - TODO confirm helper semantics)
        self._buffered_polygons = {lanelet_id: lanelet_shapely_polygon for lanelet_id, lanelet_shapely_polygon in
                                   self._buffered_polygons.items() if
                                   assert_shapely_polygon(lanelet_id, lanelet_shapely_polygon)}
        # STRtree queries return polygon objects, so map object identity back to lanelet IDs
        self._lanelet_id_index_by_id = {id(lanelet_shapely_polygon): lanelet_id for lanelet_id, lanelet_shapely_polygon
                                        in self._buffered_polygons.items()}
        self._strtee = STRtree(list(self._buffered_polygons.values()))
def remove_lanelet(self, lanelet_id: int, rtree: bool = True):
"""
Removes a lanelet from a lanelet network and deletes all references.
@param lanelet_id: ID of lanelet which should be removed.
@param rtree: Boolean indicating whether rtree should be initialized
"""
if lanelet_id in self._lanelets.keys():
del self._lanelets[lanelet_id]
del self._buffered_polygons[lanelet_id]
self.cleanup_lanelet_references()
if rtree:
self._create_strtree()
    def cleanup_lanelet_references(self):
        """
        Deletes lanelet IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks.
        """
        existing_ids = set(self._lanelets.keys())
        for la in self.lanelets:
            # private attributes are written directly to bypass any setter validation
            # while pruning dangling references
            la._predecessor = list(set(la.predecessor).intersection(existing_ids))
            la._successor = list(set(la.successor).intersection(existing_ids))
            # an adjacency and its direction flag are cleared together, keyed on whether
            # the adjacent lanelet is still part of the network
            la._adj_left = None if la.adj_left is None or la.adj_left not in existing_ids else la.adj_left
            la._adj_left_same_direction = None \
                if la.adj_left_same_direction is None or la.adj_left not in existing_ids else la.adj_left_same_direction
            la._adj_right = None if la.adj_right is None or la.adj_right not in existing_ids else la.adj_right
            la._adj_right_same_direction = None \
                if la.adj_right_same_direction is None or la.adj_right not in existing_ids else \
                la.adj_right_same_direction
        for inter in self.intersections:
            # intersections reference lanelets through their incoming elements and crossings
            for inc in inter.incomings:
                inc._incoming_lanelets = set(inc.incoming_lanelets).intersection(existing_ids)
                inc._successors_straight = set(inc.successors_straight).intersection(existing_ids)
                inc._successors_right = set(inc.successors_right).intersection(existing_ids)
                inc._successors_left = set(inc.successors_left).intersection(existing_ids)
            inter._crossings = set(inter.crossings).intersection(existing_ids)
def remove_traffic_sign(self, traffic_sign_id: int):
"""
Removes a traffic sign from a lanelet network and deletes all references.
@param traffic_sign_id: ID of traffic sign which should be removed.
"""
if traffic_sign_id in self._traffic_signs.keys():
del self._traffic_signs[traffic_sign_id]
self.cleanup_traffic_sign_references()
    def cleanup_traffic_sign_references(self):
        """
        Deletes traffic sign IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks.
        """
        existing_ids = set(self._traffic_signs.keys())
        for la in self.lanelets:
            # written via the private attribute to bypass setter validation
            la._traffic_signs = la.traffic_signs.intersection(existing_ids)
            # stop lines may reference traffic signs as well and must be pruned too
            if la.stop_line is not None and la.stop_line.traffic_sign_ref is not None:
                la.stop_line._traffic_sign_ref = la.stop_line.traffic_sign_ref.intersection(existing_ids)
def remove_traffic_light(self, traffic_light_id: int):
"""
Removes a traffic light from a lanelet network and deletes all references.
@param traffic_light_id: ID of traffic sign which should be removed.
"""
if traffic_light_id in self._traffic_lights.keys():
del self._traffic_lights[traffic_light_id]
self.cleanup_traffic_light_references()
    def cleanup_traffic_light_references(self):
        """
        Deletes traffic light IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks.
        """
        existing_ids = set(self._traffic_lights.keys())
        for la in self.lanelets:
            # written via the private attribute to bypass setter validation
            la._traffic_lights = la.traffic_lights.intersection(existing_ids)
            # stop lines may reference traffic lights as well and must be pruned too
            if la.stop_line is not None and la.stop_line.traffic_light_ref is not None:
                la.stop_line._traffic_light_ref = la.stop_line.traffic_light_ref.intersection(existing_ids)
def remove_intersection(self, intersection_id: int):
"""
Removes a intersection from a lanelet network and deletes all references.
@param intersection_id: ID of intersection which should be removed.
"""
if intersection_id in self._intersections.keys():
del self._intersections[intersection_id]
def find_lanelet_by_id(self, lanelet_id: int) -> Lanelet:
"""
Finds a lanelet for a given lanelet_id
:param lanelet_id: The id of the lanelet to find
:return: The lanelet object if the id exists and None otherwise
"""
assert is_natural_number(
lanelet_id), '<LaneletNetwork/find_lanelet_by_id>: provided id is not valid! id = {}'.format(lanelet_id)
return self._lanelets[lanelet_id] if lanelet_id in self._lanelets else None
def find_traffic_sign_by_id(self, traffic_sign_id: int) -> TrafficSign:
"""
Finds a traffic sign for a given traffic_sign_id
:param traffic_sign_id: The id of the traffic sign to find
:return: The traffic sign object if the id exists and None otherwise
"""
assert is_natural_number(
traffic_sign_id), '<LaneletNetwork/find_traffic_sign_by_id>: provided id is not valid! ' \
'id = {}'.format(traffic_sign_id)
return self._traffic_signs[traffic_sign_id] if traffic_sign_id in self._traffic_signs else None
def find_traffic_light_by_id(self, traffic_light_id: int) -> TrafficLight:
"""
Finds a traffic light for a given traffic_light_id
:param traffic_light_id: The id of the traffic light to find
:return: The traffic light object if the id exists and None otherwise
"""
assert is_natural_number(
traffic_light_id), '<LaneletNetwork/find_traffic_light_by_id>: provided id is not valid! ' \
'id = {}'.format(traffic_light_id)
return self._traffic_lights[traffic_light_id] if traffic_light_id in self._traffic_lights else None
def find_intersection_by_id(self, intersection_id: int) -> Intersection:
"""
Finds a intersection for a given intersection_id
:param intersection_id: The id of the intersection to find
:return: The intersection object if the id exists and None otherwise
"""
assert is_natural_number(intersection_id), '<LaneletNetwork/find_intersection_by_id>: ' \
'provided id is not valid! id = {}'.format(intersection_id)
return self._intersections[intersection_id] if intersection_id in self._intersections else None
def add_lanelet(self, lanelet: Lanelet, rtree: bool = True):
"""
Adds a lanelet to the LaneletNetwork
:param lanelet: The lanelet to add
:param eps: The size increase of the buffered polygons
:param rtree: Boolean indicating whether rtree should be initialized
:return: True if the lanelet has successfully been added to the network, false otherwise
"""
assert isinstance(lanelet, Lanelet), '<LaneletNetwork/add_lanelet>: provided lanelet is not of ' \
'type lanelet! type = {}'.format(type(lanelet))
# check if lanelet already exists in network and warn user
if lanelet.lanelet_id in self._lanelets.keys():
warnings.warn('Lanelet already exists in network! No changes are made.')
return False
else:
self._lanelets[lanelet.lanelet_id] = lanelet
self._buffered_polygons[lanelet.lanelet_id] = lanelet.polygon.shapely_object
if rtree:
self._create_strtree()
return True
def add_traffic_sign(self, traffic_sign: TrafficSign, lanelet_ids: Set[int]):
"""
Adds a traffic sign to the LaneletNetwork
:param traffic_sign: The traffic sign to add
:param lanelet_ids: Lanelets the traffic sign should be referenced from
:return: True if the traffic sign has successfully been added to the network, false otherwise
"""
assert isinstance(traffic_sign, TrafficSign), '<LaneletNetwork/add_traffic_sign>: provided traffic sign is ' \
'not of type traffic_sign! type = {}'.format(type(traffic_sign))
# check if traffic already exists in network and warn user
if traffic_sign.traffic_sign_id in self._traffic_signs.keys():
warnings.warn('Traffic sign with ID {} already exists in network! '
'No changes are made.'.format(traffic_sign.traffic_sign_id))
return False
else:
self._traffic_signs[traffic_sign.traffic_sign_id] = traffic_sign
for lanelet_id in lanelet_ids:
lanelet = self.find_lanelet_by_id(lanelet_id)
if lanelet is not None:
lanelet.add_traffic_sign_to_lanelet(traffic_sign.traffic_sign_id)
else:
warnings.warn('Traffic sign cannot be referenced to lanelet because the lanelet does not exist.')
return True
def add_traffic_light(self, traffic_light: TrafficLight, lanelet_ids: Set[int]):
"""
Adds a traffic light to the LaneletNetwork
:param traffic_light: The traffic light to add
:param lanelet_ids: Lanelets the traffic sign should be referenced from
:return: True if the traffic light has successfully been added to the network, false otherwise
"""
assert isinstance(traffic_light, TrafficLight), '<LaneletNetwork/add_traffic_light>: provided traffic light ' \
'is not of type traffic_light! ' \
'type = {}'.format(type(traffic_light))
# check if traffic already exists in network and warn user
if traffic_light.traffic_light_id in self._traffic_lights.keys():
warnings.warn('Traffic light already exists in network! No changes are made.')
return False
else:
self._traffic_lights[traffic_light.traffic_light_id] = traffic_light
for lanelet_id in lanelet_ids:
lanelet = self.find_lanelet_by_id(lanelet_id)
if lanelet is not None:
lanelet.add_traffic_light_to_lanelet(traffic_light.traffic_light_id)
else:
warnings.warn('Traffic light cannot be referenced to lanelet because the lanelet does not exist.')
return True
def add_intersection(self, intersection: Intersection):
"""
Adds a intersection to the LaneletNetwork
:param intersection: The intersection to add
:return: True if the traffic light has successfully been added to the network, false otherwise
"""
assert isinstance(intersection, Intersection), '<LaneletNetwork/add_intersection>: provided intersection is ' \
'not of type Intersection! type = {}'.format(type(intersection))
# check if traffic already exists in network and warn user
if intersection.intersection_id in self._intersections.keys():
warnings.warn('Intersection already exists in network! No changes are made.')
return False
else:
self._intersections[intersection.intersection_id] = intersection
return True
def add_lanelets_from_network(self, lanelet_network: 'LaneletNetwork'):
"""
Adds lanelets from a given network object to the current network
:param lanelet_network: The lanelet network
:return: True if all lanelets have been added to the network, false otherwise
"""
flag = True
# add lanelets to the network
for la in lanelet_network.lanelets:
flag = flag and self.add_lanelet(la, rtree=False)
self._create_strtree()
return flag
    def translate_rotate(self, translation: np.ndarray, angle: float):
        """
        Translates and rotates the complete lanelet network

        :param translation: The translation given as [x_off,y_off] for the x and y translation
        :param angle: The rotation angle in radian (counter-clockwise defined)
        """
        assert is_real_number_vector(translation,
                                     2), '<LaneletNetwork/translate_rotate>: provided translation is not valid! ' \
                                         'translation = {}'.format(translation)
        assert is_valid_orientation(
            angle), '<LaneletNetwork/translate_rotate>: provided angle is not valid! angle = {}'.format(angle)
        # rotate each lanelet
        for lanelet in self._lanelets.values():
            lanelet.translate_rotate(translation, angle)
        # traffic signs and lights carry positions of their own and must be transformed too
        for traffic_sign in self._traffic_signs.values():
            traffic_sign.translate_rotate(translation, angle)
        for traffic_light in self._traffic_lights.values():
            traffic_light.translate_rotate(translation, angle)
        # NOTE(review): _buffered_polygons / the STRtree are not rebuilt here although the
        # lanelet polygons change - confirm the spatial index is refreshed before the next
        # positional query
    def find_lanelet_by_position(self, point_list: List[np.ndarray]) -> List[List[int]]:
        """
        Finds the lanelet id of a given position

        :param point_list: The list of positions to check
        :return: A list of lanelet ids. If the position could not be matched to a lanelet, an empty list is returned
        """
        # NOTE(review): the assert tag below says <Lanelet/contains_points>; it looks
        # copied from Lanelet.contains_points - consider correcting the message
        assert isinstance(point_list,
                          ValidTypes.LISTS), '<Lanelet/contains_points>: provided list of points is not a list! type ' \
                                             '= {}'.format(
            type(point_list))
        # for each point: query the STRtree for candidate lanelet polygons, confirm each
        # candidate with an exact intersection test (plus a tiny buffer to tolerate
        # floating-point noise on lanelet borders), then map the polygon object back to
        # its lanelet ID via object identity
        return [[self._get_lanelet_id_by_shapely_polygon(lanelet_shapely_polygon) for lanelet_shapely_polygon in
                 self._strtee.query(point) if lanelet_shapely_polygon.intersects(point)
                 or lanelet_shapely_polygon.buffer(1e-15).intersects(point)] for point in
                [ShapelyPoint(point) for point in point_list]]
    def find_lanelet_by_shape(self, shape: Shape) -> List[int]:
        """
        Finds the lanelet id of a given shape

        :param shape: The shape to check
        :return: A list of lanelet ids. If the position could not be matched to a lanelet, an empty list is returned
        """
        assert isinstance(shape, (Circle, Polygon, Rectangle)), '<Lanelet/find_lanelet_by_shape>: ' \
                                                                'provided shape is not a shape! ' \
                                                                'type = {}'.format(type(shape))
        # the STRtree narrows down candidates; the exact intersection test confirms each hit
        return [self._get_lanelet_id_by_shapely_polygon(lanelet_shapely_polygon) for lanelet_shapely_polygon in
                self._strtee.query(shape.shapely_object) if lanelet_shapely_polygon.intersects(shape.shapely_object)]
def filter_obstacles_in_network(self, obstacles: List[Obstacle]) -> List[Obstacle]:
"""
Returns the list of obstacles which are located in the lanelet network
:param obstacles: The list of obstacles to check
:return: The list of obstacles which are located in the lanelet network
"""
res = list()
obstacle_to_lanelet_map = self.map_obstacles_to_lanelets(obstacles)
for k in obstacle_to_lanelet_map.keys():
obs = obstacle_to_lanelet_map[k]
for o in obs:
if o not in res:
res.append(o)
return res
def map_obstacles_to_lanelets(self, obstacles: List[Obstacle]) -> Dict[int, List[Obstacle]]:
"""
Maps a given list of obstacles to the lanelets of the lanelet network
:param obstacles: The list of CR obstacles
:return: A dictionary with the lanelet id as key and the list of obstacles on the lanelet as a List[Obstacles]
"""
mapping = {}
for la in self.lanelets:
# map obstacles to current lanelet
mapped_objs = la.get_obstacles(obstacles)
# check if mapping is not empty
if len(mapped_objs) > 0:
mapping[la.lanelet_id] = mapped_objs
return mapping
    def lanelets_in_proximity(self, point: np.ndarray, radius: float) -> List[Lanelet]:
        """
        Finds all lanelets which intersect a given circle, defined by the center point and radius

        :param point: The center of the circle
        :param radius: The radius of the circle
        :return: The list of lanelets which intersect the given circle, sorted by distance
        """
        assert is_real_number_vector(point, length=2), '<LaneletNetwork/lanelets_in_proximity>: provided point is ' \
                                                       'not valid! point = {}'.format(point)
        assert is_positive(
            radius), '<LaneletNetwork/lanelets_in_proximity>: provided radius is not valid! radius = {}'.format(
            radius)
        # get list of lanelet ids
        ids = self._lanelets.keys()
        # output list
        lanes = dict()
        # compare squared distances against the squared radius - avoids a sqrt per vertex
        rad_sqr = radius ** 2
        # distance dict for sorting
        distance_list = list()
        # go through list of lanelets
        for i in ids:
            # if current lanelet has not already been added to lanes list
            if i not in lanes:
                lanelet = self.find_lanelet_by_id(i)
                # compute distances (we are not using the sqrt for computational effort)
                distance = (lanelet.center_vertices - point) ** 2.
                distance = distance[:, 0] + distance[:, 1]
                # check if at least one distance is smaller than the radius
                if any(np.greater_equal(rad_sqr, distance)):
                    lanes[i] = self.find_lanelet_by_id(i)
                    distance_list.append(np.min(distance))
                    # check if adjacent lanelets can be added as well, using the closest
                    # center vertex as probe index for the left/right boundary vertices
                    index_min_dist = np.argmin(distance - rad_sqr)
                    # check right side of lanelet
                    if lanelet.adj_right is not None:
                        p = (lanelet.right_vertices[index_min_dist, :] - point) ** 2
                        p = p[0] + p[1]
                        if np.greater(rad_sqr, p) and lanelet.adj_right not in lanes:
                            lanes[lanelet.adj_right] = self.find_lanelet_by_id(lanelet.adj_right)
                            distance_list.append(p)
                    # check left side of lanelet
                    if lanelet.adj_left is not None:
                        p = (lanelet.left_vertices[index_min_dist, :] - point) ** 2
                        p = p[0] + p[1]
                        if np.greater(rad_sqr, p) and lanelet.adj_left not in lanes:
                            lanes[lanelet.adj_left] = self.find_lanelet_by_id(lanelet.adj_left)
                            distance_list.append(p)
        # sort list according to distance (distance_list entries parallel lanes insertion order)
        indices = np.argsort(distance_list)
        lanelets = list(lanes.values())
        # return sorted list
        return [lanelets[i] for i in indices]
| 45.103774 | 120 | 0.629314 |
a0bf221732ca55e79444af87da162c6c9266b8fc
| 363 |
py
|
Python
|
djangoProject/djangoProject/myApp/urls.py
|
EldiiarDzhunusov/Code
|
6b0708e4007233d3efdc74c09d09ee5bc377a45d
|
[
"MIT"
] | 2 |
2020-10-12T06:50:03.000Z
|
2021-06-08T17:19:43.000Z
|
djangoProject/djangoProject/myApp/urls.py
|
EldiiarDzhunusov/Code
|
6b0708e4007233d3efdc74c09d09ee5bc377a45d
|
[
"MIT"
] | null | null | null |
djangoProject/djangoProject/myApp/urls.py
|
EldiiarDzhunusov/Code
|
6b0708e4007233d3efdc74c09d09ee5bc377a45d
|
[
"MIT"
] | 1 |
2020-12-22T16:44:50.000Z
|
2020-12-22T16:44:50.000Z
|
from django.urls import path
from . import views
# URL routes for this app; the ``name`` values are used for reverse URL lookups.
urlpatterns = [
    path("test/", views.index, name = "index"),
    path('completed/',views.show_completed,name= "completed"),
    # detail page for a single action, selected by its integer primary key
    path('<int:action_id>/', views.show_action, name='action'),
    path('update/', views.update_status, name="update_status"),
    path('new/', views.new_action,name = "new_action"),
]
| 33 | 63 | 0.674931 |
a0c050b20614c1dbb61208ccd768082e1160610d
| 14,673 |
py
|
Python
|
tests/test_data_frame.py
|
gordonwatts/dataframe_expressions
|
cf135415f739377e9c2accb82606957417c7e0e6
|
[
"MIT"
] | 4 |
2020-03-16T14:22:33.000Z
|
2021-09-08T17:56:47.000Z
|
tests/test_data_frame.py
|
gordonwatts/dataframe_expressions
|
cf135415f739377e9c2accb82606957417c7e0e6
|
[
"MIT"
] | 26 |
2020-05-28T20:58:42.000Z
|
2020-10-21T01:27:17.000Z
|
tests/test_data_frame.py
|
gordonwatts/dataframe_expressions
|
cf135415f739377e9c2accb82606957417c7e0e6
|
[
"MIT"
] | null | null | null |
import ast
from typing import List, Optional, cast
import pytest
from dataframe_expressions import (
Column, DataFrame, ast_Callable, ast_Column, ast_DataFrame, define_alias)
from .utils_for_testing import reset_var_counter # NOQA
# numpy math functions (??)
# Advanced math operators
# (https://docs.python.org/3/reference/datamodel.html?highlight=__add__#emulating-numeric-types)
# the operator "in" (contains)? to see if one jet is in another collection?
# the operator len
# Make sure if d1 and d2 are two different sized,sourced DataFrames, then d1[d2.x] fails
# Filter functions - so pass a filter that gets called with whatever you are filtering on, and
# returns.
# https://stackoverflow.com/questions/847936/how-can-i-find-the-number-of-arguments-of-a-python-function
# Aliases allow some recursion, but with total flexability. If there is a circle and you want
# things done a second time, they
# won't be. Perhaps when we have an actual problem we can resolve this.
| 28.827112 | 122 | 0.647584 |
a0c2eb12b9028951da45c66cf06efe7db3fad008
| 520 |
py
|
Python
|
redis/redismq/redismq.py
|
dineshkumar2509/learning-python
|
e8af11ff0b396da4c3f2cfe21d14131bae4b2adb
|
[
"MIT"
] | 86 |
2015-06-13T16:53:55.000Z
|
2022-03-24T20:56:42.000Z
|
redis/redismq/redismq.py
|
pei-zheng-yi/learning-python
|
55e350dfe44cf04f7d4408e76e72d2f467bd42ce
|
[
"MIT"
] | 9 |
2015-05-27T07:52:44.000Z
|
2022-03-29T21:52:40.000Z
|
redis/redismq/redismq.py
|
pei-zheng-yi/learning-python
|
55e350dfe44cf04f7d4408e76e72d2f467bd42ce
|
[
"MIT"
] | 124 |
2015-12-10T01:17:18.000Z
|
2021-11-08T04:03:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
# connection to the local Redis server, database 0 (default port 6379)
rc = redis.StrictRedis(host='localhost', port=6379, db=0)
| 15.294118 | 57 | 0.634615 |
a0c41cacd5163331beb9572314dcb4bf4d9b8235
| 12,660 |
py
|
Python
|
main.py
|
ESSAKHI10/SharpZone
|
1d145cb22c5a8f6777d2f6e05a9a16f8e528c92c
|
[
"MIT"
] | null | null | null |
main.py
|
ESSAKHI10/SharpZone
|
1d145cb22c5a8f6777d2f6e05a9a16f8e528c92c
|
[
"MIT"
] | null | null | null |
main.py
|
ESSAKHI10/SharpZone
|
1d145cb22c5a8f6777d2f6e05a9a16f8e528c92c
|
[
"MIT"
] | null | null | null |
# import encode
import eel
import cv2
import io
import numpy as np
import base64
import os
import time
import face_recognition
import pickle
import imutils
import datetime
from multiprocessing.pool import ThreadPool
import random
import shutil
from database import *
from camera import VideoCamera
from SceneChangeDetect import sceneChangeDetect
import login
import encode_student_data
import warnings
warnings.filterwarnings('ignore')
eel.init('web')  # expose files under the 'web' directory to the Eel frontend
# ------ Global Variable ----
camera_status = 1  # 1 while the camera preview loop should keep running
capture_status = False  # set True (elsewhere) to snapshot the next frame
student_id = ''  # ID of the student currently being enrolled (set elsewhere)
fullnamee = ''  # full name of that student (spelling with double 'e' is used throughout)
def gen(url):
    """Yield JPEG-encoded frames from the video source at *url*.

    Runs until the module-global ``camera_status`` is set to a value other
    than 1 or the stream can no longer be read.  While ``capture_status`` is
    set, the current frame is additionally written to
    ``dataset/<student_id>_<fullnamee>/`` and forwarded via
    ``send_capture_photo`` (defined elsewhere - TODO confirm).
    """
    video = cv2.VideoCapture(url)
    global camera_status
    global capture_status
    camera_status = 1
    while camera_status == 1:
        success, img = video.read()
        if success == False:
            # camera not connected / stream ended
            print("cam nt cnt")
            break
        if capture_status == True:
            # one-shot capture requested by the UI: persist the raw frame with a
            # random suffix (only 1000 distinct names - collisions possible)
            save_path = 'dataset/' + student_id + '_' + fullnamee
            filename = save_path + "/photo" + \
                str(random.randint(0, 999)) + ".jpg"
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            cv2.imwrite(filename, img)
            send_capture_photo(img)
            capture_status = False
        # stream the frame to the caller as JPEG bytes
        ret, jpeg = cv2.imencode('.jpg', img)
        img = jpeg.tobytes()
        yield img
# ---- student face-data maintenance ----
def delete_student_data_file(student_id):
    """Remove all face encodings belonging to *student_id* from encodings.pickle.

    An entry matches when *student_id* occurs as a substring of the stored
    name (names are stored as '<id>_<fullname>').  The pickle file is
    rewritten in place with the matching names and their encodings removed.

    :param student_id: ID (sub)string identifying the student to delete
    """
    # load the face data
    with open('encodings.pickle', 'rb') as f:
        face_data = pickle.load(f)
    encodings = face_data['encodings']
    names = face_data['names']
    # indices of all entries belonging to this student
    matched = {i for i, item in enumerate(names) if student_id in item}
    # BUGFIX: rebuild both lists in lockstep so names and encodings stay aligned.
    # The previous implementation removed names by *value* (names.remove(student_id),
    # which raises ValueError unless an entry equals the bare ID) and repeatedly
    # deleted encodings at the first matched index, corrupting the name/encoding
    # pairing as indices shifted.
    face_data['names'] = [name for i, name in enumerate(names) if i not in matched]
    face_data['encodings'] = [enc for i, enc in enumerate(encodings) if i not in matched]
    # save modified face data
    with open('encodings.pickle', 'wb') as f:
        f.write(pickle.dumps(face_data))
# launch the Eel app with the login page as entry point (window size 1307x713);
# the commented lines below are alternative entry points used during development
eel.start('template/pages/samples/login.html', size=(1307, 713))
#eel.start('template/index.html', size=(1307, 713))
# eel.start('dashboard.html', size=(1307, 713))
| 29.648712 | 169 | 0.562243 |
39f81d8b6eef50e0aea91f95d8884d5a0a59d256
| 3,713 |
py
|
Python
|
models/job.py
|
k-wojcik/kylin_client_tool
|
0fe827d1c8a86e3da61c85c48f78ce03c9260f3c
|
[
"Apache-2.0"
] | null | null | null |
models/job.py
|
k-wojcik/kylin_client_tool
|
0fe827d1c8a86e3da61c85c48f78ce03c9260f3c
|
[
"Apache-2.0"
] | null | null | null |
models/job.py
|
k-wojcik/kylin_client_tool
|
0fe827d1c8a86e3da61c85c48f78ce03c9260f3c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'Huang, Hua'
from models.object import JsonSerializableObj
| 30.434426 | 75 | 0.622408 |
39f8dcdaeba92c1fff96ab2beb0ef7065bdd2f6c
| 1,770 |
py
|
Python
|
stakingsvc/walletgui/views/paymentmethodview.py
|
biz2013/xwjy
|
8f4b5e3e3fc964796134052ff34d58d31ed41904
|
[
"Apache-2.0"
] | 1 |
2019-12-15T16:56:44.000Z
|
2019-12-15T16:56:44.000Z
|
stakingsvc/walletgui/views/paymentmethodview.py
|
biz2013/xwjy
|
8f4b5e3e3fc964796134052ff34d58d31ed41904
|
[
"Apache-2.0"
] | 87 |
2018-01-06T10:18:31.000Z
|
2022-03-11T23:32:30.000Z
|
stakingsvc/walletgui/views/paymentmethodview.py
|
biz2013/xwjy
|
8f4b5e3e3fc964796134052ff34d58d31ed41904
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging, sys
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from walletgui.controller.global_constants import *
from walletgui.controller.crypto_utils import CryptoUtility
from walletgui.controller.walletmanager import WalletManager
from walletgui.controller.paymentmethodmanager import PaymentMethodManager
from walletgui.views import errorpageview
from walletgui.views.models.useraccountinfo import *
# module-level logger; "site.dashboard" groups this view's output with the dashboard logs
logger = logging.getLogger("site.dashboard")
| 37.659574 | 76 | 0.70565 |
39f8e72f0d8a4ab5e6ca1adccd579c3125c23d90
| 1,643 |
py
|
Python
|
tools.py
|
yflyzhang/cascade_virality
|
9d856a3fbe45330a9434ba4bad9d5f248e2f1dd5
|
[
"MIT"
] | 6 |
2020-09-09T15:31:02.000Z
|
2022-02-16T04:57:55.000Z
|
tools.py
|
yflyzhang/cascade_virality
|
9d856a3fbe45330a9434ba4bad9d5f248e2f1dd5
|
[
"MIT"
] | null | null | null |
tools.py
|
yflyzhang/cascade_virality
|
9d856a3fbe45330a9434ba4bad9d5f248e2f1dd5
|
[
"MIT"
] | null | null | null |
import numpy as np
import networkx as nx
# For illustration purpose only [easy to understand the process]
# -----------------------------
def pure_cascade_virality(G):
    """Compute cascade virality of a directed tree G by direct path averaging.

    For every internal (non-leaf) node, take the mean shortest-path length
    from that node to all of its reachable descendants and sum those means.
    Returns None (implicit) when G is not weakly connected.
    """
    if not nx.is_weakly_connected(G):
        return  # disconnected cascade: virality undefined

    total = 0
    for node, out_deg in G.out_degree():
        if out_deg <= 0:
            continue  # leaves contribute nothing
        dists = nx.single_source_shortest_path_length(G, node)
        below = [d for d in dists.values() if d > 0]  # drop the node itself (distance 0)
        total += np.mean(below)
    return total
# Works in a recursive manner [more efficient]
# -----------------------------
def recursive_path_length(G, V, seed):
    """Fill V[seed] with the lengths of all downward paths starting at seed.

    V maps each visited node to the list of path lengths from that node to
    every node below it (G must be a directed tree).  The list stored for
    seed is also returned so callers can compose results recursively.
    """
    lengths = []
    V[seed] = lengths
    for child in G.successors(seed):
        child_lengths = recursive_path_length(G, V, child)
        lengths.append(1)  # the edge seed -> child itself
        lengths.extend(length + 1 for length in child_lengths)
    return lengths
def recursive_cascade_virality(G, source=None):
    """Compute cascade virality of a directed tree G via one recursive pass.

    Equivalent to pure_cascade_virality, but collects every node's list of
    path lengths to its descendants in a single traversal
    (recursive_path_length) and then sums the per-node means.

    Parameters
    ----------
    G : directed graph (tree)
    source : hashable, optional
        Root node.  If omitted, the unique node with in-degree 0 is used.

    Returns
    -------
    Cascade virality, or None when G is not weakly connected.
    """
    if not nx.is_weakly_connected(G):
        # return None
        return

    # BUG FIX: the original used `if not source:`, which also triggers root
    # auto-detection for a falsy-but-valid explicit root label such as 0 or "".
    if source is None:
        # if root is not given, find it by yourself
        source = [k for (k, v) in G.in_degree() if v == 0][0]

    V_dic = {}
    recursive_path_length(G, V_dic, source)
    # return V_dic  # (would return the raw per-node path-length lists)

    virality = 0
    for lengths in V_dic.values():
        if lengths:  # leaves have empty lists and contribute nothing
            virality += np.mean(lengths)
    return virality  # return cascade virality
| 23.471429 | 105 | 0.57395 |
39f9818d1295e4cbcfc1bb13178c81b8bc72f7ba
| 1,492 |
py
|
Python
|
pyjira/actions.py
|
FulcrumIT/pyjira
|
8ed0d22136808ba95ace253e66dd4ad7bb6b387a
|
[
"MIT"
] | 1 |
2020-11-05T10:24:15.000Z
|
2020-11-05T10:24:15.000Z
|
pyjira/actions.py
|
FulcrumIT/pyjira
|
8ed0d22136808ba95ace253e66dd4ad7bb6b387a
|
[
"MIT"
] | null | null | null |
pyjira/actions.py
|
FulcrumIT/pyjira
|
8ed0d22136808ba95ace253e66dd4ad7bb6b387a
|
[
"MIT"
] | 2 |
2017-05-15T20:06:25.000Z
|
2020-11-17T09:46:34.000Z
|
import pyjira.api as _api
import json as _json
def get_issues(id, limit=50):
    """Return up to `limit` issues for a project.

    Parameters:
    - id: id of a project.
    - limit: max number of results to be returned (default 50).
    """
    query = "/search?jql=project={}&maxResults={}".format(id, limit)
    return _api.rest(query)
def get_issue(id):
    """Fetch a single issue and its details.

    Parameters:
    - id: id of an issue.
    """
    return _api.rest("/issue/{}".format(id))
def get_all_fields():
    """Return every field defined on the JIRA instance (JSON string)."""
    endpoint = "/field"
    return _api.rest(endpoint)
def get_field(id):
    """Look up one field by id, with or without the "customfield_" prefix.

    Parameters:
    - id: id of a field.

    Returns the matching field as a JSON string, or None when no field
    matches.
    """
    wanted = str(id)
    for field in _json.loads(get_all_fields()):
        field_id = field["id"]
        if wanted in (field_id, field_id.replace("customfield_", "")):
            return _json.dumps(field)
def get_issue_fields(id, field_names_enabled=True):
    """Collect the non-empty fields of an issue as a JSON object.

    Parameters:
    - id: id of an issue.
    - field_names_enabled: when True (default), opaque "customfield_*"
      keys are replaced by their human-readable field names.
    """
    issue = _json.loads(get_issue(id))
    result = {}
    for key, value in issue["fields"].items():
        if not value:
            continue  # skip empty/None fields entirely
        if field_names_enabled and "customfield_" in key:
            # resolve the custom-field id to its display name
            key = _json.loads(get_field(key))["name"]
        result[key] = value
    return _json.dumps(result)
| 24.866667 | 84 | 0.589812 |
39fb765d92c4d3395b8dec3f9fb952f0fa19dddd
| 963 |
py
|
Python
|
gongish/cli/serve.py
|
meyt/gongish
|
0e3cd478677c19c9331a2b563ce792d16f2860b3
|
[
"MIT"
] | null | null | null |
gongish/cli/serve.py
|
meyt/gongish
|
0e3cd478677c19c9331a2b563ce792d16f2860b3
|
[
"MIT"
] | 1 |
2021-09-11T22:53:48.000Z
|
2021-09-11T22:53:48.000Z
|
gongish/cli/serve.py
|
meyt/gongish
|
0e3cd478677c19c9331a2b563ce792d16f2860b3
|
[
"MIT"
] | null | null | null |
import importlib
from argparse import ArgumentParser
from wsgiref.simple_server import make_server
# CLI argument parser for the `gongish serve` development server.
p = ArgumentParser(
    prog="gongish serve", description="Serve a WSGI application"
)
# Positional: module path plus attribute name of the WSGI app ("myapp:app").
p.add_argument(
    "module",
    nargs="?",
    help="Module and application name (e.g: myapp:app)",
    type=str,
)
# Optional: host:port the wsgiref simple server binds to.
p.add_argument(
    "-b",
    "--bind",
    type=str,
    help="Bind address (default: localhost:8080)",
    default="localhost:8080",
)
| 22.395349 | 64 | 0.626168 |
39fbc1222f2bd73553fac90c67abef716e48ef7b
| 2,248 |
py
|
Python
|
tests/test_cmd_trie.py
|
ncloudioj/rhino-rox
|
7e3c70edebca5cc0f847d777c2bff02218b4ca69
|
[
"BSD-3-Clause"
] | 1 |
2016-06-14T11:16:43.000Z
|
2016-06-14T11:16:43.000Z
|
tests/test_cmd_trie.py
|
ncloudioj/rhino-rox
|
7e3c70edebca5cc0f847d777c2bff02218b4ca69
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cmd_trie.py
|
ncloudioj/rhino-rox
|
7e3c70edebca5cc0f847d777c2bff02218b4ca69
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import redis
import random
| 35.125 | 77 | 0.607651 |
39fbe8706a2051eee9dadaa5adf1cf67f5342a04
| 325 |
py
|
Python
|
4. 01.07.2021/0. Secret Messages. New position.py
|
AntonVasko/CodeClub-2021-SUMMER
|
14a80168bb7c2eb3c0c157d6d5b7630c05decb31
|
[
"CC0-1.0"
] | null | null | null |
4. 01.07.2021/0. Secret Messages. New position.py
|
AntonVasko/CodeClub-2021-SUMMER
|
14a80168bb7c2eb3c0c157d6d5b7630c05decb31
|
[
"CC0-1.0"
] | null | null | null |
4. 01.07.2021/0. Secret Messages. New position.py
|
AntonVasko/CodeClub-2021-SUMMER
|
14a80168bb7c2eb3c0c157d6d5b7630c05decb31
|
[
"CC0-1.0"
] | null | null | null |
# Secret Messages: report a character's alphabet index and where it moves
# after a Caesar-style shift of `key` positions.
alphabet = 'abcdefghijklmnopqrstuvwxyz'
key = 3  # shift distance
character = input('Please enter a character ')
# str.find returns -1 when the character is not a lowercase letter
position = alphabet.find(character)
print('Position of a character ', character, ' is ', position)
newPosition = position + key  # may run past 'z'; no wrap-around here
print('New position of a character ', character, ' is ', newPosition)
| 32.5 | 69 | 0.747692 |
39fcb7feb468394c971a4be4fe1ebd1c774cf3a6
| 1,376 |
py
|
Python
|
examples/plot/lines.py
|
beidongjiedeguang/manim-express
|
e9c89b74da3692db3ea9b568727e78d5cbcef503
|
[
"MIT"
] | 12 |
2021-06-14T07:28:29.000Z
|
2022-02-25T02:49:49.000Z
|
examples/plot/lines.py
|
beidongjiedeguang/manim-kunyuan
|
e9c89b74da3692db3ea9b568727e78d5cbcef503
|
[
"MIT"
] | 1 |
2022-02-01T12:30:14.000Z
|
2022-02-01T12:30:14.000Z
|
examples/plot/lines.py
|
beidongjiedeguang/manim-express
|
e9c89b74da3692db3ea9b568727e78d5cbcef503
|
[
"MIT"
] | 2 |
2021-05-13T13:24:15.000Z
|
2021-05-18T02:56:22.000Z
|
from examples.example_imports import *
from manim_express.eager import PlotObj

# Example scene: plot a circle and a cosine trace, then animate a grid of
# transformed copies of the circle plot.
scene = EagerModeScene(screen_size=Size.bigger)
graph = Line().scale(0.2)

# Demo points (only their shape/max are printed; not plotted below).
ps = np.random.rand(10, 3)
print(ps.shape)
print(ps[:, 0].max())

# Unit circle sampled at 100 angles.
theta = np.linspace(0, 2 * PI, 100)
x = np.cos(theta)
y = np.sin(theta)
p = PlotObj(x, y)
scene.play(ShowCreation(p))
# Cosine as a function of theta, drawn in red.
s = PlotObj(theta, x).set_color(RED)
scene.play(ShowCreation(s))

# 3x3 grid of copies of the circle plot, then a sequence of animations.
grid = p.get_grid(3, 3)
scene.add(grid)
scene.play(grid.animate.shift(LEFT))
scene.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN, RED))
scene.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
# Warp the whole grid with a sine-based point-wise mapping.
scene.play(
    grid.animate.apply_function(
        lambda p: [
            p[0] + 0.5 * math.sin(p[1]),
            p[1] + 0.5 * math.sin(p[0]),
            p[2]
        ]
    ),
    run_time=5,
)
scene.hold_on()
| 27.52 | 111 | 0.5625 |
39fede8d13d0249be971c45d4492a0a209527ae6
| 785 |
py
|
Python
|
get_observation.py
|
RadixSeven/FhirGaP
|
1fb8ff8b86089cdec9b1f796e06aeb0e20db14a0
|
[
"Apache-2.0"
] | null | null | null |
get_observation.py
|
RadixSeven/FhirGaP
|
1fb8ff8b86089cdec9b1f796e06aeb0e20db14a0
|
[
"Apache-2.0"
] | null | null | null |
get_observation.py
|
RadixSeven/FhirGaP
|
1fb8ff8b86089cdec9b1f796e06aeb0e20db14a0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Fetch one FHIR Observation resource using bearer-token authentication.
# Note that this is python3 only
import argparse
import requests

# Command-line interface: observation id, auth token, and server base URL.
parser = argparse.ArgumentParser(
    "Get an observation from a FHIR server with authentication")
parser.add_argument(
    "id", help="The observation id to retrieve")
parser.add_argument(
    "auth", default="Admin",
    help="The authorization string to use. \"Bearer \" will be added to "
    "the front.")
parser.add_argument(
    "--url", default="http://35.245.174.218:8080/hapi-fhir-jpaserver/fhir/",
    help="The base url of the server")
args = parser.parse_args()

# FHIR JSON content type plus the bearer-token Authorization header.
headers = {
    'Content-Type': "application/fhir+json; charset=utf-8",
    'Authorization': "Bearer " + args.auth,
}
# NOTE(review): the default --url already ends with "/", so this yields
# ".../fhir//Observation/<id>" — most servers tolerate it; confirm.
response = requests.get(args.url + "/Observation/" + args.id, headers=headers)
print(response.json())
| 30.192308 | 78 | 0.699363 |
39ff330d14f36471f4ac2d1038f80fb721a10e6c
| 14,331 |
py
|
Python
|
resources/src/mythbox/mythtv/enums.py
|
bopopescu/ServerStatus
|
a883598248ad6f5273eb3be498e3b04a1fab6510
|
[
"MIT"
] | null | null | null |
resources/src/mythbox/mythtv/enums.py
|
bopopescu/ServerStatus
|
a883598248ad6f5273eb3be498e3b04a1fab6510
|
[
"MIT"
] | 1 |
2015-04-21T22:05:02.000Z
|
2015-04-22T22:27:15.000Z
|
resources/src/mythbox/mythtv/enums.py
|
GetSomeBlocks/Score_Soccer
|
a883598248ad6f5273eb3be498e3b04a1fab6510
|
[
"MIT"
] | 2 |
2018-04-17T17:34:39.000Z
|
2020-07-26T03:43:33.000Z
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2010 [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from odict import odict
# --------------------------------------------------------------------------------------------------------------
# Duplicate Check Method Check for Duplicates in Episode Filter dupin dupmethod Makes sense
# --------------------------------------------------------------------------------------------------------------
# None All Recordings None 15 1 Y
# Subtitle All Recordings None 15 2 Y
# Description All Recordings None 15 4 Y
# Subtitle & Desc All Recordings None 15 6 Y
# Subtitle then Desc All Recordings None 15 8 Y
#
# None Current Recordings None 1 1 Y
# Subtitle Current Recordings None 1 2 Y
#
# None Current Recordings New Epi Only 17 (16+1) 1 Y
# None All Recordings New Epi Only 31 (16+15) 1 Y
# None All Recordings Exclude Generics 79 (64+15 1 Y
# None Previous Recordings Exclude Rep&Gen 98 (64+32+2) 1 Y
#
| 29.670807 | 114 | 0.526551 |
2600aabf67e890934f21d69c79a38729246c8b46
| 3,989 |
py
|
Python
|
model_training_scripts/image_processing.py
|
jc2554/iVending
|
2a6b04143a56e202eba99b0a509945cf31aa956d
|
[
"MIT"
] | null | null | null |
model_training_scripts/image_processing.py
|
jc2554/iVending
|
2a6b04143a56e202eba99b0a509945cf31aa956d
|
[
"MIT"
] | null | null | null |
model_training_scripts/image_processing.py
|
jc2554/iVending
|
2a6b04143a56e202eba99b0a509945cf31aa956d
|
[
"MIT"
] | null | null | null |
"""
script to post-process training images by using OpenCV face detection
and normalization
MIT License
Copyright (c) 2019 JinJie Chen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import cv2
import numpy as np
import os
"""
process all image in the user_id subdirectory , save processed images in the
user_id folderdirectory
"""
"""
Normalize image by
Truncate out the face from teh image using the bounding box
Resize the image with interpolation using openCv
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--user', help='user id, -1 for all')
args = parser.parse_args()
print(args)
classifier = cv2.CascadeClassifier("../src/models/haarcascade_frontalface_default.xml")
images, labels, labels_dic = process_images(args.user)
print("num images: ", len(images))
print("labels_dic: ", labels_dic)
| 36.263636 | 102 | 0.656806 |
260184f873fdef40a6660e5cdbc4d152fa8c734a
| 1,816 |
py
|
Python
|
flask_map.py
|
zenranda/proj5-map
|
13dc8866483f45ac806342c1b3aa2eec1354a0dc
|
[
"Artistic-2.0"
] | null | null | null |
flask_map.py
|
zenranda/proj5-map
|
13dc8866483f45ac806342c1b3aa2eec1354a0dc
|
[
"Artistic-2.0"
] | null | null | null |
flask_map.py
|
zenranda/proj5-map
|
13dc8866483f45ac806342c1b3aa2eec1354a0dc
|
[
"Artistic-2.0"
] | null | null | null |
import flask
from flask import render_template
from flask import request
from flask import url_for
import json
import logging

###
# Globals
###
app = flask.Flask(__name__)
# Project-local settings module (secret key, debug flag, port).
import CONFIG

###
# Pages
###

#############
#
# Set up to run from cgi-bin script, from
# gunicorn, or stand-alone.
#
app.secret_key = CONFIG.secret_key
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)

# Stand-alone mode: serve on all interfaces at the configured port.
if __name__ == "__main__":
    print("Opening for global access on port {}".format(CONFIG.PORT))
    app.run(port=CONFIG.PORT, host="0.0.0.0")
| 25.222222 | 91 | 0.614537 |
2602aa16755c35ffe309d7e1deefd2b15d53fedd
| 3,717 |
py
|
Python
|
tcvx21/grillix_post/observables/parallel_gradient_m.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 1 |
2021-12-13T11:52:39.000Z
|
2021-12-13T11:52:39.000Z
|
tcvx21/grillix_post/observables/parallel_gradient_m.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 2 |
2021-12-18T17:18:52.000Z
|
2022-01-26T09:23:23.000Z
|
tcvx21/grillix_post/observables/parallel_gradient_m.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 2 |
2021-12-13T12:56:09.000Z
|
2022-01-25T20:30:28.000Z
|
import xarray as xr
import numpy as np
from pathlib import Path
from tcvx21.grillix_post.components import FieldlineTracer
from tcvx21.grillix_post.lineouts import Lineout
xr.set_options(keep_attrs=True)
def initialise_lineout_for_parallel_gradient(
    lineout, grid, equi, norm, npol, stored_trace: Path = None
):
    """
    Traces to find the forward and reverse lineouts for a given lineout.

    Expensive! Needs to be done once per lineout that you want to take
    gradients with.  When `stored_trace` is given, the trace is cached in
    that netCDF file and reused on later calls if its lineout points match
    the current lineout.
    """
    fieldline_tracer = FieldlineTracer(equi)
    try:
        # Fast path: reuse a previously stored trace that matches this lineout.
        print(f"Attempting to read stored trace from {stored_trace}")
        ds = xr.open_dataset(stored_trace)
        assert np.allclose(ds["lineout_x"], lineout.r_points)
        assert np.allclose(ds["lineout_y"], lineout.z_points)
        # NOTE(review): a mismatching cache raises AssertionError, which is
        # NOT caught below — confirm that failing hard here is intended.
    except (FileNotFoundError, ValueError):
        # Cache miss: trace field lines one toroidal plane forward and back.
        forward_trace, reverse_trace = fieldline_tracer.find_neighbouring_points(
            lineout.r_points, lineout.z_points, n_toroidal_planes=int(npol)
        )
        # Trace columns: x position, y position, parallel distance l.
        ds = xr.Dataset(
            data_vars=dict(
                forward_x=("points", forward_trace[:, 0]),
                forward_y=("points", forward_trace[:, 1]),
                forward_l=("points", forward_trace[:, 2]),
                reverse_x=("points", reverse_trace[:, 0]),
                reverse_y=("points", reverse_trace[:, 1]),
                reverse_l=("points", reverse_trace[:, 2]),
                lineout_x=("points", lineout.r_points),
                lineout_y=("points", lineout.z_points),
            )
        )
        if stored_trace is not None:
            # Replace any stale cache file with the freshly computed trace.
            if stored_trace.exists():
                stored_trace.unlink()
            ds.to_netcdf(stored_trace)
    # Attach interpolating lineouts at the traced neighbour positions.
    lineout.forward_lineout = Lineout(ds["forward_x"], ds["forward_y"])
    lineout.forward_lineout.setup_interpolation_matrix(grid, use_source_points=True)
    lineout.reverse_lineout = Lineout(ds["reverse_x"], ds["reverse_y"])
    lineout.reverse_lineout.setup_interpolation_matrix(grid, use_source_points=True)
    # Parallel distances to the neighbouring planes, normalised by R0.
    lineout.forward_distance = xr.DataArray(
        ds["forward_l"], dims="interp_points"
    ).assign_attrs(norm=norm.R0)
    lineout.reverse_distance = xr.DataArray(
        ds["reverse_l"], dims="interp_points"
    ).assign_attrs(norm=norm.R0)
def compute_parallel_gradient(lineout, field):
    """Centred-difference parallel gradient of `field` on every phi plane.

    Multiply the result by the penalisation direction function to get the
    direction 'towards the wall'.  This isn't quite the same as projecting
    onto the wall normal, but for computing the parallel heat flux it is
    actually more helpful.

    Requires initialise_lineout_for_parallel_gradient to have been called
    on `lineout` first.
    """
    assert hasattr(lineout, "forward_lineout") and hasattr(
        lineout, "reverse_lineout"
    ), f"Have to call initialise_lineout_for_parallel_gradient on lineout before trying to compute_parallel_gradient"
    per_plane = []
    for plane_index in range(field.sizes["phi"]):
        per_plane.append(compute_gradient_on_plane(lineout, field, plane_index))
    return xr.concat(per_plane, dim="phi")
def compute_gradient_on_plane(lineout, field, plane):
    """Compute the parallel gradient on a single phi plane (centred difference).

    The forward value is interpolated on the forward-traced lineout at
    plane+1 and the reverse value on the reverse-traced lineout at plane-1
    (both wrapping around in phi); their difference is divided by the
    parallel distance between the two neighbouring planes.
    """
    n_phi = field.sizes["phi"]
    forward_value = lineout.forward_lineout.interpolate(
        field.isel(phi=np.mod(plane + 1, n_phi))
    )
    # BUG FIX: the reverse value must be sampled on the *reverse* lineout.
    # The original used forward_lineout here, which interpolates the wrong
    # field-line neighbour for the backward plane and corrupts the centred
    # difference.
    reverse_value = lineout.reverse_lineout.interpolate(
        field.isel(phi=np.mod(plane - 1, n_phi))
    )
    two_plane_distance = lineout.forward_distance - lineout.reverse_distance
    centred_difference = forward_value - reverse_value
    return (
        (centred_difference / two_plane_distance)
        .assign_coords(phi=plane)
        .assign_attrs(norm=field.norm / two_plane_distance.norm)
    )
| 37.17 | 117 | 0.684154 |
2603181f0b3082fdc38b19973a3aa85c33523f68
| 22,466 |
py
|
Python
|
tests/hwsim/test_ap_ht.py
|
rzr/wpasupplicant
|
3f7ac05878ba965e941f2b5b80b8cb744e63f506
|
[
"Unlicense"
] | 19 |
2015-04-02T13:50:00.000Z
|
2022-01-19T02:45:18.000Z
|
tests/hwsim/test_ap_ht.py
|
jku/hostap
|
a61fcc131aa6a7e396eee6a3c613001bf0475cd1
|
[
"Unlicense"
] | 3 |
2016-03-16T13:46:10.000Z
|
2016-08-30T12:42:52.000Z
|
tests/hwsim/test_ap_ht.py
|
jku/hostap
|
a61fcc131aa6a7e396eee6a3c613001bf0475cd1
|
[
"Unlicense"
] | 11 |
2015-05-18T07:37:12.000Z
|
2021-11-12T10:28:50.000Z
|
# Test cases for HT operations with hostapd
# Copyright (c) 2013-2014, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import struct
import subprocess
import hostapd
def test_ap_ht40_scan(dev, apdev):
    """HT40 co-ex scan"""
    clear_scan_cache(apdev[0]['ifname'])
    # HT40- AP on channel 5: hostapd must run an HT co-existence scan first.
    params = { "ssid": "test-ht40",
               "channel": "5",
               "ht_capab": "[HT40-]"}
    hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
    # The interface should pass through the HT_SCAN state (retry once,
    # since the state transition may lag slightly behind add_ap).
    state = hapd.get_status_field("state")
    if state != "HT_SCAN":
        time.sleep(0.1)
        state = hapd.get_status_field("state")
        if state != "HT_SCAN":
            raise Exception("Unexpected interface state - expected HT_SCAN")
    ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
    if not ev:
        raise Exception("AP setup timed out")
    state = hapd.get_status_field("state")
    if state != "ENABLED":
        raise Exception("Unexpected interface state - expected ENABLED")
    # With no conflicting BSS the AP keeps HT40-: primary channel 5
    # (2432 MHz) with the secondary channel below (-1).
    freq = hapd.get_status_field("freq")
    if freq != "2432":
        raise Exception("Unexpected frequency")
    pri = hapd.get_status_field("channel")
    if pri != "5":
        raise Exception("Unexpected primary channel")
    sec = hapd.get_status_field("secondary_channel")
    if sec != "-1":
        raise Exception("Unexpected secondary channel")
    # Finally verify a station can associate on the resulting frequency.
    dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
def test_ap_ht40_scan_conflict(dev, apdev):
"""HT40 co-ex scan conflict"""
clear_scan_cache(apdev[0]['ifname'])
params = { "ssid": "test-ht40",
"channel": "6",
"ht_capab": "[HT40+]"}
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-ht40",
"channel": "5",
"ht_capab": "[HT40-]"}
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
time.sleep(0.1)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
raise Exception("Unexpected interface state - expected HT_SCAN")
ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state - expected ENABLED")
freq = hapd.get_status_field("freq")
if freq != "2432":
raise Exception("Unexpected frequency")
pri = hapd.get_status_field("channel")
if pri != "5":
raise Exception("Unexpected primary channel")
sec = hapd.get_status_field("secondary_channel")
if sec != "0":
raise Exception("Unexpected secondary channel: " + sec)
dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
def test_ap_ht40_scan_legacy_conflict(dev, apdev):
"""HT40 co-ex scan conflict with legacy 20 MHz AP"""
clear_scan_cache(apdev[0]['ifname'])
params = { "ssid": "legacy-20",
"channel": "7", "ieee80211n": "0" }
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-ht40",
"channel": "5",
"ht_capab": "[HT40-]"}
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
time.sleep(0.1)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
raise Exception("Unexpected interface state - expected HT_SCAN")
ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state - expected ENABLED")
freq = hapd.get_status_field("freq")
if freq != "2432":
raise Exception("Unexpected frequency: " + freq)
pri = hapd.get_status_field("channel")
if pri != "5":
raise Exception("Unexpected primary channel: " + pri)
sec = hapd.get_status_field("secondary_channel")
if sec != "0":
raise Exception("Unexpected secondary channel: " + sec)
dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
def test_ap_ht40_scan_match(dev, apdev):
"""HT40 co-ex scan matching configuration"""
clear_scan_cache(apdev[0]['ifname'])
params = { "ssid": "test-ht40",
"channel": "5",
"ht_capab": "[HT40-]"}
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-ht40",
"channel": "5",
"ht_capab": "[HT40-]"}
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
time.sleep(0.1)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
raise Exception("Unexpected interface state - expected HT_SCAN")
ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state - expected ENABLED")
freq = hapd.get_status_field("freq")
if freq != "2432":
raise Exception("Unexpected frequency")
pri = hapd.get_status_field("channel")
if pri != "5":
raise Exception("Unexpected primary channel")
sec = hapd.get_status_field("secondary_channel")
if sec != "-1":
raise Exception("Unexpected secondary channel: " + sec)
dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
def test_ap_ht40_5ghz_match(dev, apdev):
"""HT40 co-ex scan on 5 GHz with matching pri/sec channel"""
clear_scan_cache(apdev[0]['ifname'])
try:
params = { "ssid": "test-ht40",
"hw_mode": "a",
"channel": "36",
"country_code": "US",
"ht_capab": "[HT40+]"}
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-ht40",
"hw_mode": "a",
"channel": "36",
"ht_capab": "[HT40+]"}
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
time.sleep(0.1)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
raise Exception("Unexpected interface state - expected HT_SCAN")
ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state - expected ENABLED")
freq = hapd.get_status_field("freq")
if freq != "5180":
raise Exception("Unexpected frequency")
pri = hapd.get_status_field("channel")
if pri != "36":
raise Exception("Unexpected primary channel")
sec = hapd.get_status_field("secondary_channel")
if sec != "1":
raise Exception("Unexpected secondary channel: " + sec)
dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
finally:
subprocess.call(['sudo', 'iw', 'reg', 'set', '00'])
def test_ap_ht40_5ghz_switch(dev, apdev):
"""HT40 co-ex scan on 5 GHz switching pri/sec channel"""
clear_scan_cache(apdev[0]['ifname'])
try:
params = { "ssid": "test-ht40",
"hw_mode": "a",
"channel": "36",
"country_code": "US",
"ht_capab": "[HT40+]"}
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-ht40",
"hw_mode": "a",
"channel": "40",
"ht_capab": "[HT40-]"}
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
time.sleep(0.1)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
raise Exception("Unexpected interface state - expected HT_SCAN")
ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state - expected ENABLED")
freq = hapd.get_status_field("freq")
if freq != "5180":
raise Exception("Unexpected frequency: " + freq)
pri = hapd.get_status_field("channel")
if pri != "36":
raise Exception("Unexpected primary channel: " + pri)
sec = hapd.get_status_field("secondary_channel")
if sec != "1":
raise Exception("Unexpected secondary channel: " + sec)
dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
finally:
subprocess.call(['sudo', 'iw', 'reg', 'set', '00'])
def test_ap_ht40_5ghz_switch2(dev, apdev):
"""HT40 co-ex scan on 5 GHz switching pri/sec channel (2)"""
clear_scan_cache(apdev[0]['ifname'])
try:
params = { "ssid": "test-ht40",
"hw_mode": "a",
"channel": "36",
"country_code": "US",
"ht_capab": "[HT40+]"}
hostapd.add_ap(apdev[1]['ifname'], params)
id = dev[0].add_network()
dev[0].set_network(id, "mode", "2")
dev[0].set_network_quoted(id, "ssid", "wpas-ap-open")
dev[0].set_network(id, "key_mgmt", "NONE")
dev[0].set_network(id, "frequency", "5200")
dev[0].set_network(id, "scan_freq", "5200")
dev[0].select_network(id)
time.sleep(1)
params = { "ssid": "test-ht40",
"hw_mode": "a",
"channel": "40",
"ht_capab": "[HT40-]"}
hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
time.sleep(0.1)
state = hapd.get_status_field("state")
if state != "HT_SCAN":
raise Exception("Unexpected interface state - expected HT_SCAN")
ev = hapd.wait_event(["AP-ENABLED"], timeout=10)
if not ev:
raise Exception("AP setup timed out")
state = hapd.get_status_field("state")
if state != "ENABLED":
raise Exception("Unexpected interface state - expected ENABLED")
freq = hapd.get_status_field("freq")
if freq != "5180":
raise Exception("Unexpected frequency: " + freq)
pri = hapd.get_status_field("channel")
if pri != "36":
raise Exception("Unexpected primary channel: " + pri)
sec = hapd.get_status_field("secondary_channel")
if sec != "1":
raise Exception("Unexpected secondary channel: " + sec)
dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq)
finally:
subprocess.call(['sudo', 'iw', 'reg', 'set', '00'])
def test_obss_scan(dev, apdev):
"""Overlapping BSS scan request"""
params = { "ssid": "obss-scan",
"channel": "6",
"ht_capab": "[HT40-]",
"obss_interval": "10" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
params = { "ssid": "another-bss",
"channel": "9",
"ieee80211n": "0" }
hostapd.add_ap(apdev[1]['ifname'], params)
dev[0].connect("obss-scan", key_mgmt="NONE", scan_freq="2437")
hapd.set("ext_mgmt_frame_handling", "1")
logger.info("Waiting for OBSS scan to occur")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=15)
if ev is None:
raise Exception("Timed out while waiting for OBSS scan to start")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=10)
if ev is None:
raise Exception("Timed out while waiting for OBSS scan results")
received = False
for i in range(0, 4):
frame = hapd.mgmt_rx(timeout=5)
if frame is None:
raise Exception("MGMT RX wait timed out")
if frame['subtype'] != 13:
continue
payload = frame['payload']
if len(payload) < 3:
continue
(category, action, ie) = struct.unpack('BBB', payload[0:3])
if category != 4:
continue
if action != 0:
continue
if ie == 72:
logger.info("20/40 BSS Coexistence report received")
received = True
break
if not received:
raise Exception("20/40 BSS Coexistence report not seen")
def test_obss_scan_40_intolerant(dev, apdev):
"""Overlapping BSS scan request with 40 MHz intolerant AP"""
params = { "ssid": "obss-scan",
"channel": "6",
"ht_capab": "[HT40-]",
"obss_interval": "10" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
params = { "ssid": "another-bss",
"channel": "7",
"ht_capab": "[40-INTOLERANT]" }
hostapd.add_ap(apdev[1]['ifname'], params)
dev[0].connect("obss-scan", key_mgmt="NONE", scan_freq="2437")
hapd.set("ext_mgmt_frame_handling", "1")
logger.info("Waiting for OBSS scan to occur")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=15)
if ev is None:
raise Exception("Timed out while waiting for OBSS scan to start")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=10)
if ev is None:
raise Exception("Timed out while waiting for OBSS scan results")
received = False
for i in range(0, 4):
frame = hapd.mgmt_rx(timeout=5)
if frame is None:
raise Exception("MGMT RX wait timed out")
if frame['subtype'] != 13:
continue
payload = frame['payload']
if len(payload) < 3:
continue
(category, action, ie) = struct.unpack('BBB', payload[0:3])
if category != 4:
continue
if action != 0:
continue
if ie == 72:
logger.info("20/40 BSS Coexistence report received")
received = True
break
if not received:
raise Exception("20/40 BSS Coexistence report not seen")
def test_olbc(dev, apdev):
    """OLBC detection"""
    # Start an HT40 AP with a short AP-table expiration so the OLBC state
    # can time out quickly once the overlapping legacy AP disappears.
    params = { "ssid": "test-olbc",
               "channel": "6",
               "ht_capab": "[HT40-]",
               "ap_table_expiration_time": "2" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    status = hapd.get_status()
    if status['olbc'] != '0' or status['olbc_ht'] != '0':
        raise Exception("Unexpected OLBC information")
    # Bring up an overlapping legacy (11b, non-HT, non-WMM) BSS on the same
    # channel: this must trigger both OLBC and HT OLBC protection.
    params = { "ssid": "olbc-ap",
               "hw_mode": "b",
               "channel": "6",
               "wmm_enabled": "0" }
    hostapd.add_ap(apdev[1]['ifname'], params)
    time.sleep(0.5)
    status = hapd.get_status()
    if status['olbc'] != '1' or status['olbc_ht'] != '1':
        raise Exception("Missing OLBC information")
    # Remove the legacy AP and wait for the OLBC state to expire.
    hapd_global = hostapd.HostapdGlobal()
    hapd_global.remove(apdev[1]['ifname'])
    logger.info("Waiting for OLBC state to time out")
    cleared = False
    for i in range(0, 15):
        time.sleep(1)
        status = hapd.get_status()
        if status['olbc'] == '0' and status['olbc_ht'] == '0':
            cleared = True
            break
    if not cleared:
        # Fixed typo in the error message ("did nto" -> "did not").
        raise Exception("OLBC state did not time out")
def test_olbc_5ghz(dev, apdev):
    """OLBC detection on 5 GHz"""
    try:
        # HT40+ AP on 5 GHz channel 36 under the FI regulatory domain.
        ht_params = {
            "ssid": "test-olbc",
            "country_code": "FI",
            "hw_mode": "a",
            "channel": "36",
            "ht_capab": "[HT40+]",
        }
        hapd = hostapd.add_ap(apdev[0]['ifname'], ht_params)
        initial = hapd.get_status()
        if initial['olbc'] != '0' or initial['olbc_ht'] != '0':
            raise Exception("Unexpected OLBC information")

        # An overlapping non-HT AP on the same channel must trigger the
        # OLBC-HT protection mechanism on the first AP.
        legacy_params = {
            "ssid": "olbc-ap",
            "country_code": "FI",
            "hw_mode": "a",
            "channel": "36",
            "ieee80211n": "0",
            "wmm_enabled": "0",
        }
        hostapd.add_ap(apdev[1]['ifname'], legacy_params)
        time.sleep(0.5)
        if hapd.get_status()['olbc_ht'] != '1':
            raise Exception("Missing OLBC information")
    finally:
        # Restore the world regulatory domain regardless of test outcome.
        subprocess.call(['sudo', 'iw', 'reg', 'set', '00'])
def test_ap_require_ht(dev, apdev):
    """Require HT"""
    hapd = hostapd.add_ap(apdev[0]['ifname'],
                          { "ssid": "require-ht", "require_ht": "1" },
                          wait_enabled=False)

    # A STA with HT disabled must have its association rejected.
    dev[1].connect("require-ht", key_mgmt="NONE", scan_freq="2412",
                   disable_ht="1", wait_connect=False)
    # A normal HT STA must be able to associate.
    dev[0].connect("require-ht", key_mgmt="NONE", scan_freq="2412")

    ev = dev[1].wait_event(["CTRL-EVENT-ASSOC-REJECT"])
    if ev is None:
        raise Exception("Association rejection timed out")
    # Status code 27 = association denied because HT support is required.
    if "status_code=27" not in ev:
        raise Exception("Unexpected rejection status code")

    # A STA advertising only a minimal set of HT capabilities still
    # counts as an HT STA and must be accepted.
    dev[2].connect("require-ht", key_mgmt="NONE", scan_freq="2412",
                   ht_mcs="0x01 00 00 00 00 00 00 00 00 00",
                   disable_max_amsdu="1", ampdu_factor="2",
                   ampdu_density="1", disable_ht40="1", disable_sgi="1",
                   disable_ldpc="1")
def test_ap_require_ht_limited_rates(dev, apdev):
    """Require HT with limited supported rates"""
    # Same require_ht scenario, but the AP advertises only OFDM rates.
    hapd = hostapd.add_ap(apdev[0]['ifname'],
                          { "ssid": "require-ht",
                            "supported_rates": "60 120 240 360 480 540",
                            "require_ht": "1" },
                          wait_enabled=False)

    # Non-HT STA -> rejected; HT STA -> accepted.
    dev[1].connect("require-ht", key_mgmt="NONE", scan_freq="2412",
                   disable_ht="1", wait_connect=False)
    dev[0].connect("require-ht", key_mgmt="NONE", scan_freq="2412")

    ev = dev[1].wait_event(["CTRL-EVENT-ASSOC-REJECT"])
    if ev is None:
        raise Exception("Association rejection timed out")
    # Status code 27 = association denied because HT support is required.
    if "status_code=27" not in ev:
        raise Exception("Unexpected rejection status code")
def test_ap_ht_capab_not_supported(dev, apdev):
    """HT configuration with driver not supporting all ht_capab entries"""
    # Configure every optional HT capability at once; the simulated
    # driver cannot support all of them, so enabling the AP must fail.
    params = {
        "ssid": "test-ht40",
        "channel": "5",
        "ht_capab": "[HT40-][LDPC][SMPS-STATIC][SMPS-DYNAMIC][GF][SHORT-GI-20][SHORT-GI-40][TX-STBC][RX-STBC1][RX-STBC12][RX-STBC123][DELAYED-BA][MAX-AMSDU-7935][DSSS_CCK-40][LSIG-TXOP-PROT]",
    }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params, no_enable=True)
    if "FAIL" not in hapd.request("ENABLE"):
        raise Exception("Unexpected ENABLE success")
def test_ap_ht_40mhz_intolerant_sta(dev, apdev):
    """Associated STA indicating 40 MHz intolerant"""
    clear_scan_cache(apdev[0]['ifname'])
    hapd = hostapd.add_ap(apdev[0]['ifname'],
                          { "ssid": "intolerant",
                            "channel": "6",
                            "ht_capab": "[HT40-]" })
    # Baseline: no intolerant STAs, secondary channel offset -1 (40 MHz).
    if hapd.get_status_field("num_sta_ht40_intolerant") != "0":
        raise Exception("Unexpected num_sta_ht40_intolerant value")
    if hapd.get_status_field("secondary_channel") != "-1":
        raise Exception("Unexpected secondary_channel")

    # A 40-MHz-tolerant STA associating changes nothing.
    dev[0].connect("intolerant", key_mgmt="NONE", scan_freq="2437")
    if hapd.get_status_field("num_sta_ht40_intolerant") != "0":
        raise Exception("Unexpected num_sta_ht40_intolerant value")
    if hapd.get_status_field("secondary_channel") != "-1":
        raise Exception("Unexpected secondary_channel")

    # An intolerant STA must force the AP down to 20 MHz operation.
    dev[2].connect("intolerant", key_mgmt="NONE", scan_freq="2437",
                   ht40_intolerant="1")
    time.sleep(1)
    if hapd.get_status_field("num_sta_ht40_intolerant") != "1":
        raise Exception("Unexpected num_sta_ht40_intolerant value (expected 1)")
    if hapd.get_status_field("secondary_channel") != "0":
        raise Exception("Unexpected secondary_channel (did not disable 40 MHz)")

    # Once the intolerant STA disconnects, 40 MHz operation resumes.
    dev[2].request("DISCONNECT")
    time.sleep(1)
    if hapd.get_status_field("num_sta_ht40_intolerant") != "0":
        raise Exception("Unexpected num_sta_ht40_intolerant value (expected 0)")
    if hapd.get_status_field("secondary_channel") != "-1":
        raise Exception("Unexpected secondary_channel (did not re-enable 40 MHz)")
def test_ap_ht_40mhz_intolerant_ap(dev, apdev):
    """Associated STA reports 40 MHz intolerant AP after association"""
    clear_scan_cache(apdev[0]['ifname'])
    # HT40- AP with OBSS scanning requested from associated STAs every
    # obss_interval seconds.
    params = { "ssid": "ht",
               "channel": "6",
               "ht_capab": "[HT40-]",
               "obss_interval": "1" }
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].connect("ht", key_mgmt="NONE", scan_freq="2437")
    if hapd.get_status_field("secondary_channel") != "-1":
        raise Exception("Unexpected secondary channel information")

    # Start an overlapping AP declaring 40 MHz intolerance; the STA's
    # OBSS scan should report it, moving the first AP to 20 MHz.
    logger.info("Start 40 MHz intolerant AP")
    params = { "ssid": "intolerant",
               "channel": "5",
               "ht_capab": "[40-INTOLERANT]" }
    hapd2 = hostapd.add_ap(apdev[1]['ifname'], params)

    logger.info("Waiting for co-ex report from STA")
    ok = False
    for i in range(0, 20):
        time.sleep(1)
        if hapd.get_status_field("secondary_channel") == "0":
            logger.info("AP moved to 20 MHz channel")
            ok = True
            break
    if not ok:
        raise Exception("AP did not move to 20 MHz channel")

    if "OK" not in hapd2.request("DISABLE"):
        raise Exception("Failed to disable 40 MHz intolerant AP")

    # make sure the intolerant AP disappears from scan results more quickly
    dev[0].scan(only_new=True)
    dev[0].scan(freq="2432", only_new=True)

    logger.info("Waiting for AP to move back to 40 MHz channel")
    ok = False
    for i in range(0, 30):
        time.sleep(1)
        if hapd.get_status_field("secondary_channel") == "-1":
            ok = True
            # fix: break out once 40 MHz operation has resumed; the
            # original loop kept sleeping for the full 30 s (the first
            # wait loop above already breaks on success).
            break
    if not ok:
        raise Exception("AP did not move to 40 MHz channel")
| 37.694631 | 199 | 0.582614 |
26043d5d77004fcf11c43eb7691efa8015b6c1e6
| 1,039 |
py
|
Python
|
rhymes.py
|
hayderkharrufa/arabic_poetry_generator
|
82f2ed726ec6270c3ee1d2f7c7aa1783df973708
|
[
"MIT"
] | 72 |
2020-05-29T19:58:22.000Z
|
2022-03-11T18:53:56.000Z
|
rhymes.py
|
mahfoudhich/arabic_poem_generator
|
82f2ed726ec6270c3ee1d2f7c7aa1783df973708
|
[
"MIT"
] | 1 |
2020-06-12T11:03:45.000Z
|
2020-08-05T17:52:27.000Z
|
rhymes.py
|
mahfoudhich/arabic_poem_generator
|
82f2ed726ec6270c3ee1d2f7c7aa1783df973708
|
[
"MIT"
] | 34 |
2020-06-04T14:38:39.000Z
|
2022-03-16T20:50:56.000Z
|
# coding: utf-8
# author: Haydara https://www.youtube.com/haydara
import pickle
with open('vocabs.pkl', 'rb') as pickle_load:
voc_list = pickle.load(pickle_load)
allowed_chars = ['', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', ' ']
max_word_length = 9
| 29.685714 | 102 | 0.505294 |
2604cf6f50e982afd7ab5c70c9417b6682140a5d
| 6,513 |
py
|
Python
|
tests/test_facade.py
|
seucolega/lista-de-listas-cli
|
48815fac9cf3332c5e4fbc935d6ddd09be2738a8
|
[
"MIT"
] | null | null | null |
tests/test_facade.py
|
seucolega/lista-de-listas-cli
|
48815fac9cf3332c5e4fbc935d6ddd09be2738a8
|
[
"MIT"
] | null | null | null |
tests/test_facade.py
|
seucolega/lista-de-listas-cli
|
48815fac9cf3332c5e4fbc935d6ddd09be2738a8
|
[
"MIT"
] | null | null | null |
import facade
import pytest
import schemas
| 24.484962 | 79 | 0.76094 |
2607fe4913aa92b0a573f52ce885f77ac1e7a144
| 17,667 |
py
|
Python
|
ots_eval/outlier_detection/doots.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | 3 |
2021-03-28T14:46:57.000Z
|
2022-01-03T17:25:19.000Z
|
ots_eval/outlier_detection/doots.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | null | null | null |
ots_eval/outlier_detection/doots.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | 1 |
2022-01-11T10:56:14.000Z
|
2022-01-11T10:56:14.000Z
|
import pandas
import numpy as np
from .reference_histogram_outlier import HistOutlier
from typing import Union, Tuple
| 49.487395 | 138 | 0.572819 |
260902dd3e508f3dd93dbd19435e62ca56223adf
| 3,405 |
py
|
Python
|
SWIG_fast_functions/test.py
|
twni2016/OrganSegRSTN_PyTorch
|
bf571320e718c8f138e04d48645e3b4dfe75801d
|
[
"MIT"
] | 100 |
2018-08-01T04:42:36.000Z
|
2022-03-23T07:01:21.000Z
|
SWIG_fast_functions/test.py
|
bharat3012/OrganSegRSTN_PyTorch
|
aff23489b1f3006761e3270178adfcccb63d0de9
|
[
"MIT"
] | 12 |
2018-08-07T10:35:47.000Z
|
2022-02-21T09:09:42.000Z
|
SWIG_fast_functions/test.py
|
bharat3012/OrganSegRSTN_PyTorch
|
aff23489b1f3006761e3270178adfcccb63d0de9
|
[
"MIT"
] | 35 |
2018-08-06T21:27:36.000Z
|
2021-11-03T10:20:16.000Z
|
import numpy as np
import fast_functions as ff
import time
print('python')
G = np.zeros((512,512,240),dtype=np.uint8)
G[128:384,128:384,60:180]=1
volume_data = np.load('1.npz')
F = volume_data['volume'].astype(np.uint8)
start_time = time.time()
F = post_processing(F, F, 1.0, False)
print(time.time() - start_time)
start_time = time.time()
for l in range(10):
DSC = DSC_computation(F,G)
print(DSC)
print(time.time() - start_time)
print('SWIG')
volume_data = np.load('1.npz')
G = np.zeros((512,512,240),dtype=np.uint8)
G[128:384,128:384,60:180]=1
F = volume_data['volume'].astype(np.uint8)
start_time = time.time()
ff.post_processing(F, F, 1.0, False)
print(time.time() - start_time)
start_time = time.time()
for l in range(10):
P = np.zeros(3, dtype = np.uint32)
ff.DSC_computation(F,G,P)
print(P, float(P[2]) * 2 / (P[0] + P[1]))
print(time.time() - start_time)
| 29.608696 | 85 | 0.595888 |
260aad7a8e1f7dc10f9fc0b29cb23cbf4ba1d39e
| 221 |
py
|
Python
|
smart-chatbot-zero/Rerank/data_preprocess1.py
|
WenRichard/Customer-Chatbot
|
48508c40574ffac8ced414a5bea799e2c85341ca
|
[
"MIT"
] | 268 |
2019-07-26T01:40:43.000Z
|
2022-03-28T14:54:57.000Z
|
xiaotian-chatbot1.0/Rerank/data_preprocess1.py
|
abc668/Customer-Chatbot
|
48508c40574ffac8ced414a5bea799e2c85341ca
|
[
"MIT"
] | 7 |
2019-08-13T04:17:55.000Z
|
2020-08-06T08:57:34.000Z
|
xiaotian-chatbot1.0/Rerank/data_preprocess1.py
|
abc668/Customer-Chatbot
|
48508c40574ffac8ced414a5bea799e2c85341ca
|
[
"MIT"
] | 113 |
2019-07-26T01:40:47.000Z
|
2022-03-18T13:22:44.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/5/25 16:09
# @Author : Alan
# @Email : [email protected]
# @File : data_preprocess2.py
# @Software: PyCharm
#
| 20.090909 | 36 | 0.638009 |
260b023afea60f62495ec2404352213dae65708e
| 1,160 |
py
|
Python
|
PageMonitor/project/lib/config.py
|
DanylZhang/IdeaWorkspace
|
726be80db4ca7dac4104ebaa22b795f37aca73e0
|
[
"MIT"
] | null | null | null |
PageMonitor/project/lib/config.py
|
DanylZhang/IdeaWorkspace
|
726be80db4ca7dac4104ebaa22b795f37aca73e0
|
[
"MIT"
] | null | null | null |
PageMonitor/project/lib/config.py
|
DanylZhang/IdeaWorkspace
|
726be80db4ca7dac4104ebaa22b795f37aca73e0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding:utf-8
import pymysql
default_config = {
'host': '139.196.96.149',
'port': 13306,
'user': 'dataway-rw',
'password': 'QqHVMhmN*8',
'db': 'jumei',
'charset': 'utf8mb4'
}
apollo_config = {
'host': '127.0.0.1',
'port': 11306,
'user': 'apollo-rw',
'password': 'QBT094bt',
'db': 'apollo',
'charset': 'utf8mb4',
'autocommit': True
}
allsite_config = {
'host': '127.0.0.1',
'port': 15306,
'user': 'apollo-rw',
'password': 'QBT094bt',
'db': 'all_site',
'charset': 'utf8mb4'
}
dataway_config = {
'host': '139.196.96.149',
'port': 13306,
'user': 'dataway-rw',
'password': 'QqHVMhmN*8',
'db': 'jumei',
'charset': 'utf8mb4'
}
dw_entity_config = {
'host': '127.0.0.1',
'port': 18306,
'user': 'qbt',
'password': 'QBT094bt',
'db': 'dw_entity',
'charset': 'utf8mb4',
'autocommit': True
}
channel_config = {
'host': 'channel.ecdataway.com',
'port': 3306,
'user': 'comment_catcher',
'password': 'cc33770880',
'db': 'monitor',
'charset': 'utf8mb4',
'cursorclass': pymysql.cursors.DictCursor
}
| 20.714286 | 45 | 0.546552 |
260b70a7a637b6e3448163b26c95e89556398218
| 70 |
py
|
Python
|
24/aoc24-1-cython.py
|
combs/AdventOfCode2021
|
925df8a3526cb9c0dde368cf828673f345096e06
|
[
"MIT"
] | null | null | null |
24/aoc24-1-cython.py
|
combs/AdventOfCode2021
|
925df8a3526cb9c0dde368cf828673f345096e06
|
[
"MIT"
] | null | null | null |
24/aoc24-1-cython.py
|
combs/AdventOfCode2021
|
925df8a3526cb9c0dde368cf828673f345096e06
|
[
"MIT"
] | null | null | null |
import pyximport
pyximport.install()
from aoc24 import do_it
do_it()
| 11.666667 | 23 | 0.8 |
260b9f9c6262684a4bbfdcc0510786d9313421e4
| 358 |
py
|
Python
|
tests/main.py
|
zodiuxus/opensimplex
|
d8c761d91834a809e51987d25439549c50f0effb
|
[
"MIT"
] | null | null | null |
tests/main.py
|
zodiuxus/opensimplex
|
d8c761d91834a809e51987d25439549c50f0effb
|
[
"MIT"
] | null | null | null |
tests/main.py
|
zodiuxus/opensimplex
|
d8c761d91834a809e51987d25439549c50f0effb
|
[
"MIT"
] | null | null | null |
from opensimplex import OpenSimplex
import torch, time
print(opensimplex_test('cuda'))
print('')
print(opensimplex_test('cpu'))
| 27.538462 | 47 | 0.701117 |
260c11b37ca0f5a211fd4291ad4e8a2a93dbd3d4
| 83 |
py
|
Python
|
pybktreespellchecker/__init__.py
|
tomasrasymas/pybktree-spell-checker
|
e1f7547957a4257a9b6ce470ebc8bcc95767c5d4
|
[
"MIT"
] | 3 |
2019-01-07T21:34:29.000Z
|
2020-07-20T23:43:01.000Z
|
pybktreespellchecker/__init__.py
|
tomasrasymas/pybktree-spell-checker
|
e1f7547957a4257a9b6ce470ebc8bcc95767c5d4
|
[
"MIT"
] | null | null | null |
pybktreespellchecker/__init__.py
|
tomasrasymas/pybktree-spell-checker
|
e1f7547957a4257a9b6ce470ebc8bcc95767c5d4
|
[
"MIT"
] | null | null | null |
from .levenshtein_distance import levenshtein_distance
from .bk_tree import BKTree
| 27.666667 | 54 | 0.879518 |
260d3744fb17af1e21703f4ae4917654e0d07e54
| 10,731 |
py
|
Python
|
part1/ELMo/modules/elmo.py
|
peter850706/Contextual-embeddings-for-sequence-classification
|
e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0
|
[
"MIT"
] | null | null | null |
part1/ELMo/modules/elmo.py
|
peter850706/Contextual-embeddings-for-sequence-classification
|
e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0
|
[
"MIT"
] | null | null | null |
part1/ELMo/modules/elmo.py
|
peter850706/Contextual-embeddings-for-sequence-classification
|
e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import namedtuple
from ELMo.modules.char_embedding import CharEmbedding
| 63.875 | 191 | 0.594353 |
260d56541b9590ff3dcf8aa4ac7f649e63e42413
| 3,106 |
py
|
Python
|
src/app/externalOutages/createRealTimeOutage.py
|
nagasudhirpulla/wrldc_codebook
|
8fbc795074e16e2012b29ae875b99aa721a7f021
|
[
"MIT"
] | null | null | null |
src/app/externalOutages/createRealTimeOutage.py
|
nagasudhirpulla/wrldc_codebook
|
8fbc795074e16e2012b29ae875b99aa721a7f021
|
[
"MIT"
] | 21 |
2021-01-08T18:03:32.000Z
|
2021-02-02T16:17:34.000Z
|
src/app/externalOutages/createRealTimeOutage.py
|
nagasudhirpulla/wrldc_codebook
|
8fbc795074e16e2012b29ae875b99aa721a7f021
|
[
"MIT"
] | null | null | null |
import datetime as dt
import cx_Oracle
from src.app.externalOutages.getReasonId import getReasonId
def createRealTimeOutage(pwcDbConnStr: str, elemTypeId: int, elementId: int, outageDt: dt.datetime, outageTypeId: int,
                         reason: str, elementName: str, sdReqId: int, outageTagId: int) -> int:
    """Create a new row in the PWC real_time_outage table.

    Args:
        pwcDbConnStr (str): cx_Oracle connection string for the PWC DB
        elemTypeId (int): entity/element type id (ENTITY_ID column)
        elementId (int): id of the element taken out (ELEMENT_ID column)
        outageDt (dt.datetime): outage date and time
        outageTypeId (int): outage type id (SHUT_DOWN_TYPE column)
        reason (str): outage reason text; empty/None becomes "NA"
        elementName (str): display name of the element
        sdReqId (int): shutdown request id
        outageTagId (int): shutdown tag id

    Returns:
        int: id of the newly created row, or -1 on failure
    """
    newRtoId = -1
    if outageDt is None:
        return -1
    if reason is None or reason == "":
        reason = "NA"
    reasId = getReasonId(pwcDbConnStr, reason, outageTypeId)
    if reasId == -1:
        return -1
    # Split the timestamp into a date column and an "HH:MM" time string,
    # matching the table's storage convention.
    outageDate: dt.datetime = dt.datetime(
        outageDt.year, outageDt.month, outageDt.day)
    outageTime: str = dt.datetime.strftime(outageDt, "%H:%M")
    # NOTE(review): MAX(ID)+1 id allocation is not concurrency-safe; two
    # simultaneous callers can collide. A DB sequence would be safer.
    newRtoIdFetchSql = """
        SELECT MAX(rto.ID)+1 FROM REPORTING_WEB_UI_UAT.real_time_outage rto
        """
    rtoInsertSql = """
        insert into reporting_web_ui_uat.real_time_outage rto(ID, ENTITY_ID, ELEMENT_ID, OUTAGE_DATE,
        OUTAGE_TIME, RELAY_INDICATION_SENDING_ID, RELAY_INDICATION_RECIEVING_ID, CREATED_DATE,
        SHUT_DOWN_TYPE, REASON_ID, CREATED_BY, MODIFIED_BY, REGION_ID, ELEMENTNAME,
        SHUTDOWNREQUEST_ID, LOAD_AFFECTED, IS_LOAD_OR_GEN_AFFECTED, SHUTDOWN_TAG_ID, IS_DELETED) values
        (:id, :elemTypeId, :elementId, :outageDate, :outageTime, 0, 0, CURRENT_TIMESTAMP, :outageTypeId,
        :reasonId, 123, 123, 4, :elementName, :sdReqId, 0, 0, :outageTagId, NULL)
        """
    dbConn = None
    dbCur = None
    try:
        # get connection with raw data table
        dbConn = cx_Oracle.connect(pwcDbConnStr)
        # get cursor for raw data table
        dbCur = dbConn.cursor()
        # execute the new rto id fetch sql
        dbCur.execute(newRtoIdFetchSql)
        dbRows = dbCur.fetchall()
        newRtoId = dbRows[0][0]
        if newRtoId is None:
            # fix: MAX(ID) over an empty table yields NULL (NULL+1 is
            # still NULL); start numbering at 1 instead of inserting NULL.
            newRtoId = 1
        sqlData = {"id": newRtoId, "elemTypeId": elemTypeId, "elementId": elementId,
                   "outageDate": outageDate, "outageTime": outageTime,
                   "outageTypeId": outageTypeId, "reasonId": reasId,
                   "elementName": elementName, "sdReqId": sdReqId,
                   "outageTagId": outageTagId}
        # execute the new row insertion sql
        dbCur.execute(rtoInsertSql, sqlData)
        # commit the changes
        dbConn.commit()
    except Exception as e:
        newRtoId = -1
        print('Error while creating new real time outage entry in pwc table')
        print(e)
    finally:
        # closing database cursor and connection
        if dbCur is not None:
            dbCur.close()
        if dbConn is not None:
            dbConn.close()
    return newRtoId
| 36.541176 | 118 | 0.640052 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.