blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-616) | content_id (string, len 40) | detected_licenses (sequence, len 0-112) | license_type (string, 2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k-681M, nullable ⌀) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3-10.2M) | extension (string, 188 classes) | content (string, len 3-10.2M) | authors (sequence, len 1) | author_id (string, len 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
60741e6eaadbfc2492da1af309879c5578cd89a2 | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Flector_medicated_plaster_SmPC.py | b4cb409483f8f16e7249caaae0809f3c362a58ec | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | {'_data': [['Common', [['Skin', u'2,95%'], ['General', u'5%']]], ['Uncommon', []], ['Rare', []]],
'_note': u' ?MSFU',
'_pages': [3, 4],
u'_rank': 2,
u'_type': u'MSFU'} | [
"daro@daro-ThinkPad-X220.(none)"
] | daro@daro-ThinkPad-X220.(none) |
c7e3ef11b42f06a59db7b6364d8c81fa825e86a8 | fa08645f5804c98cb5f8354a851e8b4dc93f8224 | /assignment2/code/hot_city_mapper.py | 0b78294d7d3878aedab09eb727efcaf599035f58 | [] | no_license | chrizandr/distributed_computing | a9aab6ba04cc5282a573f910434bd7525894f7cc | 7979630bd97e36928f272f89be20e9990eb82747 | refs/heads/master | 2021-05-04T12:11:13.397801 | 2019-07-17T13:06:20 | 2019-07-17T13:06:20 | 120,289,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | #!/usr/bin/env python3
"""mapper.py."""
import sys
# input comes from STDIN (standard input)
for line in sys.stdin:
line = line.strip()
city, date, hi, lo = line.split(',')
print('{}\t{}'.format(city, hi))
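# A typical Hadoop-streaming-style invocation (the file and reducer names are
# hypothetical, not part of this repo):
#   cat city_weather.csv | python3 hot_city_mapper.py | sort | python3 hot_city_reducer.py
# where each input line looks like: city,date,hi,lo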
| [
"[email protected]"
] | |
a4316d4be42d7c16f885ca1d1fb52f0985d3fdfd | 21945fb6674298a3b2de064c55ca01205c646a1c | /week07/homework3.py | bb7da6f1bfdf92cab0f094fbbce5d737be54a4a1 | [] | no_license | Jiangjao/Python005-01 | c200c4feff1042c7069595e64d6e7abd6530b762 | 04275c2d5d3d134f9ea0f8fe5eabceae9317683b | refs/heads/main | 2023-03-23T20:51:05.371201 | 2021-03-14T08:04:06 | 2021-03-14T08:04:06 | 314,480,746 | 0 | 0 | null | 2020-11-20T07:39:25 | 2020-11-20T07:39:25 | null | UTF-8 | Python | false | false | 408 | py | import time
import math  # imported in the original, though unused below
def timer(func):
    def inner(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        end = time.time()
        time_delta = end - start  # elapsed wall-clock time in seconds
        # time_delta = round((end - start), 10)
        print(func.__name__, f"took roughly {time_delta} seconds")
        return time_delta
    return inner
@timer
def sayHello():
print('hello')
sayHello()
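# A more robust variant would preserve the wrapped function's metadata and
# return value (a sketch, not part of the original homework):
#
#     import functools
#     def timer(func):
#         @functools.wraps(func)
#         def inner(*args, **kwargs):
#             start = time.time()
#             result = func(*args, **kwargs)
#             print(func.__name__, f"took roughly {time.time() - start} seconds")
#             return result
#         return inner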
| [
"[email protected]"
] | |
3e126c527ecdbabbe3d020d939bbf9ae244d670e | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_7979.py | bffb80e110380d59813cacb6a92cf1f35316152e | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # 7979
# \b([A-Za-z0-9]+)(-|_|\.)?(\w+)?@\w+\.(\w+)?(\.)?(\w+)?(\.)?(\w+)?\b
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"A"*10000+"! _1SLQ_1"
import re2 as re
from time import perf_counter
regex = """\b([A-Za-z0-9]+)(-|_|\.)?(\w+)?@\w+\.(\w+)?(\.)?(\w+)?(\.)?(\w+)?\b"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "A" * i * 10000 + "! _1SLQ_1"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") | [
"[email protected]"
] | |
4fbe0312706cd863d435ee206d2cd50d864aa244 | 433ca57245fe15afd309323e82f3bdf3287b4831 | /authentication/urls.py | f79cea9e911a931f09d7db1c6c59dadfc0af64d4 | [] | no_license | greenteamer/ceiling-django | db5170faada0f1582c744fa28c638e8671dc2ab9 | b4a469ae7d2ce6ed36ae51af60633de1fdb43ea4 | refs/heads/master | 2020-04-09T19:01:40.273226 | 2018-12-05T14:39:15 | 2018-12-05T14:39:15 | 160,531,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
from django.conf.urls import url
from authentication.views import *
urlpatterns = [
url(r'logout/$', logoutView),
url(r'login/$', loginView),
url(r'register/$', registerView),
]
| [
"[email protected]"
] | |
9b252939ed7fd8376f54021b32dab3f487c5ebcb | 9ac405635f3ac9332e02d0c7803df757417b7fee | /permisos/migrations/0002_aditionaldefaultpermission.py | 5aea9ff8ce03397279fa7f4af97be041d2238bc0 | [] | no_license | odecsarrollo/07_intranet_proyectos | 80af5de8da5faeb40807dd7df3a4f55f432ff4c0 | 524aeebb140bda9b1bf7a09b60e54a02f56fec9f | refs/heads/master | 2023-01-08T04:59:57.617626 | 2020-09-25T18:01:09 | 2020-09-25T18:01:09 | 187,250,667 | 0 | 0 | null | 2022-12-30T09:36:37 | 2019-05-17T16:41:35 | JavaScript | UTF-8 | Python | false | false | 677 | py | # Generated by Django 2.0.2 on 2018-02-17 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('permisos', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AditionalDefaultPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'permissions': (('list_user', 'Can list user'), ('list_permission', 'Can list permission'), ('detail_user', 'Can see user')),
'managed': False,
},
),
]
| [
"[email protected]"
] | |
92a9d3a6a54f6b4fa8608f888852f05dc5e91407 | faa0ce2a95da958be3bfb171cdff29eeb43c3eb6 | /py-exercises/unicorn-rpg/item/tonics.py | 27820bee3bba706d563907978d5efc5f887263e1 | [
"MIT"
] | permissive | julianapeace/digitalcrafts-exercises | 98fe4e20420c47cf9d92d16c45ac60dc35a49a6a | 98e6680138d55c5d093164a47da53e1ddb6d064c | refs/heads/master | 2021-08-30T04:17:09.997205 | 2017-12-16T00:22:22 | 2017-12-16T00:22:22 | 103,176,043 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from item.base import Item
class Tonic(Item):
def equip(self, hero):
print("{}'s health is {} out of {}.".format(
hero.name, hero.health, hero.max_health))
self.pause()
print("{} eats the {}.".format(
hero.name, self.name))
self.pause()
for i in range(3):
print("~ * CHOMP * ~")
self.pause()
hero.health = hero.max_health
print("{}'s health is now {} out of {}!".format(
hero.name, hero.health, hero.max_health))
| [
"[email protected]"
] | |
d0c478661d612888172e09bd9e3c4ebf31caf74f | 68e5e2c9a7e9372f536edf3d99847067eb734e75 | /11-使用django-rest-framework/typeidea/typeidea/typeidea/settings/develop.py | 9c2b99d4d96bb7a0ad7244b42325d5c3b25ce633 | [] | no_license | gy0109/Django-enterprise-development-logs--huyang | f04d21df6d45f5d2f226760d35e38042f74a7ea8 | ab4505f8cdaf0c1f9e3635591cd74645a374a73f | refs/heads/master | 2020-05-17T05:24:51.602859 | 2019-05-08T03:42:13 | 2019-05-08T03:42:13 | 183,534,431 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | from .base import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'typeidea',
'USER': 'root',
'PASSWORD': 'gy0109',
'HOST': '127.0.0.1',
'PORT': '3306',
'TEST': {
'CHARSET': 'utf8', #
# 'COLLATION': 'utf8_general_ci',
'NAME': 'mytextdatabase', # 配置单元测试的的数据库
},
# 'CHARSET': 'utf8'
}
}
# debug_toolbar configuration for local performance tuning
INSTALLED_APPS += [
'debug_toolbar',
'debug_toolbar_line_profiler',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ['127.0.0.1']
# # debug_toobar本地系统优化配置--第三方包panel --火焰图
DEBUG_TOOLBAR_PANELS = [
# 'djdt_flamegraph.FlamegraphPanel', 报错啊
'debug_toolbar_line_profiler.panel.ProfilingPanel',
]
# | [
"[email protected]"
] | |
eeac7bb8fd1fbecbee6a1ae7589e83913bd1427a | 03f0a82e829a5711a9165d8f7d3762ca0c1ceaea | /ahgl/apps/tournaments/migrations/0012_auto__add_match__add_tournament__add_tournamentround__add_map__add_gam.py | 695c8bd8bf2a382d2838ab11c06b8685eda5009c | [
"BSD-2-Clause"
] | permissive | day9tv/ahgl | 4d273a39e06334cc15eb12031de0a806366396b9 | 5e06cfecb28c153c1b83ef89112fc217897131cb | refs/heads/master | 2021-01-22T08:32:53.663312 | 2012-10-06T21:32:52 | 2012-10-06T21:32:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,974 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Match'
db.create_table('tournaments_match', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tournament', self.gf('django.db.models.fields.related.ForeignKey')(related_name='matches', to=orm['tournaments.Tournament'])),
('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
('publish_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('creation_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
('referee', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Profile'], null=True, blank=True)),
('home_submitted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('away_submitted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('tournaments', ['Match'])
# Adding model 'Tournament'
db.create_table('tournaments_tournament', (
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, primary_key=True, db_index=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
('featured_game', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tournaments.Game'], null=True, blank=True)),
))
db.send_create_signal('tournaments', ['Tournament'])
# Adding M2M table for field map_pool on 'Tournament'
db.create_table('tournaments_tournament_map_pool', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('tournament', models.ForeignKey(orm['tournaments.tournament'], null=False)),
('map', models.ForeignKey(orm['tournaments.map'], null=False))
))
db.create_unique('tournaments_tournament_map_pool', ['tournament_id', 'map_id'])
# Adding model 'TournamentRound'
db.create_table('tournaments_tournamentround', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
('tournament', self.gf('django.db.models.fields.related.ForeignKey')(related_name='rounds', to=orm['tournaments.Tournament'])),
('stage', self.gf('django.db.models.fields.IntegerField')()),
('structure', self.gf('django.db.models.fields.CharField')(default='G', max_length=1)),
))
db.send_create_signal('tournaments', ['TournamentRound'])
# Adding model 'Map'
db.create_table('tournaments_map', (
('name', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
))
db.send_create_signal('tournaments', ['Map'])
# Adding model 'Game'
db.create_table('tournaments_game', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('match', self.gf('django.db.models.fields.related.ForeignKey')(related_name='games', to=orm['tournaments.Match'])),
('map', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tournaments.Map'])),
('order', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('home_player', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='home_games', null=True, to=orm['profiles.Profile'])),
('home_race', self.gf('django.db.models.fields.CharField')(max_length=1, blank=True)),
('away_player', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='away_games', null=True, to=orm['profiles.Profile'])),
('away_race', self.gf('django.db.models.fields.CharField')(max_length=1, blank=True)),
('winner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='game_wins', null=True, to=orm['profiles.Profile'])),
('loser', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='game_losses', null=True, to=orm['profiles.Profile'])),
('forfeit', self.gf('django.db.models.fields.BooleanField')(default=False)),
('replay', self.gf('django.db.models.fields.files.FileField')(max_length=300, null=True, blank=True)),
('vod', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('is_ace', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('tournaments', ['Game'])
# Adding unique constraint on 'Game', fields ['order', 'match']
db.create_unique('tournaments_game', ['order', 'match_id'])
def backwards(self, orm):
# Removing unique constraint on 'Game', fields ['order', 'match']
db.delete_unique('tournaments_game', ['order', 'match_id'])
# Deleting model 'Match'
db.delete_table('tournaments_match')
# Deleting model 'Tournament'
db.delete_table('tournaments_tournament')
# Removing M2M table for field map_pool on 'Tournament'
db.delete_table('tournaments_tournament_map_pool')
# Deleting model 'TournamentRound'
db.delete_table('tournaments_tournamentround')
# Deleting model 'Map'
db.delete_table('tournaments_map')
# Deleting model 'Game'
db.delete_table('tournaments_game')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'bnet_profile': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'char_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'char_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'custom_thumb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'questions_answers': ('profiles.fields.HTMLField', [], {'attributes': '[]', 'blank': 'True', 'tags': "['ol', 'ul', 'li', 'strong', 'em', 'p']"}),
'race': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'tournaments.game': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('order', 'match'),)", 'object_name': 'Game'},
'away_player': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'away_games'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
'away_race': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'forfeit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_player': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'home_games'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
'home_race': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_ace': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'loser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'game_losses'", 'null': 'True', 'to': "orm['profiles.Profile']"}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tournaments.Map']"}),
'match': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'games'", 'to': "orm['tournaments.Match']"}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'replay': ('django.db.models.fields.files.FileField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'vod': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'winner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'game_wins'", 'null': 'True', 'to': "orm['profiles.Profile']"})
},
'tournaments.map': {
'Meta': {'object_name': 'Map'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'})
},
'tournaments.match': {
'Meta': {'object_name': 'Match'},
'away_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'home_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'referee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']", 'null': 'True', 'blank': 'True'}),
'tournament': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'matches'", 'to': "orm['tournaments.Tournament']"})
},
'tournaments.tournament': {
'Meta': {'object_name': 'Tournament'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tournaments.Game']", 'null': 'True', 'blank': 'True'}),
'map_pool': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['tournaments.Map']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True', 'db_index': 'True'})
},
'tournaments.tournamentround': {
'Meta': {'object_name': 'TournamentRound'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'stage': ('django.db.models.fields.IntegerField', [], {}),
'structure': ('django.db.models.fields.CharField', [], {'default': "'G'", 'max_length': '1'}),
'tournament': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rounds'", 'to': "orm['tournaments.Tournament']"})
}
}
complete_apps = ['tournaments']
| [
"[email protected]"
] | |
0042ad247469d035eb3bd6bbca7c8f0d3244cf04 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03623/s199798593.py | 72bc11bf89ea2eb7278b4860b9b2a08a11cd95e1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | import sys
input = sys.stdin.readline
x,a,b = list(map(int,input().split()))
print('A' if abs(x-a) < abs(x-b) else 'B')
| [
"[email protected]"
] | |
c31e783fbf0dafb8d8f7922508e19f3339053b96 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-966.py | 9ec39bda388f268fc18d4cd2b06cef12ee58ec27 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,989 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
    result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
| [
"[email protected]"
] | |
1fa37d8012d63b7f1df5afe32309fb8374bcd5c0 | 0f79fd61dc47fcafe22f83151c4cf5f2f013a992 | CodeUp/[059~064] 기초-조건및선택실행구조/060.py | 922d887c4b510638f2a9b3c75e20f27b97df69abc | [] | no_license | sangm1n/holbertonschool-higher_level_programming | 855c8b2672b59d27cba4c05ad8a7bb5f30b5d253 | 0d2bcdab97c31e79093497a1783a52e56e10ac6e | refs/heads/master | 2023-04-22T17:56:21.967766 | 2021-05-05T12:34:01 | 2021-05-05T12:34:01 | 282,863,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | # Given three integers as input, print even/odd for each
a, b, c = map(int, input().split())
print('even' if a%2==0 else 'odd')
print('even' if b%2==0 else 'odd')
print('even' if c%2==0 else 'odd')
| [
"[email protected]"
] | |
e52284eb219985f90a9b3c75e20f27b97df69abc | 893597d91fe6de25cdd3e8427c4ebba29d3cabe1 | /tests/test_scaffold.py | af20eb27938d8aea2c9c8159b9a8598d08f22bb7 | [
"Apache-2.0"
] | permissive | AugustasV/ploomber | d51cefd529bdbf6c2bc82485ba77add6bb346f2b | b631a1b21da64bb7b9525db1c29c32ee3c0e48b4 | refs/heads/master | 2023-08-27T02:22:55.556200 | 2021-10-31T02:52:28 | 2021-10-31T02:52:28 | 423,189,549 | 0 | 0 | Apache-2.0 | 2021-10-31T15:44:17 | 2021-10-31T15:44:16 | null | UTF-8 | Python | false | false | 3,380 | py | from pathlib import Path
import ast
import pytest
from ploomber import tasks
from ploomber import scaffold
@pytest.mark.parametrize('name', ['task.py', 'task.ipynb'])
@pytest.mark.parametrize('extract_upstream', [False, True])
@pytest.mark.parametrize('extract_product', [False, True])
def test_renders_valid_script(name, extract_product, extract_upstream):
loader = scaffold.ScaffoldLoader('ploomber_add')
out = loader.render(name,
params=dict(extract_product=extract_product,
extract_upstream=extract_upstream))
# make sure it generates valid python code, except for the sql template
if not name.endswith('.sql'):
ast.parse(out)
@pytest.mark.parametrize('extract_upstream', [False, True])
@pytest.mark.parametrize('extract_product', [False, True])
def test_renders_valid_function(extract_product, extract_upstream):
loader = scaffold.ScaffoldLoader('ploomber_add')
out = loader.render('function.py',
params=dict(function_name='some_function',
extract_product=extract_product,
extract_upstream=extract_upstream))
module = ast.parse(out)
assert module.body[0].name == 'some_function'
def test_create_function(backup_test_pkg, tmp_directory):
loader = scaffold.ScaffoldLoader('ploomber_add')
loader.create('test_pkg.functions.new_function',
dict(extract_product=False, extract_upstream=True),
tasks.PythonCallable)
code = Path(backup_test_pkg, 'functions.py').read_text()
module = ast.parse(code)
function_names = {
element.name
for element in module.body if hasattr(element, 'name')
}
assert 'new_function' in function_names
def test_add_task_from_scaffold(backup_test_pkg, tmp_directory):
yaml = """
meta:
source_loader:
module: test_pkg
extract_product: True
tasks:
- source: notebook.ipynb
- source: notebook.py
- source: test_pkg.functions.my_new_function
"""
Path('pipeline.yaml').write_text(yaml)
# FIXME: this will fail because TaskSpec validates that the
# dotted path actually exists. I think the cleanest solution
# is to add a special class method for DAGSpec that allows the lazy
# load to skip validating the last attribute...
spec, path_to_spec = scaffold.load_dag()
scaffold.add(spec, path_to_spec)
code = Path(backup_test_pkg, 'functions.py').read_text()
module = ast.parse(code)
function_names = {
element.name
for element in module.body if hasattr(element, 'name')
}
assert 'my_new_function' in function_names
assert Path(backup_test_pkg, 'notebook.ipynb').exists()
assert Path(backup_test_pkg, 'notebook.py').exists()
def test_add_task_when_using_import_tasks_from(tmp_directory):
spec = """
meta:
import_tasks_from: subdir/tasks.yaml
extract_product: True
tasks: []
"""
tasks = """
- source: notebook.py
"""
Path('pipeline.yaml').write_text(spec)
subdir = Path('subdir')
subdir.mkdir()
(subdir / 'tasks.yaml').write_text(tasks)
spec, path_to_spec = scaffold.load_dag()
scaffold.add(spec, path_to_spec)
assert (subdir / 'notebook.py').exists()
| [
"[email protected]"
] | |
8d9d092d70ec00427f55769a5247b4663f100ca9 | c829275111b9025dcccc9ac1b92d8dc51adbb71d | /manage.py | 5621943733c6ce168fc625587ad0942d94e5925f | [
"MIT"
] | permissive | Ken-mbira/PHOTO_BOOK | f1bd1bd65af228b0600bf69da12840897eb109ad | d47cd8dabd4b92e3befdafe2d99db266be31ffff | refs/heads/master | 2023-08-19T06:55:07.309342 | 2021-10-12T11:05:00 | 2021-10-12T11:05:00 | 414,297,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'photobook.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c29072f0f943d98e21ef746e7367c84f8a47ff84 | a908a9f1f6028fe78a5b23621dff4b8fa7047414 | /flatmap_lv1_larger/constants.py | 8bb59c011796771343de9fdada9c6a22e6f139a2 | [] | no_license | ViZDoomBot/stable-baselines-agent | 502edd5e64c45a6adbe073a22e477b0e41ac213d | a76c1c3449dab518462f6a7bc2a0dcb668f08b77 | refs/heads/main | 2023-04-27T14:45:27.389418 | 2021-05-05T06:54:51 | 2021-05-05T06:54:51 | 340,531,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : constants.py
# @Author: harry
# @Date : 1/27/21 7:08 PM
# @Desc : some constant definitions
import sys
import os
import vizdoom as vzd
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from common.utils import make_expand_action_f
# atomic actions
NUM_ATOMIC_ACTIONS = 7
MOVE_FORWARD = 0
MOVE_BACKWARD = 1
MOVE_LEFT = 2
MOVE_RIGHT = 3
TURN_LEFT = 4
TURN_RIGHT = 5
ATTACK = 6
_expand_action = make_expand_action_f(NUM_ATOMIC_ACTIONS)
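# For reference, a plausible sketch of make_expand_action_f (the real
# implementation lives in common.utils, which is not shown here): it is
# assumed to return a function mapping atomic-action indices to a one-hot
# boolean button list of length NUM_ATOMIC_ACTIONS, e.g.
#
#     def make_expand_action_f(n):
#         def expand_action(*indices):
#             return [i in indices for i in range(n)]
#         return expand_action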
# action space (both atomic and combination actions)
ACTION_LIST = [
_expand_action(),
_expand_action(MOVE_FORWARD),
_expand_action(MOVE_BACKWARD),
_expand_action(MOVE_LEFT),
_expand_action(MOVE_RIGHT),
_expand_action(TURN_LEFT),
_expand_action(TURN_RIGHT),
_expand_action(ATTACK),
_expand_action(MOVE_FORWARD, TURN_LEFT),
_expand_action(MOVE_FORWARD, TURN_RIGHT),
_expand_action(MOVE_LEFT, TURN_RIGHT),
_expand_action(MOVE_RIGHT, TURN_LEFT),
_expand_action(MOVE_FORWARD, ATTACK),
_expand_action(MOVE_BACKWARD, ATTACK),
_expand_action(MOVE_LEFT, ATTACK),
_expand_action(MOVE_RIGHT, ATTACK),
]
CONSTANTS_DICT = {
'scenario_cfg_path': '../scenarios/flatmap_lv1_no_hud.cfg',
'game_args': '-host 1 -deathmatch +sv_forcerespawn 1 +sv_noautoaim 1 '
'+sv_respawnprotect 1 +sv_spawnfarthest 1 +sv_nocrouch 1 +viz_respawn_delay 0 +viz_nocheat 0',
'num_bots': 8,
'action_list': ACTION_LIST,
'num_actions': len(ACTION_LIST),
'resized_height': 120,
'resized_width': 120,
'preprocess_shape': (120, 120),
'extra_features': [vzd.GameVariable.HEALTH, vzd.GameVariable.AMMO5, vzd.GameVariable.ARMOR],
'extra_features_norm_factor': [100.0, 50.0, 200.0],
}
| [
"[email protected]"
] | |
c5b2a1385a8b9cc616d3b0327b4a190a9d888330 | 08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2 | /kubernetes/client/models/v1_load_balancer_ingress.py | aa2f3c191214800fe58682c91dad6f3156d6d464 | [
"Apache-2.0"
] | permissive | ex3cv/client-python | 5c6ee93dff2424828d064b5a2cdbed3f80b74868 | 2c0bed9c4f653472289324914a8f0ad4cbb3a1cb | refs/heads/master | 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LoadBalancerIngress(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'hostname': 'str',
'ip': 'str'
}
attribute_map = {
'hostname': 'hostname',
'ip': 'ip'
}
def __init__(self, hostname=None, ip=None):
"""
V1LoadBalancerIngress - a model defined in Swagger
"""
self._hostname = None
self._ip = None
self.discriminator = None
if hostname is not None:
self.hostname = hostname
if ip is not None:
self.ip = ip
@property
def hostname(self):
"""
Gets the hostname of this V1LoadBalancerIngress.
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
:return: The hostname of this V1LoadBalancerIngress.
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""
Sets the hostname of this V1LoadBalancerIngress.
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
:param hostname: The hostname of this V1LoadBalancerIngress.
:type: str
"""
self._hostname = hostname
@property
def ip(self):
"""
Gets the ip of this V1LoadBalancerIngress.
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
:return: The ip of this V1LoadBalancerIngress.
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""
Sets the ip of this V1LoadBalancerIngress.
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
:param ip: The ip of this V1LoadBalancerIngress.
:type: str
"""
self._ip = ip
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1LoadBalancerIngress):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
7aaa4e6359d495ce0c93b2480ad757acdcb75af7 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/test/processors/support/PCodeTestControlBlock.pyi | cb2ed1addcc5cfe466618a3b60b750d14cce393e | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,691 | pyi | from typing import List
import ghidra.program.model.address
import ghidra.test.processors.support
import ghidra.test.processors.support.PCodeTestAbstractControlBlock
import java.lang
class PCodeTestControlBlock(ghidra.test.processors.support.PCodeTestAbstractControlBlock):
"""
PCodeTestControlBlock data is read from each binary test file and
identified by the MAIN_CONTROL_BLOCK_MAGIC 64-bit character field value at the start of the
data structure. Only one instance of this should exist within the binary.
"""
cachedProgramPath: unicode
testFile: ghidra.test.processors.support.PCodeTestFile
def equals(self, __a0: object) -> bool: ...
def getBreakOnDoneAddress(self) -> ghidra.program.model.address.Address: ...
def getBreakOnErrorAddress(self) -> ghidra.program.model.address.Address: ...
def getBreakOnPassAddress(self) -> ghidra.program.model.address.Address: ...
def getClass(self) -> java.lang.Class: ...
@overload
def getFunctionInfo(self, functionIndex: int) -> ghidra.test.processors.support.PCodeTestAbstractControlBlock.FunctionInfo: ...
@overload
def getFunctionInfo(self, functionName: unicode) -> ghidra.test.processors.support.PCodeTestAbstractControlBlock.FunctionInfo: ...
def getInfoStructureAddress(self) -> ghidra.program.model.address.Address: ...
def getNumberFunctions(self) -> int: ...
def getPrintfBufferAddress(self) -> ghidra.program.model.address.Address: ...
def getSprintf5Address(self) -> ghidra.program.model.address.Address: ...
def getTestGroups(self) -> List[ghidra.test.processors.support.PCodeTestGroup]: ...
def getTestResults(self) -> ghidra.test.processors.support.PCodeTestResults: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def breakOnDoneAddress(self) -> ghidra.program.model.address.Address: ...
@property
def breakOnErrorAddress(self) -> ghidra.program.model.address.Address: ...
@property
def breakOnPassAddress(self) -> ghidra.program.model.address.Address: ...
@property
def printfBufferAddress(self) -> ghidra.program.model.address.Address: ...
@property
def sprintf5Address(self) -> ghidra.program.model.address.Address: ...
@property
def testGroups(self) -> List[object]: ...
@property
def testResults(self) -> ghidra.test.processors.support.PCodeTestResults: ...
| [
"[email protected]"
] | |
287106fb8446b7e462b44ea9ab651a9c4016c4e0 | 2fac796fa58c67fb5a4a95a6e7f28cbef169318b | /python/hash-function.py | a3d79b09efd919544f997d3ba614a680411707d1 | [] | no_license | jwyx3/practices | f3fe087432e79c8e34f3af3a78dd10278b66dd38 | 6fec95b9b4d735727160905e754a698513bfb7d8 | refs/heads/master | 2021-03-12T20:41:59.816448 | 2019-04-14T06:47:30 | 2019-04-14T06:47:30 | 18,814,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | """
In the Hash data structure, a hash function is used to convert a string (or any other type) into an integer smaller than the hash size and greater than or equal to zero. The objective of designing a hash function is to "hash" the key as unpredictably as possible. A good hash function avoids collisions as much as possible. A widely used hash function algorithm uses the magic number 33, treating any string as a base-33 big integer, as follows:
hashcode("abcd") = (ascii(a) * 33^3 + ascii(b) * 33^2 + ascii(c) * 33 + ascii(d)) % HASH_SIZE
              = (97 * 33^3 + 98 * 33^2 + 99 * 33 + 100) % HASH_SIZE
              = 3595978 % HASH_SIZE
here HASH_SIZE is the capacity of the hash table (you can assume a hash table is like an array with index 0 ~ HASH_SIZE-1).
Given a string as a key and the size of the hash table, return the hash value of this key.
"""
class Solution:
"""
@param key: A String you should hash
@param HASH_SIZE: An integer
@return an integer
"""
def hashCode(self, key, HASH_SIZE):
# write your code here
ans = 0
for c in key:
ans = ans * 33 + ord(c)
            # reducing modulo HASH_SIZE each step keeps the intermediate value small
ans %= HASH_SIZE
return ans
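# A quick sanity check against the worked example in the problem statement
# (hypothetical driver code, not part of the original solution):
if __name__ == "__main__":
    solution = Solution()
    print(solution.hashCode("abcd", 10 ** 9))  # 3595978, as in the docstring
    print(solution.hashCode("abcd", 100))      # 3595978 % 100 == 78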
| [
"[email protected]"
] | |
ef9386fa4dfd9193cb188171976c71f1e3f4e457 | fa0c53ac2a91409eaf0fc7c082a40caae3ffa0d8 | /com/lc/demo/music163SpidersDemo/HeatMap/get_fansinfo.py | b015ef13d7f5a43765520d9b80976475977d36cc | [] | no_license | ahviplc/pythonLCDemo | aba6d8deb1e766841461bd772560d1d50450057b | 22f149600dcfd4d769e9f74f1f12e3c3564e88c2 | refs/heads/master | 2023-07-24T01:41:59.791913 | 2023-07-07T02:32:45 | 2023-07-07T02:32:45 | 135,969,516 | 7 | 2 | null | 2023-02-02T03:24:14 | 2018-06-04T04:12:49 | Python | UTF-8 | Python | false | false | 7,108 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
get_fansinfo.py
Heat map of the geographic distribution of a NetEase Cloud Music artist's fans (part 1): fetching the fan data
Version: 1.0
Author: LC
DateTime: 2019-01-22 13:03:52
OnePlusOne blog at its best -- together we create the power of 1+1>2! ~LC
LC blog url: http://oneplusone.top/index.html
"""
import requests
import codecs
import base64
import random
import math
from Crypto.Cipher import AES
# Fetch the JSON data of the fan page
def get_fans_json(url, data):
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Cookie': 'WM_TID=36fj4OhQ7NdU9DhsEbdKFbVmy9tNk1KM; _iuqxldmzr_=32; _ntes_nnid=26fc3120577a92f179a3743269d8d0d9,1536048184013; _ntes_nuid=26fc3120577a92f179a3743269d8d0d9; __utmc=94650624; __utmz=94650624.1536199016.26.8.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); WM_NI=2Uy%2FbtqzhAuF6WR544z5u96yPa%2BfNHlrtTBCGhkg7oAHeZje7SJiXAoA5YNCbyP6gcJ5NYTs5IAJHQBjiFt561sfsS5Xg%2BvZx1OW9mPzJ49pU7Voono9gXq9H0RpP5HTclE%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6eed5cb8085b2ab83ee7b87ac8c87cb60f78da2dac5439b9ca4b1d621f3e900b4b82af0fea7c3b92af28bb7d0e180b3a6a8a2f84ef6899ed6b740baebbbdab57394bfe587cd44b0aebcb5c14985b8a588b6658398abbbe96ff58d868adb4bad9ffbbacd49a2a7a0d7e6698aeb82bad779f7978fabcb5b82b6a7a7f73ff6efbd87f259f788a9ccf552bcef81b8bc6794a686d5bc7c97e99a90ee66ade7a9b9f4338cf09e91d33f8c8cad8dc837e2a3; JSESSIONID-WYYY=G%5CSvabx1X1F0JTg8HK5Z%2BIATVQdgwh77oo%2BDOXuG2CpwvoKPnNTKOGH91AkCHVdm0t6XKQEEnAFP%2BQ35cF49Y%2BAviwQKVN04%2B6ZbeKc2tNOeeC5vfTZ4Cme%2BwZVk7zGkwHJbfjgp1J9Y30o1fMKHOE5rxyhwQw%2B%5CDH6Md%5CpJZAAh2xkZ%3A1536204296617; __utma=94650624.1052021654.1536048185.1536199016.1536203113.27; __utmb=94650624.12.10.1536203113',
'Host': 'music.163.com',
'Referer': 'http://music.163.com/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/66.0.3359.181 Safari/537.36'}
try:
r = requests.post(url, headers=headers, data=data)
r.encoding = "utf-8"
if r.status_code == 200:
            # return the data in JSON format
return r.json()
    except Exception:
        print("Scraping failed!")
# Fetch a fan's location
def get_location(uid):
    # fan data
uri = 'https://music.163.com/weapi/user/playlist?csrf_token=cdee144903c5a32e6752f50180329fc9'
    # uid is the fan's user id
id_msg = '{uid: "' + str(
uid) + '", wordwrap: "7", offset: "0", total: "true", limit: "36", csrf_token: "cdee144903c5a32e6752f50180329fc9"}'
params, encSecKey = get_params(id_msg)
data = {'params': params, 'encSecKey': encSecKey}
userjson = get_fans_json(uri, data)
if len(userjson["playlist"]) > 0:
return userjson['playlist'][0]['creator']['city'], userjson['playlist'][0]['creator']['gender']
else:
print("id为{}用户没有创建歌单".format(uid))
# Build a list of dicts with each fan's nickname, location and gender
def get_items(html):
uinfos = []
for item in html['followeds']:
fans_items = {}
fans_items['nickname'] = item['nickname']
        # gender=0 means no gender info, gender=1 male, gender=2 female
fans_items['location'], fans_items['gender'] = get_location(item['userId'])
uinfos.append(fans_items)
return uinfos
# Generate 16 random characters
def generate_random_strs(length):
    string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    # loop counter i
    i = 0
    # initialize the random string
random_strs = ""
while i < length:
e = random.random() * len(string)
        # round down (floor)
e = math.floor(e)
random_strs = random_strs + list(string)[e]
i = i + 1
return random_strs
# AES encryption
def AESencrypt(msg, key):
    # pad if the length is not a multiple of 16 (PKCS#7-style padding)
    padding = 16 - len(msg) % 16
    # pad with the single character whose ordinal equals the padding length
    msg = msg + padding * chr(padding)
    # initialization vector used for encryption/decryption (must be 16 bytes)
    iv = '0102030405060708'
    cipher = AES.new(key, AES.MODE_CBC, iv)
    # encryption yields data of type bytes
    encryptedbytes = cipher.encrypt(msg)
    # Base64-encode, returning a byte string
    encodestrs = base64.b64encode(encryptedbytes)
    # decode the byte string as utf-8
    enctext = encodestrs.decode('utf-8')
    return enctext
# RSA encryption
def RSAencrypt(randomstrs, key, f):
    # reverse the random string
    string = randomstrs[::-1]
    # convert the random string to bytes
    text = bytes(string, 'utf-8')
    # textbook RSA; three-argument pow() does the modular exponentiation efficiently
    seckey = pow(int(codecs.encode(text, encoding='hex'), 16), int(key, 16), int(f, 16))
    return format(seckey, 'x').zfill(256)
# Build the encrypted request parameters
def get_params(id_msg):
    # msg can also be written as msg = {"offset": "page offset = (page number - 1) * 20", "limit": "20"}; the offset and limit parameters are required (per the site's js)
    # limit maxes out at 100; when set to 100 and fetching page 2, the previous page is assumed to hold 20 comments, i.e. page 2 returns 80 of the newest comments while 20 are shown on page 1
# msg = '{"rid":"R_SO_4_1302938992","offset":"0","total":"True","limit":"100","csrf_token":""}'
# offset = (page-1) * 20
# msg = '{offset":' + str(offset) + ',"limit":"20"}'
# msg = '{"rid":"R_SO_4_1302938992","offset":' + str(offset) + ',"total":"True","limit":"20","csrf_token":""}'
key = '0CoJUm6Qyw8W8jud'
f = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
e = '010001'
enctext = AESencrypt(id_msg, key)
    # generate a random string of length 16
    i = generate_random_strs(16)
    # two rounds of AES encryption yield the params value
    encText = AESencrypt(enctext, i)
    # RSA encryption yields the encSecKey value
    encSecKey = RSAencrypt(i, e, f)
return encText, encSecKey
# The artist's fan page; here it is Joker Xue (Xue Zhiqian)
# url = 'https://music.163.com/#/user/fans?id=97137413'
# URL of the page containing the fan data
# artist id
aid = '97137413'
url = 'https://music.163.com/weapi/user/getfolloweds?csrf_token=cdee144903c5a32e6752f50180329fc9'
page = 1
print("开始把粉丝信息写入fansinfo.txt")
while page:
offset = (page - 1) * 20
id_msg = '{userId: "' + aid + '", offset: ' + str(
offset) + ', total: "true", limit: "20", csrf_token: "cdee144903c5a32e6752f50180329fc9"}'
params, encSecKey = get_params(id_msg)
data = {'params': params, 'encSecKey': encSecKey}
html = get_fans_json(url, data)
if html is None:
break
else:
with open('fansinfo.txt', 'at', encoding='utf-8') as f:
for item in get_items(html):
f.write("{}\n".format(item))
f.close()
page += 1
| [
"[email protected]"
] | |
59b0690ca2abff226a2abfa17d3dc1b13b7496a8 | ab79f8297105a7d412303a8b33eaa25038f38c0b | /farmasi/vit_purchase_revision/__init__.py | ade7c3e199f0b4dff97e52ac1225012d0367bada | [] | no_license | adahra/addons | 41a23cbea1e35079f7a9864ade3c32851ee2fb09 | c5a5678379649ccdf57a9d55b09b30436428b430 | refs/heads/master | 2022-06-17T21:22:22.306787 | 2020-05-15T10:51:14 | 2020-05-15T10:51:14 | 264,167,002 | 1 | 0 | null | 2020-05-15T10:39:26 | 2020-05-15T10:39:26 | null | UTF-8 | Python | false | false | 1,083 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_order
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] | prog1@381544ba-743e-41a5-bf0d-221725b9d5af |
aafe5c830dbc10c7fa2a9bf7f557c558070e4f4d | ea3897a5ffe63abd7ced5676c9121f6bb970bb97 | /detransapp/manager/tipo_veiculo.py | de59c365c60009669c75c552ea7ba717754d252b | [] | no_license | brunowber/transnote2 | a045bdef18a9b07b70cc74483023dd877728682c | 7b799a71380aca342e879c5556cc24fcebdac1ca | refs/heads/master | 2020-04-10T08:53:29.613086 | 2018-03-15T19:52:53 | 2018-03-15T19:52:53 | 124,267,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | # coding: utf-8
"""Gerencia os tipos de veiculos"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.core.paginator import Paginator
from django.conf import settings
class TipoVeiculoManager(models.Manager):
"""Classe pra gerenciar os tipos de veiculos"""
def get_page(self, page, procurar):
if procurar is not None and procurar != '':
tipos = self.filter(Q(descricao__icontains=procurar))
else:
tipos = self.filter()
tipos = tipos.order_by('descricao')
paginator = Paginator(tipos, settings.NR_REGISTROS_PAGINA)
try:
tipos_page = paginator.page(page)
except Exception:
tipos_page = paginator.page(paginator.num_pages)
return tipos_page
def get_tipos_veiculo_sicronismo(self, data=None):
if data:
data = datetime.strptime(data, '%d/%m/%Y %H:%M:%S')
query = self.filter(data_alterado__gt=data)
return query.all()
return self.all()
| [
"[email protected]"
] | |
cfe6d1f97f618d4b6127c93dcdbfc2a3c8f22a1b | f2dd3825da3ed8b6e52058453a9340f5330581c2 | /0x01-python-if_else_loops_functions/6-print_comb3.py | 126ab1fd63b2a2b9d3402e4a4033068c893f1881 | [] | no_license | mlaizure/holbertonschool-higher_level_programming | 855c8b2672b59d27cba4c05ad8a7bb5f30b5d253 | 0d2bcdab97c31e79093497a1783a52e56e10ac6e | refs/heads/main | 2023-04-22T23:20:58.284834 | 2021-05-13T01:00:20 | 2021-05-13T01:00:20 | 319,399,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!/usr/bin/python3
for i in range(10):
for j in range(1, 10):
if (i == 8) and (j == 9):
print("{}{}".format(i, j))
elif (j > i):
print("{}{}".format(i, j), end=", ")
| [
"[email protected]"
] | |
1109d7204a3f0bd23def33ad91ac12f7647c0af5 | cf6d53e0cbf09a57e63967596bc6e9dce7dcb9e7 | /tests/models/test_models.py | 2dbc66aa067281880ecfd45709da1eadbd7cea70 | [
"MIT"
] | permissive | TaiSakuma/acondbs | 16d3bf32a3bd62a81a8575ed2433844acc4fc2a1 | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | refs/heads/master | 2021-05-26T16:09:39.772512 | 2020-04-21T17:58:41 | 2020-04-21T17:58:41 | 254,131,992 | 0 | 0 | MIT | 2020-04-08T15:48:28 | 2020-04-08T15:48:27 | null | UTF-8 | Python | false | false | 460 | py | from acondbs.db.sa import sa
# __________________________________________________________________||
def test_models(app):
'''test the models declared
'''
expected = {
'simulations', 'simulation_file_paths',
'maps', 'map_file_paths',
'beams', 'beam_file_paths'
}
model_names = sa.Model.metadata.tables.keys()
assert expected == model_names
# __________________________________________________________________||
| [
"[email protected]"
] | |
d95162a0c5e7b99f7717e2edcb78e2546dd0d3ca | 564ccf876cd04d199dd2364e2e138989598be98d | /Stock/venv/Lib/site-packages/statsmodels/multivariate/tests/test_factor.py | abb54ed36e7206b64d0ff837686b95c7cd8db69f | [] | no_license | hcz2000/pywork | 7eedcc7d53d85036b823a2ed96a1bffe761a8aec | 345820faa87de131203a98932a039725ff2bebda | refs/heads/master | 2023-08-15T10:48:36.176886 | 2023-07-26T09:23:09 | 2023-07-26T09:23:09 | 134,735,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,771 | py | # -*- coding: utf-8 -*-
import warnings
from statsmodels.compat.pandas import PD_LT_1_4
import os
import numpy as np
import pandas as pd
from statsmodels.multivariate.factor import Factor
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_raises, assert_array_equal,
assert_array_less, assert_allclose)
import pytest
try:
import matplotlib.pyplot as plt
missing_matplotlib = False
plt.switch_backend('Agg')
except ImportError:
missing_matplotlib = True
# Example data
# https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
# viewer.htm#statug_introreg_sect012.htm
X = pd.DataFrame([['Minas Graes', 2.068, 2.070, 1.580, 1, 0],
['Minas Graes', 2.068, 2.074, 1.602, 2, 1],
['Minas Graes', 2.090, 2.090, 1.613, 3, 0],
['Minas Graes', 2.097, 2.093, 1.613, 4, 1],
['Minas Graes', 2.117, 2.125, 1.663, 5, 0],
['Minas Graes', 2.140, 2.146, 1.681, 6, 1],
['Matto Grosso', 2.045, 2.054, 1.580, 7, 0],
['Matto Grosso', 2.076, 2.088, 1.602, 8, 1],
['Matto Grosso', 2.090, 2.093, 1.643, 9, 0],
['Matto Grosso', 2.111, 2.114, 1.643, 10, 1],
['Santa Cruz', 2.093, 2.098, 1.653, 11, 0],
['Santa Cruz', 2.100, 2.106, 1.623, 12, 1],
['Santa Cruz', 2.104, 2.101, 1.653, 13, 0]],
columns=['Loc', 'Basal', 'Occ', 'Max', 'id', 'alt'])
def test_auto_col_name():
# Test auto generated variable names when endog_names is None
mod = Factor(None, 2, corr=np.eye(11), endog_names=None,
smc=False)
assert_array_equal(mod.endog_names,
['var00', 'var01', 'var02', 'var03', 'var04', 'var05',
'var06', 'var07', 'var08', 'var09', 'var10'])
def test_direct_corr_matrix():
# Test specifying the correlation matrix directly
mod = Factor(None, 2, corr=np.corrcoef(X.iloc[:, 1:-1], rowvar=0),
smc=False)
results = mod.fit(tol=1e-10)
a = np.array([[0.965392158864, 0.225880658666255],
[0.967587154301, 0.212758741910989],
[0.929891035996, -0.000603217967568],
[0.486822656362, -0.869649573289374]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# Test set and get endog_names
mod.endog_names = X.iloc[:, 1:-1].columns
assert_array_equal(mod.endog_names, ['Basal', 'Occ', 'Max', 'id'])
# Test set endog_names with the wrong number of elements
assert_raises(ValueError, setattr, mod, 'endog_names',
X.iloc[:, :1].columns)
def test_unknown_fa_method_error():
    # Test that an error is raised if an unknown FA method is specified in fa.method
mod = Factor(X.iloc[:, 1:-1], 2, method='ab')
assert_raises(ValueError, mod.fit)
def test_example_compare_to_R_output():
# Testing basic functions and compare to R output
# R code for producing the results:
# library(psych)
# library(GPArotation)
# Basal = c(2.068, 2.068, 2.09, 2.097, 2.117, 2.14, 2.045, 2.076, 2.09, 2.111, 2.093, 2.1, 2.104)
# Occ = c(2.07, 2.074, 2.09, 2.093, 2.125, 2.146, 2.054, 2.088, 2.093, 2.114, 2.098, 2.106, 2.101)
# Max = c(1.58, 1.602, 1.613, 1.613, 1.663, 1.681, 1.58, 1.602, 1.643, 1.643, 1.653, 1.623, 1.653)
# id = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
# Y <- cbind(Basal, Occ, Max, id)
# a <- fa(Y, nfactors=2, fm="pa", rotate="none", SMC=FALSE, min.err=1e-10)
# b <- cbind(a$loadings[,1], -a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="Promax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="Varimax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="quartimax", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], -a$loadings[,2])
# b
# a <- fa(Y, nfactors=2, fm="pa", rotate="oblimin", SMC=TRUE, min.err=1e-10)
# b <- cbind(a$loadings[,1], a$loadings[,2])
# b
# No rotation without squared multiple correlations prior
    # produces the same results as in R `fa`
mod = Factor(X.iloc[:, 1:-1], 2, smc=False)
results = mod.fit(tol=1e-10)
a = np.array([[0.965392158864, 0.225880658666255],
[0.967587154301, 0.212758741910989],
[0.929891035996, -0.000603217967568],
[0.486822656362, -0.869649573289374]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# No rotation WITH squared multiple correlations prior
    # produces the same results as in R `fa`
mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
results = mod.fit()
a = np.array([[0.97541115, 0.20280987],
[0.97113975, 0.17207499],
[0.9618705, -0.2004196],
[0.37570708, -0.45821379]])
assert_array_almost_equal(results.loadings, a, decimal=8)
# Same as R GRArotation
results.rotate('varimax')
a = np.array([[0.98828898, -0.12587155],
[0.97424206, -0.15354033],
[0.84418097, -0.502714],
[0.20601929, -0.55558235]])
assert_array_almost_equal(results.loadings, a, decimal=8)
results.rotate('quartimax') # Same as R fa
a = np.array([[0.98935598, 0.98242714, 0.94078972, 0.33442284],
[0.117190049, 0.086943252, -0.283332952, -0.489159543]])
assert_array_almost_equal(results.loadings, a.T, decimal=8)
results.rotate('equamax') # Not the same as R fa
results.rotate('promax') # Not the same as R fa
results.rotate('biquartimin') # Not the same as R fa
results.rotate('oblimin') # Same as R fa
a = np.array([[1.02834170170, 1.00178840104, 0.71824931384,
-0.00013510048],
[0.06563421, 0.03096076, -0.39658839, -0.59261944]])
assert_array_almost_equal(results.loadings, a.T, decimal=8)
# Testing result summary string
results.rotate('varimax')
desired = (
""" Factor analysis results
=============================
Eigenvalues
-----------------------------
Basal Occ Max id
-----------------------------
2.9609 0.3209 0.0000 -0.0000
-----------------------------
-----------------------------
Communality
-----------------------------
Basal Occ Max id
-----------------------------
0.9926 0.9727 0.9654 0.3511
-----------------------------
-----------------------------
Pre-rotated loadings
-----------------------------------
factor 0 factor 1
-----------------------------------
Basal 0.9754 0.2028
Occ 0.9711 0.1721
Max 0.9619 -0.2004
id 0.3757 -0.4582
-----------------------------
-----------------------------
varimax rotated loadings
-----------------------------------
factor 0 factor 1
-----------------------------------
Basal 0.9883 -0.1259
Occ 0.9742 -0.1535
Max 0.8442 -0.5027
id 0.2060 -0.5556
=============================
""")
actual = results.summary().as_text()
actual = "\n".join(line.rstrip() for line in actual.splitlines()) + "\n"
assert_equal(actual, desired)
@pytest.mark.skipif(missing_matplotlib, reason='matplotlib not available')
def test_plots(close_figures):
mod = Factor(X.iloc[:, 1:], 3)
results = mod.fit()
results.rotate('oblimin')
fig = results.plot_scree()
fig_loadings = results.plot_loadings()
assert_equal(3, len(fig_loadings))
@pytest.mark.smoke
def test_getframe_smoke():
# mostly smoke tests for now
mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
res = mod.fit()
df = res.get_loadings_frame(style='raw')
assert_(isinstance(df, pd.DataFrame))
lds = res.get_loadings_frame(style='strings', decimals=3, threshold=0.3)
# Old implementation that warns
if PD_LT_1_4:
with warnings.catch_warnings():
warnings.simplefilter("always")
lds.to_latex()
else:
# Smoke test using new style to_latex
lds.style.to_latex()
# The Styler option require jinja2, skip if not available
try:
from jinja2 import Template # noqa:F401
except ImportError:
return
# TODO: separate this and do pytest.skip?
# Old implementation that warns
if PD_LT_1_4:
with warnings.catch_warnings():
warnings.simplefilter("always")
lds.to_latex()
else:
# Smoke test using new style to_latex
lds.style.to_latex()
try:
from pandas.io import formats as pd_formats
except ImportError:
from pandas import formats as pd_formats
ldf = res.get_loadings_frame(style='display')
assert_(isinstance(ldf, pd_formats.style.Styler))
assert_(isinstance(ldf.data, pd.DataFrame))
res.get_loadings_frame(style='display', decimals=3, threshold=0.2)
res.get_loadings_frame(style='display', decimals=3, color_max='GAINSBORO')
res.get_loadings_frame(style='display', decimals=3, threshold=0.45, highlight_max=False, sort_=False)
def test_factor_missing():
xm = X.iloc[:, 1:-1].copy()
nobs, k_endog = xm.shape
xm.iloc[2,2] = np.nan
mod = Factor(xm, 2)
assert_equal(mod.nobs, nobs - 1)
assert_equal(mod.k_endog, k_endog)
assert_equal(mod.endog.shape, (nobs - 1, k_endog))
def _zscore(x):
# helper function
return (x - x.mean(0)) / x.std(0)
@pytest.mark.smoke
def test_factor_scoring():
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
csv_path = os.path.join(dir_path, 'results', 'factor_data.csv')
y = pd.read_csv(csv_path)
csv_path = os.path.join(dir_path, 'results', 'factors_stata.csv')
f_s = pd.read_csv(csv_path)
# mostly smoke tests for now
mod = Factor(y, 2)
res = mod.fit(maxiter=1)
res.rotate('varimax')
f_reg = res.factor_scoring(method='reg')
assert_allclose(f_reg * [1, -1], f_s[["f1", 'f2']].values,
atol=1e-4, rtol=1e-3)
f_bart = res.factor_scoring()
assert_allclose(f_bart * [1, -1], f_s[["f1b", 'f2b']].values,
atol=1e-4, rtol=1e-3)
# check we have high correlation to ols and gls
f_ols = res.factor_scoring(method='ols')
f_gls = res.factor_scoring(method='gls')
f_reg_z = _zscore(f_reg)
f_ols_z = _zscore(f_ols)
f_gls_z = _zscore(f_gls)
assert_array_less(0.98, (f_ols_z * f_reg_z).mean(0))
assert_array_less(0.999, (f_gls_z * f_reg_z).mean(0))
# with oblique rotation
res.rotate('oblimin')
# Note: Stata has second factor with flipped sign compared to statsmodels
assert_allclose(res._corr_factors()[0, 1], (-1) * 0.25651037, rtol=1e-3)
f_reg = res.factor_scoring(method='reg')
assert_allclose(f_reg * [1, -1], f_s[["f1o", 'f2o']].values,
atol=1e-4, rtol=1e-3)
f_bart = res.factor_scoring()
assert_allclose(f_bart * [1, -1], f_s[["f1ob", 'f2ob']].values,
atol=1e-4, rtol=1e-3)
# check we have high correlation to ols and gls
f_ols = res.factor_scoring(method='ols')
f_gls = res.factor_scoring(method='gls')
f_reg_z = _zscore(f_reg)
f_ols_z = _zscore(f_ols)
f_gls_z = _zscore(f_gls)
assert_array_less(0.97, (f_ols_z * f_reg_z).mean(0))
assert_array_less(0.999, (f_gls_z * f_reg_z).mean(0))
# check provided endog
f_ols2 = res.factor_scoring(method='ols', endog=res.model.endog)
assert_allclose(f_ols2, f_ols, rtol=1e-13)
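
# A minimal standalone sketch of the API exercised above (uses only the
# module-level ``X`` fixture defined at the top of this file):
#
#     mod = Factor(X.iloc[:, 1:-1], 2, smc=True)
#     res = mod.fit()
#     res.rotate('varimax')
#     res.loadings          # rotated factor loadings
#     res.factor_scoring()  # Bartlett factor scores (the default method)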
| [
"[email protected]"
] | |
6f97590f8abcda2a4dafacaf8bbebbfc853f1fad | f78a027c0073685f554fece35906bc61adcc0eaa | /tenable/sc/accept_risks.py | ec14863f793f08b1d60dfa76e4cda8fa2458c8a2 | [
"MIT"
] | permissive | hdsiles/pyTenable | c235e8d89f79c70f96e4cc56fcfcca5202f6932a | 86296990755f43ed6211ee293b189bbe741b22c9 | refs/heads/master | 2020-04-24T07:11:27.317161 | 2019-02-20T21:20:07 | 2019-02-20T21:20:07 | 171,790,514 | 0 | 0 | MIT | 2019-02-21T03:05:18 | 2019-02-21T03:05:18 | null | UTF-8 | Python | false | false | 10,595 | py | '''
accept_risks
============
.. warning:: This module is flagged as "beta", and may change.
The following methods allow for interaction into the Tenable.sc
`Accept Risk <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html>`_ API.
Methods available on ``sc.accept_risks``:
.. rst-class:: hide-signature
.. autoclass:: AcceptRiskAPI
.. automethod:: apply
.. automethod:: create
.. automethod:: delete
.. automethod:: details
.. automethod:: list
.. automethod:: update
'''
from .base import SCEndpoint
class AcceptRiskAPI(SCEndpoint):
_code_status = 'beta'
def _constructor(self, **kw):
'''
document creator for acceptRisk creation and update calls.
'''
if 'repos' in kw:
# as repositories are passed in the API as a series of sub-documents
            # with the ID attribute set, we will convert the simple list that
# was passed to us into a series of documents as the API expects.
kw['repositories'] = [{'id': self._check('repo:id', r, int)}
for r in self._check('repos', kw['repos'], list)]
del(kw['repos'])
if 'plugin_id' in kw:
            # wrap the plugin id in the sub-document format the API expects.
kw['plugin'] = {'id': self._check('plugin_id', kw['plugin_id'], int)}
del(kw['plugin_id'])
if 'port' in kw:
# as the port will only be passed if the default of "any" isn't
# desired, we should check to make sure that the value passed is an
# integer, and then convert it into a string.
            kw['port'] = str(self._check('port', kw['port'], int))
if 'protocol' in kw:
# as the protocol will only be passed if the default of "any" isn't
# desired, we should check to make sure that the value passed is an
# integer, and then convert it into a string.
kw['protocol'] = str(self._check('protocol', kw['protocol'], int))
if 'comments' in kw:
# if a comment is attached to the rule, then lets just make sure
# that we actually have a string here before moving on.
self._check('comments', kw['comments'], str)
if 'expires' in kw:
# how many days until the accept risk rule expires? We should
            # simply check to see if the value is an integer.
self._check('expires', kw['expires'], int)
if 'ips' in kw:
            # if the ips list is passed, then set the host type to "ip" and
            # join the addresses into a comma-separated string.
kw['hostType'] = 'ip'
kw['hostValue'] = ','.join(self._check('ips', kw['ips'], list))
del(kw['ips'])
if 'uuids' in kw:
kw['hostType'] = 'uuid'
kw['hostValue'] = ','.join(self._check('uuids', kw['uuids'], list))
del(kw['uuids'])
if 'asset_list' in kw:
kw['hostType'] = 'asset'
kw['hostValue'] = {'id': self._check('asset_list', kw['asset_list'], int)}
del(kw['asset_list'])
return kw
def list(self, fields=None):
'''
Retrieves the list of accepted risk rules.
+ `SC Accept Risk List <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html#AcceptRiskRuleRESTReference-/acceptRiskRule>`_
Args:
fields (list, optional):
A list of attributes to return for each accepted risk rule.
Returns:
list: A list of accepted risk rules.
Examples:
>>> for rule in sc.accept_risks.list():
... pprint(rule)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('acceptRiskRule', params=params).json()['response']
def details(self, id, fields=None):
'''
Retrieves the details of an accepted risk rule.
+ `SC Accept Risk Details <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html#AcceptRiskRuleRESTReference-/acceptRiskRule/{id}>`_
Args:
id (int): The identifier for the accept risk rule.
fields (list, optional):
A list of attributes to return for each accepted risk rule.
Returns:
dict: The accept risk rule details.
Examples:
>>> rule = sc.accept_risks.details(1)
>>> pprint(rule)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('acceptRiskRule/{}'.format(self._check('id', id, int)),
params=params).json()['response']
def delete(self, id):
'''
Removes the accepted risk rule from Tenable.sc
+ `SC Accept Risk Delete <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html#acceptRiskRule_id_DELETE>`_
Args:
id (int): The identifier for the accept risk rule.
Returns:
str: Empty string response from the API.
Examples:
>>> sc.accept_risks.delete(1)
'''
return self._api.delete('acceptRiskRule/{}'.format(
self._check('id', id, int))).json()['response']
def apply(self, id, repo=None):
'''
Applies the accept risk rule for either all repositories, or the
repository specified.
+ `SC Accept Risk Apply <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html#AcceptRiskRuleRESTReference-/acceptRiskRule/apply>`_
Args:
id (int): The identifier for the accept risk rule.
repo (int, optional):
A specific repository to apply the rule to. The default if not
specified is all repositories (``0``).
Returns:
str: Empty string response from the API.
Examples:
>>> sc.accept_risks.apply(1)
'''
payload = dict()
if repo:
payload['repository'] = {'id': self._check('repo', repo, int)}
return self._api.post('acceptRiskRule/{}/apply'.format(
self._check('id', id, int)), json=payload).json()['response']
def create(self, plugin_id, repos, **kw):
'''
Creates a new accept risk rule. Either ips, uuids, or asset_list must
be specified.
+ `SC Accept Risk Create <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html#acceptRiskRule_POST>`_
Args:
plugin_id (int): The plugin to apply the accept risk rule to.
repos (list):
The list of repositories to apply this accept risk rule to.
asset_list (int, optional):
The asset list id to apply the accept risk rule to. Please note
that ``asset_list``, ``ips``, and ``uuids`` are mutually
exclusive.
comments (str, optional):
The comment associated to the accept risk rule.
expires (int, optional):
When should the rule expire? if no expiration is set, the rule
will never expire.
ips (list, optional):
A list of IPs to apply the accept risk rule to. Please note
that ``asset_list``, ``ips``, and ``uuids`` are mutually
exclusive.
port (int, optional):
The port to restrict this accept risk rule to. The default is
unrestricted.
protocol (int, optional):
The protocol to restrict the accept risk rule to. The default
is unrestricted.
uuids (list, optional):
The agent uuids to apply the accept risk rule to. Please note
that ``asset_list``, ``ips``, and ``uuids`` are mutually
exclusive.
Returns:
dict: The newly created accept risk rule definition.
Examples:
Create a rule to accept 97737 on 2 IPs for 90 days.
>>> rule = sc.accept_risks.create(97737, [1],
... ips=['192.168.0.101', '192.168.0.102'], expires=90)
'''
kw['plugin_id'] = plugin_id
kw['repos'] = repos
payload = self._constructor(**kw)
return self._api.post('acceptRiskRule', json=payload).json()['response']
def update(self, id, **kw):
'''
        Updates an existing accept risk rule.
+ `SC Accept Risk Create <https://docs.tenable.com/sccv/api/Accept-Risk-Rule.html#acceptRiskRule_POST>`_
Args:
id (int):
The identifier for the accept risk rule.
asset_list (int, optional):
The asset list id to apply the accept risk rule to. Please note
that ``asset_list``, ``ips``, and ``uuids`` are mutually
exclusive.
comments (str, optional):
The comment associated to the accept risk rule.
expires (int, optional):
When should the rule expire? if no expiration is set, the rule
will never expire.
ips (list, optional):
A list of IPs to apply the accept risk rule to. Please note
that ``asset_list``, ``ips``, and ``uuids`` are mutually
exclusive.
plugin_id (int, optional):
The plugin to apply the accept risk rule to.
port (int, optional):
The port to restrict this accept risk rule to. The default is
unrestricted.
protocol (int, optional):
The protocol to restrict the accept risk rule to. The default
is unrestricted.
repos (list):
The list of repositories to apply this accept risk rule to.
uuids (list, optional):
The agent uuids to apply the accept risk rule to. Please note
that ``asset_list``, ``ips``, and ``uuids`` are mutually
exclusive.
Returns:
            dict: The updated accept risk rule definition.
Examples:
Update the rule to accept 97737 on 2 IPs for 90 days.
>>> rule = sc.accept_risks.update(1,
... ips=['192.168.0.101', '192.168.0.102'], expires=90)
'''
payload = self._constructor(**kw)
return self._api.patch('acceptRiskRule/{}'.format(
self._check('id', id, int)), json=payload).json()['response']
| [
"[email protected]"
] | |
7720303c24c0be9856fb62ddbf3740ccfd24190a | 35e95fa04f4ba9d54244503f36fa29dfde8a0f75 | /docs/source/conf.py | 29952c93ff0a78a6a61a9b1e39647ceaddba1af7 | [
"MIT"
] | permissive | sanjana-dev/pMuTT | e4d949582337d93620d069a5d0592520b4ccd093 | 2246cffa875b4aa06ba0d7dec99a866b179e2e0e | refs/heads/master | 2020-11-29T16:16:27.337590 | 2019-12-12T16:59:16 | 2019-12-12T16:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,651 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pmutt'
copyright = '2019, Vlachos Research Group'
author = 'Vlachos Research Group'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.2.16'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
#'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pmuttdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pmutt.tex', 'pmutt Documentation',
'Vlachos Research Group', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pmutt', 'pmutt Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pmutt', 'pmutt Documentation',
author, 'pmutt', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Napoleon settings
napoleon_google_docstring = True
# Latex equations options
imgmath_image_format = 'svg'
imgmath_font_size = 18
# Logo
html_logo = './logos/pmutt_inverse_icon.png'
html_favicon = './logos/p_icon.ico' | [
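
# A typical local build of these docs (standard Sphinx CLI; the directory
# names assume this file lives at docs/source/conf.py as in this repo):
#
#     sphinx-build -b html source _build/html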
"[email protected]"
] | |
e6a7d567f745661c1573b2541231bfcb08a5de43 | 5c335469a9198d61e2095293d06ce78e781726d0 | /python/Semester 1/Tutorials/Tutorial 10/Problem5.py | 0009cf6d74244aeb60117c57758a3646c01cb532 | [] | no_license | LaurenceGA/programmingProjects | 5e203b450d11bff9cdb652661934ec3f797a6860 | 1fe3ea9a89be4c32cd68dd46da7a842b933c438b | refs/heads/master | 2021-01-15T10:59:09.890932 | 2017-06-18T11:20:28 | 2017-06-18T11:20:28 | 36,592,569 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
__author__ = 'Laurence Armstrong'
authorship_string = "{} created on {} by {} ({})\n{}\n".format(
"Problem5.py", "25/05/15", __author__, 15062061, "-----" * 15) \
if __name__ == '__main__' else ""
print(authorship_string, end="")
def get_details():
title = input("Title: ")
if title.lower() == 'exit':
return None
cost = input("Cost: ")
new_dict = {
'title': title,
'cost': cost
}
return new_dict
items = []
while True:
    item = get_details()
    if item is None:
        break
    items.append(item)
for i in items:
print(i) | [
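
# Example session (hypothetical input; the loop above stops on 'exit'):
#   Title: Pen
#   Cost: 2.50
#   Title: exit
#   {'title': 'Pen', 'cost': '2.50'}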
"[email protected]"
] | |
26f5e40567cd2201cd0369e9fa6957b2b1ad1a94 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/ak2CaloJetSequence_pPb_mc_bTag_cff.py | c487933c933e39e160512fb8ad38e7b52a68e56d | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,696 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
ak2Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("ak2CaloJets"),
matched = cms.InputTag("ak2HiGenJets")
)
ak2Caloparton = patJetPartonMatch.clone(src = cms.InputTag("ak2CaloJets")
)
ak2Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("ak2CaloJets"),
payload = "AK2Calo_HI"
)
ak2CaloJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('ak2CaloJets'))
ak2Caloclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak2HiGenJets'))
ak2CalobTagger = bTaggers("ak2Calo")
#create objects locally since they don't load properly otherwise
ak2Calomatch = ak2CalobTagger.match
ak2Caloparton = ak2CalobTagger.parton
ak2CaloPatJetFlavourAssociation = ak2CalobTagger.PatJetFlavourAssociation
ak2CaloJetTracksAssociatorAtVertex = ak2CalobTagger.JetTracksAssociatorAtVertex
ak2CaloSimpleSecondaryVertexHighEffBJetTags = ak2CalobTagger.SimpleSecondaryVertexHighEffBJetTags
ak2CaloSimpleSecondaryVertexHighPurBJetTags = ak2CalobTagger.SimpleSecondaryVertexHighPurBJetTags
ak2CaloCombinedSecondaryVertexBJetTags = ak2CalobTagger.CombinedSecondaryVertexBJetTags
ak2CaloCombinedSecondaryVertexMVABJetTags = ak2CalobTagger.CombinedSecondaryVertexMVABJetTags
ak2CaloJetBProbabilityBJetTags = ak2CalobTagger.JetBProbabilityBJetTags
ak2CaloSoftMuonByPtBJetTags = ak2CalobTagger.SoftMuonByPtBJetTags
ak2CaloSoftMuonByIP3dBJetTags = ak2CalobTagger.SoftMuonByIP3dBJetTags
ak2CaloTrackCountingHighEffBJetTags = ak2CalobTagger.TrackCountingHighEffBJetTags
ak2CaloTrackCountingHighPurBJetTags = ak2CalobTagger.TrackCountingHighPurBJetTags
ak2CaloPatJetPartonAssociation = ak2CalobTagger.PatJetPartonAssociation
ak2CaloImpactParameterTagInfos = ak2CalobTagger.ImpactParameterTagInfos
ak2CaloJetProbabilityBJetTags = ak2CalobTagger.JetProbabilityBJetTags
ak2CaloPositiveOnlyJetProbabilityJetTags = ak2CalobTagger.PositiveOnlyJetProbabilityJetTags
ak2CaloNegativeOnlyJetProbabilityJetTags = ak2CalobTagger.NegativeOnlyJetProbabilityJetTags
ak2CaloNegativeTrackCountingHighEffJetTags = ak2CalobTagger.NegativeTrackCountingHighEffJetTags
ak2CaloNegativeTrackCountingHighPur = ak2CalobTagger.NegativeTrackCountingHighPur
ak2CaloNegativeOnlyJetBProbabilityJetTags = ak2CalobTagger.NegativeOnlyJetBProbabilityJetTags
ak2CaloPositiveOnlyJetBProbabilityJetTags = ak2CalobTagger.PositiveOnlyJetBProbabilityJetTags
ak2CaloSecondaryVertexTagInfos = ak2CalobTagger.SecondaryVertexTagInfos
ak2CaloSimpleSecondaryVertexHighEffBJetTags = ak2CalobTagger.SimpleSecondaryVertexHighEffBJetTags
ak2CaloSimpleSecondaryVertexHighPurBJetTags = ak2CalobTagger.SimpleSecondaryVertexHighPurBJetTags
ak2CaloCombinedSecondaryVertexBJetTags = ak2CalobTagger.CombinedSecondaryVertexBJetTags
ak2CaloCombinedSecondaryVertexMVABJetTags = ak2CalobTagger.CombinedSecondaryVertexMVABJetTags
ak2CaloSecondaryVertexNegativeTagInfos = ak2CalobTagger.SecondaryVertexNegativeTagInfos
ak2CaloSimpleSecondaryVertexNegativeHighEffBJetTags = ak2CalobTagger.SimpleSecondaryVertexNegativeHighEffBJetTags
ak2CaloSimpleSecondaryVertexNegativeHighPurBJetTags = ak2CalobTagger.SimpleSecondaryVertexNegativeHighPurBJetTags
ak2CaloCombinedSecondaryVertexNegativeBJetTags = ak2CalobTagger.CombinedSecondaryVertexNegativeBJetTags
ak2CaloCombinedSecondaryVertexPositiveBJetTags = ak2CalobTagger.CombinedSecondaryVertexPositiveBJetTags
ak2CaloSoftMuonTagInfos = ak2CalobTagger.SoftMuonTagInfos
ak2CaloSoftMuonBJetTags = ak2CalobTagger.SoftMuonBJetTags
ak2CaloSoftMuonByIP3dBJetTags = ak2CalobTagger.SoftMuonByIP3dBJetTags
ak2CaloSoftMuonByPtBJetTags = ak2CalobTagger.SoftMuonByPtBJetTags
ak2CaloNegativeSoftMuonByPtBJetTags = ak2CalobTagger.NegativeSoftMuonByPtBJetTags
ak2CaloPositiveSoftMuonByPtBJetTags = ak2CalobTagger.PositiveSoftMuonByPtBJetTags
ak2CaloPatJetFlavourId = cms.Sequence(ak2CaloPatJetPartonAssociation*ak2CaloPatJetFlavourAssociation)
ak2CaloJetBtaggingIP = cms.Sequence(ak2CaloImpactParameterTagInfos *
(ak2CaloTrackCountingHighEffBJetTags +
ak2CaloTrackCountingHighPurBJetTags +
ak2CaloJetProbabilityBJetTags +
ak2CaloJetBProbabilityBJetTags +
ak2CaloPositiveOnlyJetProbabilityJetTags +
ak2CaloNegativeOnlyJetProbabilityJetTags +
ak2CaloNegativeTrackCountingHighEffJetTags +
ak2CaloNegativeTrackCountingHighPur +
ak2CaloNegativeOnlyJetBProbabilityJetTags +
ak2CaloPositiveOnlyJetBProbabilityJetTags
)
)
ak2CaloJetBtaggingSV = cms.Sequence(ak2CaloImpactParameterTagInfos
*
ak2CaloSecondaryVertexTagInfos
* (ak2CaloSimpleSecondaryVertexHighEffBJetTags
+
ak2CaloSimpleSecondaryVertexHighPurBJetTags
+
ak2CaloCombinedSecondaryVertexBJetTags
+
ak2CaloCombinedSecondaryVertexMVABJetTags
)
)
ak2CaloJetBtaggingNegSV = cms.Sequence(ak2CaloImpactParameterTagInfos
*
ak2CaloSecondaryVertexNegativeTagInfos
* (ak2CaloSimpleSecondaryVertexNegativeHighEffBJetTags
+
ak2CaloSimpleSecondaryVertexNegativeHighPurBJetTags
+
ak2CaloCombinedSecondaryVertexNegativeBJetTags
+
ak2CaloCombinedSecondaryVertexPositiveBJetTags
)
)
ak2CaloJetBtaggingMu = cms.Sequence(ak2CaloSoftMuonTagInfos * (ak2CaloSoftMuonBJetTags
+
ak2CaloSoftMuonByIP3dBJetTags
+
ak2CaloSoftMuonByPtBJetTags
+
ak2CaloNegativeSoftMuonByPtBJetTags
+
ak2CaloPositiveSoftMuonByPtBJetTags
)
)
ak2CaloJetBtagging = cms.Sequence(ak2CaloJetBtaggingIP
*ak2CaloJetBtaggingSV
*ak2CaloJetBtaggingNegSV
*ak2CaloJetBtaggingMu
)
ak2CalopatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("ak2CaloJets"),
genJetMatch = cms.InputTag("ak2Calomatch"),
genPartonMatch = cms.InputTag("ak2Caloparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak2Calocorr")),
JetPartonMapSource = cms.InputTag("ak2CaloPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("ak2CaloJetTracksAssociatorAtVertex"),
discriminatorSources = cms.VInputTag(cms.InputTag("ak2CaloSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("ak2CaloSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("ak2CaloCombinedSecondaryVertexBJetTags"),
cms.InputTag("ak2CaloCombinedSecondaryVertexMVABJetTags"),
cms.InputTag("ak2CaloJetBProbabilityBJetTags"),
cms.InputTag("ak2CaloJetProbabilityBJetTags"),
cms.InputTag("ak2CaloSoftMuonByPtBJetTags"),
cms.InputTag("ak2CaloSoftMuonByIP3dBJetTags"),
cms.InputTag("ak2CaloTrackCountingHighEffBJetTags"),
cms.InputTag("ak2CaloTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("ak2CaloJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = True,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = True
)
ak2CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak2CalopatJetsWithBtagging"),
genjetTag = 'ak2HiGenJets',
rParam = 0.2,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("hiGenParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(True),
bTagJetName = cms.untracked.string("ak2Calo"),
genPtMin = cms.untracked.double(15),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL')
)
ak2CaloJetSequence_mc = cms.Sequence(
ak2Caloclean
*
ak2Calomatch
*
ak2Caloparton
*
ak2Calocorr
*
ak2CaloJetID
*
ak2CaloPatJetFlavourId
*
ak2CaloJetTracksAssociatorAtVertex
*
ak2CaloJetBtagging
*
ak2CalopatJetsWithBtagging
*
ak2CaloJetAnalyzer
)
ak2CaloJetSequence_data = cms.Sequence(ak2Calocorr
*
ak2CaloJetTracksAssociatorAtVertex
*
ak2CaloJetBtagging
*
ak2CalopatJetsWithBtagging
*
ak2CaloJetAnalyzer
)
ak2CaloJetSequence_jec = ak2CaloJetSequence_mc
ak2CaloJetSequence_mix = ak2CaloJetSequence_mc
ak2CaloJetSequence = cms.Sequence(ak2CaloJetSequence_mc)
| [
"[email protected]"
] | |
853343599b84552784d51c77d51a4ebb35546e67 | 0232863fe1e0111b7b8641ef720b888584ed5ab3 | /venv/bin/django-admin | 91812a8852cc20c5950d49c2be084f3044b867b0 | [] | no_license | Smikhalcv/orm_migrations | fbc90322414e0e77707afa74410daf7c2e7bc703 | 1431d83f0bdddea3b0801556e9e35513dd121727 | refs/heads/master | 2022-12-04T10:11:21.180579 | 2020-08-31T10:14:21 | 2020-08-31T10:14:21 | 291,681,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | #!/home/dell-ubuntu/Документы/Python/django/orm_migrations/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
05029970f89ac9d562ce26601e5dc8e55d0f8313 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/conch/ssh/connection.py | 16ef6444a0539d1bb857d8fd0212dc4499882dbc | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 26,151 | py | # -*- test-case-name: twisted.conch.test.test_connection -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module contains the implementation of the ssh-connection service, which
allows access to the shell and port-forwarding.
Maintainer: Paul Swartz
"""
from __future__ import division, absolute_import
import string
import struct
import twisted.internet.error
from twisted.conch.ssh import service, common
from twisted.conch import error
from twisted.internet import defer
from twisted.python import log
from twisted.python.compat import (
nativeString, networkString, long, _bytesChr as chr)
class SSHConnection(service.SSHService):
"""
An implementation of the 'ssh-connection' service. It is used to
multiplex multiple channels over the single SSH connection.
@ivar localChannelID: the next number to use as a local channel ID.
@type localChannelID: L{int}
@ivar channels: a L{dict} mapping a local channel ID to C{SSHChannel}
subclasses.
@type channels: L{dict}
@ivar localToRemoteChannel: a L{dict} mapping a local channel ID to a
remote channel ID.
@type localToRemoteChannel: L{dict}
@ivar channelsToRemoteChannel: a L{dict} mapping a C{SSHChannel} subclass
to remote channel ID.
@type channelsToRemoteChannel: L{dict}
@ivar deferreds: a L{dict} mapping a local channel ID to a C{list} of
C{Deferreds} for outstanding channel requests. Also, the 'global'
key stores the C{list} of pending global request C{Deferred}s.
"""
name = b'ssh-connection'
def __init__(self):
self.localChannelID = 0 # this is the current # to use for channel ID
self.localToRemoteChannel = {} # local channel ID -> remote channel ID
self.channels = {} # local channel ID -> subclass of SSHChannel
self.channelsToRemoteChannel = {} # subclass of SSHChannel ->
# remote channel ID
self.deferreds = {"global": []} # local channel -> list of deferreds
# for pending requests or 'global' -> list of
# deferreds for global requests
self.transport = None # gets set later
def serviceStarted(self):
if hasattr(self.transport, 'avatar'):
self.transport.avatar.conn = self
def serviceStopped(self):
"""
Called when the connection is stopped.
"""
# Close any fully open channels
for channel in list(self.channelsToRemoteChannel.keys()):
self.channelClosed(channel)
# Indicate failure to any channels that were in the process of
# opening but not yet open.
while self.channels:
(_, channel) = self.channels.popitem()
log.callWithLogger(channel, channel.openFailed,
twisted.internet.error.ConnectionLost())
# Errback any unfinished global requests.
self._cleanupGlobalDeferreds()
def _cleanupGlobalDeferreds(self):
"""
All pending requests that have returned a deferred must be errbacked
when this service is stopped, otherwise they might be left uncalled and
uncallable.
"""
for d in self.deferreds["global"]:
d.errback(error.ConchError("Connection stopped."))
del self.deferreds["global"][:]
# packet methods
def ssh_GLOBAL_REQUEST(self, packet):
"""
The other side has made a global request. Payload::
string request type
bool want reply
<request specific data>
This dispatches to self.gotGlobalRequest.
"""
requestType, rest = common.getNS(packet)
wantReply, rest = ord(rest[0:1]), rest[1:]
ret = self.gotGlobalRequest(requestType, rest)
if wantReply:
reply = MSG_REQUEST_FAILURE
data = b''
if ret:
reply = MSG_REQUEST_SUCCESS
if isinstance(ret, (tuple, list)):
data = ret[1]
self.transport.sendPacket(reply, data)
def ssh_REQUEST_SUCCESS(self, packet):
"""
Our global request succeeded. Get the appropriate Deferred and call
it back with the packet we received.
"""
log.msg('RS')
self.deferreds['global'].pop(0).callback(packet)
def ssh_REQUEST_FAILURE(self, packet):
"""
Our global request failed. Get the appropriate Deferred and errback
it with the packet we received.
"""
log.msg('RF')
self.deferreds['global'].pop(0).errback(
error.ConchError('global request failed', packet))
def ssh_CHANNEL_OPEN(self, packet):
"""
The other side wants to get a channel. Payload::
string channel name
uint32 remote channel number
uint32 remote window size
uint32 remote maximum packet size
<channel specific data>
We get a channel from self.getChannel(), give it a local channel number
and notify the other side. Then notify the channel by calling its
channelOpen method.
"""
channelType, rest = common.getNS(packet)
senderChannel, windowSize, maxPacket = struct.unpack('>3L', rest[:12])
packet = rest[12:]
try:
channel = self.getChannel(channelType, windowSize, maxPacket,
packet)
localChannel = self.localChannelID
self.localChannelID += 1
channel.id = localChannel
self.channels[localChannel] = channel
self.channelsToRemoteChannel[channel] = senderChannel
self.localToRemoteChannel[localChannel] = senderChannel
self.transport.sendPacket(MSG_CHANNEL_OPEN_CONFIRMATION,
struct.pack('>4L', senderChannel, localChannel,
channel.localWindowSize,
channel.localMaxPacket)+channel.specificData)
log.callWithLogger(channel, channel.channelOpen, packet)
except Exception as e:
log.err(e, 'channel open failed')
if isinstance(e, error.ConchError):
textualInfo, reason = e.args
if isinstance(textualInfo, (int, long)):
# See #3657 and #3071
textualInfo, reason = reason, textualInfo
else:
reason = OPEN_CONNECT_FAILED
textualInfo = "unknown failure"
self.transport.sendPacket(
MSG_CHANNEL_OPEN_FAILURE,
struct.pack('>2L', senderChannel, reason) +
common.NS(networkString(textualInfo)) + common.NS(b''))
def ssh_CHANNEL_OPEN_CONFIRMATION(self, packet):
"""
The other side accepted our MSG_CHANNEL_OPEN request. Payload::
uint32 local channel number
uint32 remote channel number
uint32 remote window size
uint32 remote maximum packet size
<channel specific data>
Find the channel using the local channel number and notify its
channelOpen method.
"""
(localChannel, remoteChannel, windowSize,
maxPacket) = struct.unpack('>4L', packet[: 16])
specificData = packet[16:]
channel = self.channels[localChannel]
channel.conn = self
self.localToRemoteChannel[localChannel] = remoteChannel
self.channelsToRemoteChannel[channel] = remoteChannel
channel.remoteWindowLeft = windowSize
channel.remoteMaxPacket = maxPacket
log.callWithLogger(channel, channel.channelOpen, specificData)
def ssh_CHANNEL_OPEN_FAILURE(self, packet):
"""
The other side did not accept our MSG_CHANNEL_OPEN request. Payload::
uint32 local channel number
uint32 reason code
string reason description
Find the channel using the local channel number and notify it by
calling its openFailed() method.
"""
localChannel, reasonCode = struct.unpack('>2L', packet[:8])
reasonDesc = common.getNS(packet[8:])[0]
channel = self.channels[localChannel]
del self.channels[localChannel]
channel.conn = self
reason = error.ConchError(reasonDesc, reasonCode)
log.callWithLogger(channel, channel.openFailed, reason)
def ssh_CHANNEL_WINDOW_ADJUST(self, packet):
"""
The other side is adding bytes to its window. Payload::
uint32 local channel number
uint32 bytes to add
Call the channel's addWindowBytes() method to add new bytes to the
remote window.
"""
localChannel, bytesToAdd = struct.unpack('>2L', packet[:8])
channel = self.channels[localChannel]
log.callWithLogger(channel, channel.addWindowBytes, bytesToAdd)
def ssh_CHANNEL_DATA(self, packet):
"""
The other side is sending us data. Payload::
uint32 local channel number
string data
Check to make sure the other side hasn't sent too much data (more
than what's in the window, or more than the maximum packet size). If
they have, close the channel. Otherwise, decrease the available
window and pass the data to the channel's dataReceived().
"""
localChannel, dataLength = struct.unpack('>2L', packet[:8])
channel = self.channels[localChannel]
# XXX should this move to dataReceived to put client in charge?
if (dataLength > channel.localWindowLeft or
dataLength > channel.localMaxPacket): # more data than we want
log.callWithLogger(channel, log.msg, 'too much data')
self.sendClose(channel)
return
#packet = packet[:channel.localWindowLeft+4]
data = common.getNS(packet[4:])[0]
channel.localWindowLeft -= dataLength
if channel.localWindowLeft < channel.localWindowSize // 2:
self.adjustWindow(channel, channel.localWindowSize - \
channel.localWindowLeft)
#log.msg('local window left: %s/%s' % (channel.localWindowLeft,
# channel.localWindowSize))
log.callWithLogger(channel, channel.dataReceived, data)
def ssh_CHANNEL_EXTENDED_DATA(self, packet):
"""
        The other side is sending us extended data. Payload::
uint32 local channel number
uint32 type code
string data
Check to make sure the other side hasn't sent too much data (more
        than what's in the window, or more than the maximum packet size). If
they have, close the channel. Otherwise, decrease the available
window and pass the data and type code to the channel's
extReceived().
"""
localChannel, typeCode, dataLength = struct.unpack('>3L', packet[:12])
channel = self.channels[localChannel]
if (dataLength > channel.localWindowLeft or
dataLength > channel.localMaxPacket):
log.callWithLogger(channel, log.msg, 'too much extdata')
self.sendClose(channel)
return
data = common.getNS(packet[8:])[0]
channel.localWindowLeft -= dataLength
if channel.localWindowLeft < channel.localWindowSize // 2:
self.adjustWindow(channel, channel.localWindowSize -
channel.localWindowLeft)
log.callWithLogger(channel, channel.extReceived, typeCode, data)
def ssh_CHANNEL_EOF(self, packet):
"""
The other side is not sending any more data. Payload::
uint32 local channel number
Notify the channel by calling its eofReceived() method.
"""
localChannel = struct.unpack('>L', packet[:4])[0]
channel = self.channels[localChannel]
log.callWithLogger(channel, channel.eofReceived)
def ssh_CHANNEL_CLOSE(self, packet):
"""
The other side is closing its end; it does not want to receive any
more data. Payload::
uint32 local channel number
        Notify the channel by calling its closeReceived() method. If
the channel has also sent a close message, call self.channelClosed().
"""
localChannel = struct.unpack('>L', packet[:4])[0]
channel = self.channels[localChannel]
log.callWithLogger(channel, channel.closeReceived)
channel.remoteClosed = True
if channel.localClosed and channel.remoteClosed:
self.channelClosed(channel)
def ssh_CHANNEL_REQUEST(self, packet):
"""
The other side is sending a request to a channel. Payload::
uint32 local channel number
string request name
bool want reply
<request specific data>
Pass the message to the channel's requestReceived method. If the
other side wants a reply, add callbacks which will send the
reply.
"""
localChannel = struct.unpack('>L', packet[:4])[0]
requestType, rest = common.getNS(packet[4:])
wantReply = ord(rest[0:1])
channel = self.channels[localChannel]
d = defer.maybeDeferred(log.callWithLogger, channel,
channel.requestReceived, requestType, rest[1:])
if wantReply:
d.addCallback(self._cbChannelRequest, localChannel)
d.addErrback(self._ebChannelRequest, localChannel)
return d
def _cbChannelRequest(self, result, localChannel):
"""
Called back if the other side wanted a reply to a channel request. If
the result is true, send a MSG_CHANNEL_SUCCESS. Otherwise, raise
a C{error.ConchError}
@param result: the value returned from the channel's requestReceived()
method. If it's False, the request failed.
@type result: L{bool}
@param localChannel: the local channel ID of the channel to which the
request was made.
@type localChannel: L{int}
@raises ConchError: if the result is False.
"""
if not result:
raise error.ConchError('failed request')
self.transport.sendPacket(MSG_CHANNEL_SUCCESS, struct.pack('>L',
self.localToRemoteChannel[localChannel]))
def _ebChannelRequest(self, result, localChannel):
"""
        Called if the other side wanted a reply to the channel request and
the channel request failed.
@param result: a Failure, but it's not used.
@param localChannel: the local channel ID of the channel to which the
request was made.
@type localChannel: L{int}
"""
self.transport.sendPacket(MSG_CHANNEL_FAILURE, struct.pack('>L',
self.localToRemoteChannel[localChannel]))
def ssh_CHANNEL_SUCCESS(self, packet):
"""
Our channel request to the other side succeeded. Payload::
uint32 local channel number
Get the C{Deferred} out of self.deferreds and call it back.
"""
localChannel = struct.unpack('>L', packet[:4])[0]
if self.deferreds.get(localChannel):
d = self.deferreds[localChannel].pop(0)
log.callWithLogger(self.channels[localChannel],
d.callback, '')
def ssh_CHANNEL_FAILURE(self, packet):
"""
Our channel request to the other side failed. Payload::
uint32 local channel number
Get the C{Deferred} out of self.deferreds and errback it with a
C{error.ConchError}.
"""
localChannel = struct.unpack('>L', packet[:4])[0]
if self.deferreds.get(localChannel):
d = self.deferreds[localChannel].pop(0)
log.callWithLogger(self.channels[localChannel],
d.errback,
error.ConchError('channel request failed'))
# methods for users of the connection to call
def sendGlobalRequest(self, request, data, wantReply=0):
"""
        Send a global request for this connection. Currently this is only used
for remote->local TCP forwarding.
@type request: L{bytes}
@type data: L{bytes}
@type wantReply: L{bool}
@rtype C{Deferred}/L{None}
"""
self.transport.sendPacket(MSG_GLOBAL_REQUEST,
common.NS(request)
+ (wantReply and b'\xff' or b'\x00')
+ data)
if wantReply:
d = defer.Deferred()
self.deferreds['global'].append(d)
return d
def openChannel(self, channel, extra=b''):
"""
Open a new channel on this connection.
@type channel: subclass of C{SSHChannel}
@type extra: L{bytes}
"""
log.msg('opening channel %s with %s %s'%(self.localChannelID,
channel.localWindowSize, channel.localMaxPacket))
self.transport.sendPacket(MSG_CHANNEL_OPEN, common.NS(channel.name)
+ struct.pack('>3L', self.localChannelID,
channel.localWindowSize, channel.localMaxPacket)
+ extra)
channel.id = self.localChannelID
self.channels[self.localChannelID] = channel
self.localChannelID += 1
def sendRequest(self, channel, requestType, data, wantReply=0):
"""
Send a request to a channel.
@type channel: subclass of C{SSHChannel}
@type requestType: L{bytes}
@type data: L{bytes}
@type wantReply: L{bool}
@rtype C{Deferred}/L{None}
"""
if channel.localClosed:
return
log.msg('sending request %r' % (requestType))
self.transport.sendPacket(MSG_CHANNEL_REQUEST, struct.pack('>L',
self.channelsToRemoteChannel[channel])
+ common.NS(requestType)+chr(wantReply)
+ data)
if wantReply:
d = defer.Deferred()
self.deferreds.setdefault(channel.id, []).append(d)
return d
def adjustWindow(self, channel, bytesToAdd):
"""
Tell the other side that we will receive more data. This should not
normally need to be called as it is managed automatically.
@type channel: subclass of L{SSHChannel}
@type bytesToAdd: L{int}
"""
if channel.localClosed:
return # we're already closed
self.transport.sendPacket(MSG_CHANNEL_WINDOW_ADJUST, struct.pack('>2L',
self.channelsToRemoteChannel[channel],
bytesToAdd))
log.msg('adding %i to %i in channel %i' % (bytesToAdd,
channel.localWindowLeft, channel.id))
channel.localWindowLeft += bytesToAdd
def sendData(self, channel, data):
"""
Send data to a channel. This should not normally be used: instead use
channel.write(data) as it manages the window automatically.
@type channel: subclass of L{SSHChannel}
@type data: L{bytes}
"""
if channel.localClosed:
return # we're already closed
self.transport.sendPacket(MSG_CHANNEL_DATA, struct.pack('>L',
self.channelsToRemoteChannel[channel]) +
common.NS(data))
def sendExtendedData(self, channel, dataType, data):
"""
Send extended data to a channel. This should not normally be used:
instead use channel.writeExtendedData(data, dataType) as it manages
the window automatically.
@type channel: subclass of L{SSHChannel}
@type dataType: L{int}
@type data: L{bytes}
"""
if channel.localClosed:
return # we're already closed
self.transport.sendPacket(MSG_CHANNEL_EXTENDED_DATA, struct.pack('>2L',
self.channelsToRemoteChannel[channel],dataType) \
+ common.NS(data))
def sendEOF(self, channel):
"""
Send an EOF (End of File) for a channel.
@type channel: subclass of L{SSHChannel}
"""
if channel.localClosed:
return # we're already closed
log.msg('sending eof')
self.transport.sendPacket(MSG_CHANNEL_EOF, struct.pack('>L',
self.channelsToRemoteChannel[channel]))
def sendClose(self, channel):
"""
Close a channel.
@type channel: subclass of L{SSHChannel}
"""
if channel.localClosed:
return # we're already closed
log.msg('sending close %i' % channel.id)
self.transport.sendPacket(MSG_CHANNEL_CLOSE, struct.pack('>L',
self.channelsToRemoteChannel[channel]))
channel.localClosed = True
if channel.localClosed and channel.remoteClosed:
self.channelClosed(channel)
# methods to override
def getChannel(self, channelType, windowSize, maxPacket, data):
"""
The other side requested a channel of some sort.
channelType is the type of channel being requested,
windowSize is the initial size of the remote window,
maxPacket is the largest packet we should send,
data is any other packet data (often nothing).
We return a subclass of L{SSHChannel}.
By default, this dispatches to a method 'channel_channelType' with any
non-alphanumerics in the channelType replace with _'s. If it cannot
find a suitable method, it returns an OPEN_UNKNOWN_CHANNEL_TYPE error.
The method is called with arguments of windowSize, maxPacket, data.
@type channelType: L{bytes}
@type windowSize: L{int}
@type maxPacket: L{int}
@type data: L{bytes}
@rtype: subclass of L{SSHChannel}/L{tuple}
"""
log.msg('got channel %r request' % (channelType))
if hasattr(self.transport, "avatar"): # this is a server!
chan = self.transport.avatar.lookupChannel(channelType,
windowSize,
maxPacket,
data)
else:
channelType = channelType.translate(TRANSLATE_TABLE)
attr = 'channel_%s' % nativeString(channelType)
f = getattr(self, attr, None)
if f is not None:
chan = f(windowSize, maxPacket, data)
else:
chan = None
if chan is None:
raise error.ConchError('unknown channel',
OPEN_UNKNOWN_CHANNEL_TYPE)
else:
chan.conn = self
return chan
def gotGlobalRequest(self, requestType, data):
"""
        We got a global request. Pretty much, this is just used by the client
to request that we forward a port from the server to the client.
Returns either:
- 1: request accepted
- 1, <data>: request accepted with request specific data
- 0: request denied
By default, this dispatches to a method 'global_requestType' with
-'s in requestType replaced with _'s. The found method is passed data.
If this method cannot be found, this method returns 0. Otherwise, it
returns the return value of that method.
@type requestType: L{bytes}
@type data: L{bytes}
@rtype: L{int}/L{tuple}
"""
log.msg('got global %s request' % requestType)
if hasattr(self.transport, 'avatar'): # this is a server!
return self.transport.avatar.gotGlobalRequest(requestType, data)
requestType = nativeString(requestType.replace(b'-',b'_'))
f = getattr(self, 'global_%s' % requestType, None)
if not f:
return 0
return f(data)
def channelClosed(self, channel):
"""
Called when a channel is closed.
It clears the local state related to the channel, and calls
channel.closed().
MAKE SURE YOU CALL THIS METHOD, even if you subclass L{SSHConnection}.
If you don't, things will break mysteriously.
@type channel: L{SSHChannel}
"""
if channel in self.channelsToRemoteChannel: # actually open
channel.localClosed = channel.remoteClosed = True
del self.localToRemoteChannel[channel.id]
del self.channels[channel.id]
del self.channelsToRemoteChannel[channel]
for d in self.deferreds.pop(channel.id, []):
d.errback(error.ConchError("Channel closed."))
log.callWithLogger(channel, channel.closed)
MSG_GLOBAL_REQUEST = 80
MSG_REQUEST_SUCCESS = 81
MSG_REQUEST_FAILURE = 82
MSG_CHANNEL_OPEN = 90
MSG_CHANNEL_OPEN_CONFIRMATION = 91
MSG_CHANNEL_OPEN_FAILURE = 92
MSG_CHANNEL_WINDOW_ADJUST = 93
MSG_CHANNEL_DATA = 94
MSG_CHANNEL_EXTENDED_DATA = 95
MSG_CHANNEL_EOF = 96
MSG_CHANNEL_CLOSE = 97
MSG_CHANNEL_REQUEST = 98
MSG_CHANNEL_SUCCESS = 99
MSG_CHANNEL_FAILURE = 100
OPEN_ADMINISTRATIVELY_PROHIBITED = 1
OPEN_CONNECT_FAILED = 2
OPEN_UNKNOWN_CHANNEL_TYPE = 3
OPEN_RESOURCE_SHORTAGE = 4
EXTENDED_DATA_STDERR = 1
messages = {}
for name, value in locals().copy().items():
if name[:4] == 'MSG_':
messages[value] = name # Doesn't handle doubles
alphanums = networkString(string.ascii_letters + string.digits)
TRANSLATE_TABLE = b''.join([chr(i) in alphanums and chr(i) or b'_'
for i in range(256)])
SSHConnection.protocolMessages = messages
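
# A minimal subclass sketch (hypothetical names) of the 'channel_<type>'
# dispatch that getChannel() documents above:
#
#     from twisted.conch.ssh.channel import SSHChannel
#
#     class MyConnection(SSHConnection):
#         def channel_session(self, windowSize, maxPacket, data):
#             return SSHChannel(remoteWindow=windowSize,
#                               remoteMaxPacket=maxPacket, conn=self)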
| [
"[email protected]"
] | |
5431783e06c845aae4765079bfd50eda31b68404 | af29d4d447f03485b7b736a914e5620148e17a09 | /FirstPy/game0.py | 31ffa46fe7f3c4649b4e360e087b1a28a04efc27 | [] | no_license | Flyhiee/YesPython | e2649f40e083d2a609fbc37619e68b5bc8d46482 | 642fc4d2d6b8c1f68607823a9dd03fa18ef014c0 | refs/heads/master | 2020-04-16T09:43:43.203844 | 2019-01-31T03:11:09 | 2019-01-31T03:11:09 | 165,474,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | print("Welcome!")
g = input("Guess the number: ")
guess = int(g)
if guess == 5:
print("You win!")
else:
print("You lose!")
print("Game over!")
| [
"[email protected]"
] | |
1eaf182b0e73247e26c9e8358da022870f789f95 | 3649dce8b44c72bbfee56adf4e29ca6c5ba2703a | /code_up2767.py | ae839227919c462ed08a8a7aa67ef53ff3a18735 | [] | no_license | beOk91/code_up | 03c7aca76e955e3a59d797299749e7fc2457f24a | ca1042ce216cc0a80e9b3d3ad363bc29c4ed7690 | refs/heads/master | 2022-12-06T08:23:00.788315 | 2020-08-20T11:21:59 | 2020-08-20T11:21:59 | 284,844,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | k,n=map(int,input().strip().split())
k_list=list(map(int,input().strip().split()))
for i in range(k,n):
    total = 0  # running window sum (avoid shadowing the built-in sum())
    for j in range(i-1, i-k-1, -1):  # add up the previous k terms
        total += k_list[j]
    k_list.append(total)
print(k_list[n-1]%100007)
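# An O(n) sliding-window variant of the same recurrence (illustrative sketch;
# the loop above re-adds the previous k terms each step, which is O(n*k)):
#
#     window = sum(k_list[:k])
#     for i in range(k, n):
#         k_list.append(window)
#         window += k_list[-1] - k_list[i - k]
#     print(k_list[n - 1] % 100007)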
"""
2 3
7 7 14
1,
""" | [
"[email protected]"
] | |
aeb0b56dea0c1e975ee0bc44ba268aa0fa073b6d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /SdGE4ZBtuMKyxDqQ6_11.py | 2e4abab70e2ebb9141017db55244f13d9a4fe0c1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | """
Create a function that takes a string and returns the first character that
repeats. If there is no repeat of a character, then return "-1".
### Examples
first_repeat("legolas") ➞ "l"
first_repeat("Gandalf") ➞ "a"
first_repeat("Balrog") ➞ "-1"
first_repeat("Isildur") ➞ "-1"
### Notes
Tests are case sensitive.
"""
def first_repeat(chars):
unique = []
for n in chars:
if n not in unique:
unique.append(n)
else:
return n
return '-1'
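# Quick self-check (added sketch; mirrors the examples in the docstring above):
if __name__ == '__main__':
    assert first_repeat("legolas") == "l"
    assert first_repeat("Gandalf") == "a"
    assert first_repeat("Balrog") == "-1"
    assert first_repeat("Isildur") == "-1"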
| [
"[email protected]"
] | |
7ebe17e04fc46ba0c5301b9c134504b0a8a7ebcb | f2af0ade7716ef8441c9c085f3297f66a0cbcfec | /app/passion_applets/object_page/applets/games/word_spell.py | 5918f1ba2149c5081dc826fce5b2862f235cb490 | [] | no_license | vectorhuztt/study_center_applets_copy | 09fa0d2aafa55bb3cebfcf72ea4c64ebcfad54a5 | e162b6999ebd879ed2833ab54d15591e4864fa70 | refs/heads/master | 2022-09-16T15:18:23.161164 | 2020-06-05T01:39:28 | 2020-06-05T01:39:28 | 269,503,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,271 | py | import random
import string
import time
from selenium.webdriver.common.by import By
from app.passion_applets.object_page.applets.common import CommonPage
from app.passion_applets.object_page.applets.games.word_reform import ReformWordGame
from conf.decorator import teststep
class WordSpellGame(CommonPage):
def __init__(self):
super().__init__()
self.word_reform = ReformWordGame()
@teststep
def wait_check_spell_page(self):
"""单词拼写页面检查点"""
locator = (By.CSS_SELECTOR, '.DCPX-index--container')
return self.get_wait_check_page_ele(locator)
@teststep
def wait_check_random_spell_page(self):
"""单词随机拼写页面检查点"""
locator = (By.CSS_SELECTOR, '.DCPX-index--input.DCPX-index--spell')
return self.get_wait_check_page_ele(locator)
@teststep
def wait_check_spell_right_word_page(self):
"""随机拼写正确单词页面检查点"""
locator = (By.CSS_SELECTOR, '.DCPX-index--word.DCPX-index--visible')
return self.get_wait_check_page_ele(locator)
@teststep
def wait_check_listen_spell_page(self):
"""单词听写页面检查点"""
locator = (By.CSS_SELECTOR, '.DCPX-index--quiz-voice')
return self.get_wait_check_page_ele(locator)
@teststep
def word_explain(self):
"""单词解释"""
locator = (By.CSS_SELECTOR, '.DCPX-index--explain')
return self.get_wait_check_page_ele(locator)
@teststep
def word_input_wrap(self):
"""拼写输入栏"""
locator = (By.CSS_SELECTOR, '.DCPX-index--input')
return self.get_wait_check_page_ele(locator)
@teststep
def random_input_warp(self):
"""随机拼写输入栏"""
locator = (By.CSS_SELECTOR, '.DCPX-index--blank')
return self.get_wait_check_page_ele(locator, single=False)
@teststep
def forget_btn(self):
"""忘记了按钮"""
locator = (By.XPATH, "//wx-view[@class='DCPX-index--container']//span[2][contains(text(),'忘记了')]/..")
return self.get_wait_check_page_ele(locator)
@teststep
def keyboard_key(self, word_alpha):
"""键盘字母定位"""
locator = (By.XPATH, '//wx-view[@class="Keyboard-index--letter-wrap"]//'
'span[2][text()="{}"]'.format(word_alpha))
return self.get_wait_check_page_ele(locator)
@teststep
def keyboard_icon_key(self):
"""键盘图标键"""
locator = (By.CSS_SELECTOR, '.Keyboard-index--icon')
return self.get_wait_check_page_ele(locator, single=False)
@teststep
def enter_key(self):
"""键盘字母定位"""
locator = (By.CSS_SELECTOR, '.Keyboard-index--letter-wrap')
ele = self.get_wait_check_page_ele(locator, single=False)
return ele[-1]
@teststep
def page_alpha(self):
"""获取挖空后的单词"""
locator = (By.CSS_SELECTOR, '.DCPX-index--spell wx-text')
ele = self.get_wait_check_page_ele(locator, single=False)
alpha_list = []
for x in ele:
if x.text == '':
alpha_list.append('_')
else:
alpha_list.append(x.text)
return alpha_list
@teststep
def input_word_operate(self, input_words, do_right):
for alpha in input_words:
if alpha == ' ':
continue
self.keyboard_key(alpha).click()
self.enter_key().click()
if not do_right:
if not self.wait_check_spell_right_word_page():
                self.base_assert.except_error('After submitting the spelling, the correct-word reveal for random spelling did not appear')
self.enter_key().click()
time.sleep(2)
else:
time.sleep(4)
@teststep
def random_spell_operate(self, right_answer, do_right):
"""随机拼写操作"""
if self.wait_check_random_spell_page():
            print('---- random-spelling game ----\n')
page_word = self.page_alpha()
input_list = [y for x, y in zip(page_word, right_answer) if x != y]
random_str = random.sample(string.ascii_lowercase, len(input_list))
input_alphas = input_list if do_right else random_str
            print('letters to type:', input_alphas, '\n')
self.input_word_operate(input_alphas, do_right=do_right)
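    # Example of the blank-filling computation above (sketch):
    #   page letters : ['a', '_', 'p', '_', 'e']
    #   right answer : 'apple'
    #   letters typed -> ['p', 'l']  (only positions where the page shows '_')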
@teststep
def spell_operate(self, right_answer, wrong_count=None, do_right=False, is_review=False):
"""拼写单词操作"""
if do_right:
input_word = right_answer
else:
length = random.choice(range(2, 5))
input_word = random.sample(string.ascii_lowercase, length)
        print('word to type:', input_word, '\n')
self.input_word_operate(input_word, do_right=do_right)
        # After a wrong answer a remediation game is inserted: random spelling or word reassembly
if not do_right and not is_review:
if wrong_count < 5:
status = random.choice([True, False])
if not self.wait_check_random_spell_page():
                    self.base_assert.except_error('After a wrong spelling, the random-spelling page did not appear')
else:
self.random_spell_operate(right_answer, do_right=status)
else:
if self.wait_check_random_spell_page():
                    self.base_assert.except_error('The random-spelling game still appears after the fifth wrong attempt')
@teststep
def spell_word_game_operate(self, all_word_info: dict, *, do_right: bool, bank_index: list, wrong_index: list,
right_info: list, skip_index_info: list, record_id_info: dict,
type_list: list, word_count: int, is_review: bool):
"""
        Word-spelling page flow.
        :param all_word_info: correct word/phrase info
        :param do_right: whether to answer correctly
        :param bank_index: global question index
        :param wrong_index: indices of wrongly spelled questions
        :param right_info: words answered correctly so far
        :param skip_index_info: indices of skipped questions
        :param record_id_info: recorded word/phrase ids
        :param type_list: recorded game types
        :param word_count: number of words
        :param is_review: whether this is review mode
"""
while self.wait_check_spell_page():
bank_id = self.game_id()
            # Check that an already-completed word does not reappear
if bank_id in right_info:
                self.base_assert.except_error('Word already completed but appeared again: ' + bank_id)
            # Check that the current count is consistent with the id that appeared
game_type = self.exclude_flask_index_judge(type_list, record_id_info, all_word_info, word_count, is_review)
right_answer = all_word_info[bank_id]['word']
wrong_bank_id = list(all_word_info.keys())[0]
            print('word id:', bank_id)
            print('definition:', self.word_explain().text)
            print('correct answer:', right_answer)
skip_id = list(all_word_info.keys())[1] if len(all_word_info) > 1 else -1
if do_right:
self.spell_operate(right_answer, do_right=True, is_review=is_review)
right_info.append(bank_id)
else:
if wrong_bank_id == bank_id:
wrong_index.append(bank_index[0])
bank_count = len(wrong_index)
if bank_count >= 5:
right_info.append(bank_id)
if is_review:
self.spell_operate(right_answer, is_review=True)
else:
self.spell_operate(right_answer, wrong_count=len(wrong_index))
else:
if is_review:
right_info.append(bank_id)
self.spell_operate(right_answer, do_right=True)
else:
if bank_id != skip_id:
right_info.append(bank_id)
self.spell_operate(right_answer, do_right=True)
else:
self.forget_btn().click()
skip_index_info.append(bank_index[0])
if len(skip_index_info) >= 5:
right_info.append(bank_id)
if self.wait_check_listen_spell_page():
                                    self.base_assert.except_error('The dictation game still appears after the fifth skip')
else:
if not self.wait_check_listen_spell_page():
                                    self.base_assert.except_error("After clicking the 'forgot' button, the dictation page did not appear")
self.spell_operate(right_answer, wrong_count=len(skip_index_info))
bank_index[0] += 1
if bank_id not in record_id_info[game_type]:
record_id_info[game_type].append(bank_id)
print('wrong:', wrong_index)
print("skip:", skip_index_info)
print('-' * 30, '\n')
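# Illustrative driver sketch (hypothetical names; the real wiring lives in the
# test suite that owns this page object):
#
#     game = WordSpellGame()
#     game.spell_word_game_operate(all_word_info, do_right=False, bank_index=[0],
#                                  wrong_index=[], right_info=[], skip_index_info=[],
#                                  record_id_info={'spell': []}, type_list=['spell'],
#                                  word_count=10, is_review=False)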
| [
"[email protected]"
] | |
53bce614ae220236a08bbd5dc7b205c4aed9d7a1 | 3777658387aa9e78d7c04202d7fd47d59b9e1271 | /images/corner_detection.py | abf9f4ccfd6fa888532ab46528130cf60a510f7f | [] | no_license | jocoder22/PythonDataScience | 709363ada65b6db61ee73c27d8be60587a74f072 | c5a9af42e41a52a7484db0732ac93b5945ade8bb | refs/heads/master | 2022-11-08T17:21:08.548942 | 2022-10-27T03:21:53 | 2022-10-27T03:21:53 | 148,178,242 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | #!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
import cv2
from skimage import io, exposure
from skimage.feature import hog
def print2(*args):
for arg in args:
print(arg, end='\n\n')
sp = {"sep":"\n\n", "end":"\n\n"}
# url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTEc92kYxyNsx6ZxWYF6KJJz-QZWUj0jXBleB2tEg6yBekggb28'
url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSESxr13ODvh5lfb1TxT8LgDbzWP44sD5n1z_Nf-697su_jona3zw'
url2 = "https://avatars2.githubusercontent.com/u/31042629?s=400&v=4"
# load image
# imgg = cv2.imread('car22.jpg')
imgg = io.imread(url2)
# converting to gray scale (skimage's io.imread returns RGB, so use RGB2GRAY)
Img_gray = cv2.cvtColor(imgg, cv2.COLOR_RGB2GRAY)
# remove noise
img = cv2.GaussianBlur(Img_gray,(7,7),0)
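# Harris response: blockSize=3 (corner neighbourhood), ksize=5 (Sobel aperture),
# k=0.1 (Harris free parameter)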
img_c = cv2.cornerHarris(img, 3, 5, 0.1)
img_dilate = cv2.dilate(img_c, np.ones((5, 5), np.uint8), iterations=1)
print(img_dilate.max(), **sp)
Img_gray2 = imgg.copy()
Img_gray2[img_dilate > 0.02 * img_dilate.max()] = [255, 0, 0]
cv2.imshow('lamborghini_with_Corners', Img_gray2)
cv2.waitKey()
cv2.destroyAllWindows()
plt.imshow(Img_gray2)
plt.axis('off')
plt.show()
features, hog_img = hog(Img_gray, visualize=True,
                        pixels_per_cell=(9, 9), cells_per_block=(2, 2))
img_hog = exposure.rescale_intensity(hog_img, in_range=(0, 2))
plt.imshow(img_hog)
plt.axis('off')
plt.show() | [
"[email protected]"
] | |
2e4d94adf3df80850558430a6d6e600a9f3aa1b7 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/contrib/timeseries/python/timeseries/model_utils.py | 249975a83e3669875aa693d97311fff716dbf8b7 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 4,115 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for training and constructing time series Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# TODO(agarwal): Remove and replace with functionality from tf.slim
def fully_connected(inp,
inp_size,
layer_size,
name,
activation=nn_ops.relu,
dtype=dtypes.float32):
"""Helper method to create a fully connected hidden layer."""
wt = variable_scope.get_variable(
name="{}_weight".format(name), shape=[inp_size, layer_size], dtype=dtype)
bias = variable_scope.get_variable(
name="{}_bias".format(name),
shape=[layer_size],
initializer=init_ops.zeros_initializer())
output = nn_ops.xw_plus_b(inp, wt, bias)
if activation is not None:
assert callable(activation)
output = activation(output)
return output
def parameter_switch(parameter_overrides):
"""Create a function which chooses between overridden and model parameters.
Args:
parameter_overrides: A dictionary with explicit overrides of model
parameters, mapping from Tensors to their overridden values.
Returns:
A function which takes a Tensor and returns the override if it is specified,
or otherwise the evaluated value (given current Variable values).
"""
def get_passed_or_trained_value(parameter):
return ops.convert_to_tensor(
parameter_overrides.get(parameter, parameter)).eval()
return get_passed_or_trained_value
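# Usage sketch (inside a session, since the returned function calls .eval()):
#     get_value = parameter_switch({noise_variance: 0.5})
#     get_value(noise_variance)   # -> 0.5; unlisted parameters evaluate to
#                                 # their current (trained) variable values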
def canonicalize_times_or_steps_from_output(times, steps,
previous_model_output):
"""Canonicalizes either relative or absolute times, with error checking."""
if steps is not None and times is not None:
raise ValueError("Only one of `steps` and `times` may be specified.")
if steps is None and times is None:
raise ValueError("One of `steps` and `times` must be specified.")
if times is not None:
times = numpy.array(times)
if len(times.shape) != 2:
times = times[None, ...]
if (previous_model_output[feature_keys.FilteringResults.TIMES].shape[0] !=
times.shape[0]):
raise ValueError(
("`times` must have a batch dimension matching"
" the previous model output (got a batch dimension of {} for `times`"
" and {} for the previous model output).").format(
times.shape[0], previous_model_output[
feature_keys.FilteringResults.TIMES].shape[0]))
if not (previous_model_output[feature_keys.FilteringResults.TIMES][:, -1] <
times[:, 0]).all():
raise ValueError("Prediction times must be after the corresponding "
"previous model output.")
if steps is not None:
predict_times = (
previous_model_output[feature_keys.FilteringResults.TIMES][:, -1:] + 1 +
numpy.arange(steps)[None, ...])
else:
predict_times = times
return predict_times
| [
"[email protected]"
] | |
feff111ff9f8504c1e9fe1ed1302d348f5120e23 | 63c261c8bfd7c15f6cdb4a08ea2354a6cd2b7761 | /acaizerograu/acaizerograu/env/Scripts/pilfile.py | 1261f3a53d2c0e74d9317192e9b38c5f23dc8bb5 | [] | no_license | filhosdaputa/AcaiZero | 93295498d95bcc13d020f2255e6b87a12cff04bf | 99a775f823d98a0b7b10e685936f1c12ccd1a70a | refs/heads/master | 2022-10-29T05:31:10.512990 | 2017-08-11T13:49:06 | 2017-08-11T13:49:06 | 149,019,853 | 0 | 1 | null | 2022-10-18T00:41:16 | 2018-09-16T17:38:48 | JavaScript | UTF-8 | Python | false | false | 2,734 | py | #!C:\Users\IGOR\Source\Repos\AcaiZero\acaizerograu\acaizerograu\env\Scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
logging_level = "WARNING"
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
logging_level = "DEBUG"
logging.basicConfig(level=logging_level)
def globfix(files):
# expand wildcards where necessary
if sys.platform == "win32":
out = []
for file in files:
if glob.has_magic(file):
out.extend(glob.glob(file))
else:
out.append(file)
return out
return files
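# Sketch: the Windows shell does not expand wildcards, so globfix does it here,
# e.g. globfix(["*.png", "photo.jpg"]) -> ["a.png", "b.png", "photo.jpg"].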
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
| [
"[email protected]"
] | |
9bb7f7bcd1ff8fe2c8ca5d83a2b9b489a473dba8 | 0a3e24df172a206a751217e5f85b334f39983101 | /Design Pattern/mytest.py | 16a6f34bcba5218fc479b4cc79867dbbf33df36d | [] | no_license | yeboahd24/python202 | 1f399426a1f46d72da041ab3d138c582c695462d | d785a038183e52941e0cee8eb4f6cedd3c6a35ed | refs/heads/main | 2023-05-06T04:14:19.336839 | 2021-02-10T02:53:19 | 2021-02-10T02:53:19 | 309,841,303 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py |
class AgeValidator(object):
"""docstring for Student"""
def __init__(self, age):
self._age = age
self._name = None
def __get__(self, instance, owner):
if self._age is None:
raise ValueError(f'{self._age}: is not set')
if self._age < 18:
raise ValueError(f'{self._age} must be greater than or equal to 18')
return self._age
    def __set_name__(self, owner, name):
self._name = name
def __set__(self, instance, value):
self._age = value
class Client:
age = AgeValidator(19) # default value for now
def __init__(self, age):
self.age = age
def foo(self):
return self.age
t = Client(7)
t.age = 2
print(t.foo())
# The error occurs because AgeValidator is acting as a descriptor here.
# You could use getter/setter properties instead, but if you realize you
# will need the same validator in most of your classes, implement a
# descriptor (that is my opinion), so that you can reuse the validator
# without writing getters and setters throughout your classes.
# This helps you stay DRY (avoid repeating yourself).
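# Reuse sketch (hypothetical second class, showing the point above):
#     class Employee:
#         age = AgeValidator(30)
#         def __init__(self, age):
#             self.age = age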
| [
"[email protected]"
] | |
b335745902329e09d03ed7d0b83fc9fbebbc4a2c | 47b4a652bf47afbff07a7148c3b4a94b86f85bb2 | /swap_start/auto_playok_com/debug/debug_state.py | 860ae0d1fec53e1bcb3151062b77c6f9bf99d71d | [
"MIT"
] | permissive | yudongqiu/gomoku | 3423253dcac52c0b738249900f1e86b31ca99524 | 4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06 | refs/heads/master | 2022-10-28T09:16:38.939698 | 2021-01-30T21:01:48 | 2021-01-30T21:01:48 | 84,468,572 | 3 | 1 | MIT | 2022-09-30T09:03:45 | 2017-03-09T17:12:22 | Python | UTF-8 | Python | false | false | 1,528 | py | # coding: utf-8
import numpy as np
import AI_debug
from tf_model import load_existing_model
board_size=15
def read_board_state(f):
# default
black_stones = set()
white_stones = set()
board = [black_stones, white_stones]
last_move = None
playing = 0
# read and parse board
for line in open(f):
if '|' in line:
line_idx, contents = line.split('|', maxsplit=1)
row_i = int(line_idx) - 1
ls = contents.split()
if len(ls) == board_size:
for col_j, s in enumerate(ls):
stone = (row_i+1, col_j+1)
if s == 'x':
black_stones.add(stone)
elif s == 'X':
black_stones.add(stone)
last_move = stone
playing = 1
elif s == 'o':
white_stones.add(stone)
elif s == 'O':
white_stones.add(stone)
last_move = stone
playing = 0
elif s == '-':
pass
else:
print(f'found unknown stone: {s}')
board_state = [board, last_move, playing, board_size]
return board_state
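# Expected debug_board.txt layout (sketch): one line per board row, e.g.
#   "1| - - x o X ..." with exactly board_size symbols after the '|';
# 'x'/'o' are black/white stones, and 'X'/'O' additionally mark the last
# move and set the `playing` flag.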
board_state = read_board_state('debug_board.txt')
model = load_existing_model('tf_model.h5')
AI_debug.tf_predict_u.model = model
AI_debug.initialize()
print(AI_debug.strategy(board_state))
| [
"[email protected]"
] | |
825c81bfb2926ce343f78e46f9a4f7df0c279581 | 65dd982b7791b11b4f6e02b8c46300098d9b5bb3 | /heat-2014.2.1/heat/openstack/common/strutils.py | bd690b0829141d35b2aca22297f15e92314c9359 | [
"Apache-2.0"
] | permissive | xiongmeng1108/openstack_gcloud | 83f58b97e333d86d141493b262d3c2261fd823ac | d5d3e4f8d113a626f3da811b8e48742d35550413 | refs/heads/master | 2021-01-10T01:21:13.911165 | 2016-03-25T08:21:14 | 2016-03-25T08:21:14 | 54,700,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,007 | py | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from heat.openstack.common.gettextutils import _
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False, default=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = six.text_type(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return default
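# Behaviour sketch: bool_from_string('on') -> True, bool_from_string('NO') -> False,
# bool_from_string('maybe') -> False (the default), while strict=True raises
# ValueError for unrecognized values.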
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming text/bytes string using `incoming` if they're not
already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming text/bytes string using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
else:
return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
"""Converts a string into an float representation of bytes.
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'
:param text: String input for bytes size conversion.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes. (default: decimal)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
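# Examples (sketch): string_to_bytes('1MB', unit_system='SI') -> 1000000.0,
# string_to_bytes('1MiB') -> 1048576.0, and string_to_bytes('8bit') -> 1.0.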
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
| [
"[email protected]"
] | |
738f342dc72bafe4df18bfca2f9beaa61bcf7526 | 2c4efe2ce49a900c68348f50e71802994c84900a | /braindecode/braindecode/venv1/Lib/site-packages/numba/pycc/__init__.py | 20f60564072472c96972fbdf850306034aaf0c7b | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | sisi2/Masterthesis | b508632526e82b23c2efb34729141bfdae078fa0 | 7ce17644af47db4ad62764ed062840a10afe714d | refs/heads/master | 2022-11-19T15:21:28.272824 | 2018-08-13T15:02:20 | 2018-08-13T15:02:20 | 131,345,102 | 2 | 1 | null | 2022-11-15T14:08:07 | 2018-04-27T21:09:21 | Python | UTF-8 | Python | false | false | 3,769 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import logging
import subprocess
import tempfile
import sys
# Public API
from .cc import CC
from .decorators import export, exportmany
def get_ending(args):
    # Imported locally: main() imports these helpers into its own scope only,
    # so they are not otherwise available as module-level names here.
    from .platform import find_shared_ending, find_pyext_ending
if args.llvm:
return ".bc"
elif args.olibs:
return ".o"
elif args.python:
return find_pyext_ending()
else:
return find_shared_ending()
def main(args=None):
import argparse
from .compiler import ModuleCompiler
from .platform import Toolchain, find_shared_ending, find_pyext_ending
from . import decorators
parser = argparse.ArgumentParser(
description="DEPRECATED - Compile Python modules to a single shared library")
parser.add_argument("inputs", nargs='+', help="Input file(s)")
parser.add_argument("-o", nargs=1, dest="output",
help="Output file (default is name of first input -- with new ending)")
group = parser.add_mutually_exclusive_group()
group.add_argument("-c", action="store_true", dest="olibs",
help="Create object file from each input instead of shared-library")
group.add_argument("--llvm", action="store_true",
help="Emit llvm instead of native code")
parser.add_argument('--header', action="store_true",
help="Emit C header file with function signatures")
parser.add_argument('--python', action='store_true',
help='Emit additionally generated Python wrapper and '
'extension module code in output')
parser.add_argument('-d', '--debug', action='store_true',
help='Print extra debug information')
args = parser.parse_args(args)
logger = logging.getLogger(__name__)
if args.debug:
logger.setLevel(logging.DEBUG)
logger.warn("The 'pycc' script is DEPRECATED; "
"please use the numba.pycc.CC API instead")
if args.output:
args.output = args.output[0]
output_base = os.path.split(args.output)[1]
module_name = os.path.splitext(output_base)[0]
else:
input_base = os.path.splitext(args.inputs[0])[0]
module_name = os.path.split(input_base)[1]
args.output = input_base + get_ending(args)
logger.debug('args.output --> %s', args.output)
if args.header:
print('ERROR: pycc --header has been disabled in this release due to a known issue')
sys.exit(1)
logger.debug('inputs --> %s', args.inputs)
decorators.process_input_files(args.inputs)
compiler = ModuleCompiler(decorators.export_registry, module_name=module_name)
if args.llvm:
logger.debug('emit llvm')
compiler.write_llvm_bitcode(args.output, wrap=args.python)
elif args.olibs:
logger.debug('emit object file')
compiler.write_native_object(args.output, wrap=args.python)
else:
logger.debug('emit shared library')
logger.debug('write to temporary object file %s', tempfile.gettempdir())
toolchain = Toolchain()
toolchain.debug = args.debug
temp_obj = (tempfile.gettempdir() + os.sep +
os.path.basename(args.output) + '.o')
compiler.write_native_object(temp_obj, wrap=args.python)
libraries = toolchain.get_python_libraries()
toolchain.link_shared(args.output, [temp_obj],
toolchain.get_python_libraries(),
toolchain.get_python_library_dirs(),
export_symbols=compiler.dll_exports)
os.remove(temp_obj)
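# CLI sketch (deprecated interface, per the warning above):
#     pycc foo.py -o foo.so      # link a shared library
#     pycc --llvm foo.py         # emit LLVM bitcode (foo.bc)
#     pycc -c foo.py             # emit an object file (foo.o)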
| [
"[email protected]"
] | |
ed958dd42c7a14865477f3186bac33e217d934c0 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/swaggeraemosgi/model/org_apache_sling_scripting_sightly_js_impl_jsapi_sly_bindings_values_prov_properties.py | bd5e147154970e87e3f26d09972fdb4faefab966 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 7,125 | py | """
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from swaggeraemosgi.model.config_node_property_array import ConfigNodePropertyArray
globals()['ConfigNodePropertyArray'] = ConfigNodePropertyArray
class OrgApacheSlingScriptingSightlyJsImplJsapiSlyBindingsValuesProvProperties(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'org_apache_sling_scripting_sightly_js_bindings': (ConfigNodePropertyArray,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'org_apache_sling_scripting_sightly_js_bindings': 'org.apache.sling.scripting.sightly.js.bindings', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""OrgApacheSlingScriptingSightlyJsImplJsapiSlyBindingsValuesProvProperties - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
org_apache_sling_scripting_sightly_js_bindings (ConfigNodePropertyArray): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
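# Instantiation sketch (generated keyword-argument API described above; the
# exact ConfigNodePropertyArray arguments are not shown here):
#     props = OrgApacheSlingScriptingSightlyJsImplJsapiSlyBindingsValuesProvProperties(
#         org_apache_sling_scripting_sightly_js_bindings=ConfigNodePropertyArray(...))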
| [
"[email protected]"
] | |
386aef704c39f9ff78b51e326d7b5f9648aa8c20 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/ams/tests/latest/test_ams_asset_track_scenarios.py | 1473bfb655a7598b6f756712f888f44ca3befcfb | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 15,145 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
from azure.cli.core.util import CLIError
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
from azure.cli.command_modules.ams._test_utils import _get_test_data_file
class AmsAssetFilterTests(ScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_asset_track_create(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
outputContainer = self.create_random_name(prefix='output', length=14)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westeurope',
'outputContainer': outputContainer,
'assetTrackFilePath': _get_test_data_file('assetTrack.ttml'),
'assetTrackFileName': 'assetTrack.ttml',
'sampleIsmFilePath': _get_test_data_file('sampleIsmFile.ism'),
'trackName': self.create_random_name(prefix='track', length=12)
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}')
self.cmd('az ams account storage add -a {amsname} -g {rg} -n {storageAccount}')
outputAssetName = self.create_random_name(prefix='asset', length=12)
self.kwargs.update({
'outputAssetName': outputAssetName
})
self.cmd('az ams asset create -a {amsname} -n {outputAssetName} -g {rg} --container {outputContainer}')
self.kwargs['storage_key'] = str(
self.cmd('az storage account keys list -n {storageAccount} -g {rg} --query "[0].value"').output)
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{assetTrackFilePath}" --name {assetTrackFileName} --account-key {storage_key}')
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{sampleIsmFilePath}" --account-key {storage_key}')
_RETRY_TIMES = 7
for retry_time in range(0, _RETRY_TIMES):
try:
self.cmd(
'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}',
checks=[
self.check('name', '{trackName}'),
self.check('track.fileName', '{assetTrackFileName}')
])
break
except Exception: # pylint: disable=broad-except
                if retry_time < _RETRY_TIMES - 1:
time.sleep(10)
else:
raise
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_asset_track_show(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
outputContainer = self.create_random_name(prefix='output', length=14)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westeurope',
'outputContainer': outputContainer,
'assetTrackFilePath': _get_test_data_file('assetTrack.ttml'),
'assetTrackFileName': 'assetTrack.ttml',
'sampleIsmFilePath': _get_test_data_file('sampleIsmFile.ism'),
'trackName': self.create_random_name(prefix='track', length=12)
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}')
self.cmd('az ams account storage add -a {amsname} -g {rg} -n {storageAccount}')
outputAssetName = self.create_random_name(prefix='asset', length=12)
self.kwargs.update({
'outputAssetName': outputAssetName
})
self.cmd('az ams asset create -a {amsname} -n {outputAssetName} -g {rg} --container {outputContainer}')
self.kwargs['storage_key'] = str(
self.cmd('az storage account keys list -n {storageAccount} -g {rg} --query "[0].value"').output)
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{assetTrackFilePath}" --name {assetTrackFileName} --account-key {storage_key}')
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{sampleIsmFilePath}" --account-key {storage_key}')
_RETRY_TIMES = 5
for retry_time in range(0, _RETRY_TIMES):
try:
self.cmd(
'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}',
checks=[
self.check('name', '{trackName}'),
self.check('track.fileName', '{assetTrackFileName}')
])
self.cmd(
'az ams asset-track show -a {amsname} -g {rg} --track-name {trackName} --asset-name {outputAssetName}',
checks=[
self.check('name', '{trackName}')
])
break
except Exception: # pylint: disable=broad-except
                if retry_time < _RETRY_TIMES - 1:
time.sleep(10)
else:
raise
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_asset_track_list(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
outputContainer = self.create_random_name(prefix='output', length=14)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westeurope',
'outputContainer': outputContainer,
'assetTrackFilePath': _get_test_data_file('assetTrack.ttml'),
'assetTrackFileName': 'assetTrack.ttml',
'sampleIsmFilePath': _get_test_data_file('sampleIsmFile.ism'),
'trackName1': self.create_random_name(prefix='track', length=12),
'trackName2': self.create_random_name(prefix='track2', length=12)
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}')
self.cmd('az ams account storage add -a {amsname} -g {rg} -n {storageAccount}')
outputAssetName = self.create_random_name(prefix='asset', length=12)
self.kwargs.update({
'outputAssetName': outputAssetName
})
self.cmd('az ams asset create -a {amsname} -n {outputAssetName} -g {rg} --container {outputContainer}')
self.kwargs['storage_key'] = str(
self.cmd('az storage account keys list -n {storageAccount} -g {rg} --query "[0].value"').output)
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{assetTrackFilePath}" --name {assetTrackFileName} --account-key {storage_key}')
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{sampleIsmFilePath}" --account-key {storage_key}')
_RETRY_TIMES = 5
for retry_time in range(0, _RETRY_TIMES):
try:
self.cmd(
                    'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName1} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}')
self.cmd(
'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName2} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}')
self.cmd(
'az ams asset-track list -a {amsname} -g {rg} --asset-name {outputAssetName}',
checks=[
self.check('length(@)', 2)
])
break
except Exception: # pylint: disable=broad-except
                if retry_time < _RETRY_TIMES - 1:
time.sleep(10)
else:
raise
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_asset_track_delete(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
outputContainer = self.create_random_name(prefix='output', length=14)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westeurope',
'outputContainer': outputContainer,
'assetTrackFilePath': _get_test_data_file('assetTrack.ttml'),
'assetTrackFileName': 'assetTrack.ttml',
'sampleIsmFilePath': _get_test_data_file('sampleIsmFile.ism'),
'trackName1': self.create_random_name(prefix='track', length=12),
'trackName2': self.create_random_name(prefix='track2', length=12)
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}')
self.cmd('az ams account storage add -a {amsname} -g {rg} -n {storageAccount}')
outputAssetName = self.create_random_name(prefix='asset', length=12)
self.kwargs.update({
'outputAssetName': outputAssetName
})
self.cmd('az ams asset create -a {amsname} -n {outputAssetName} -g {rg} --container {outputContainer}')
self.kwargs['storage_key'] = str(
self.cmd('az storage account keys list -n {storageAccount} -g {rg} --query "[0].value"').output)
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{assetTrackFilePath}" --name {assetTrackFileName} --account-key {storage_key}')
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{sampleIsmFilePath}" --account-key {storage_key}')
_RETRY_TIMES = 5
for retry_time in range(0, _RETRY_TIMES):
try:
self.cmd(
                    'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName1} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}')
self.cmd(
'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName2} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}')
self.cmd(
'az ams asset-track list -a {amsname} -g {rg} --asset-name {outputAssetName}',
checks=[
self.check('length(@)', 2)
])
self.cmd(
'az ams asset-track delete -a {amsname} -g {rg} --asset-name {outputAssetName} --track-name {trackName2}',
checks=[
self.check('length(@)', 1)
]
)
break
except Exception: # pylint: disable=broad-except
                if retry_time < _RETRY_TIMES - 1:
time.sleep(10)
else:
raise
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_asset_track_update(self, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
outputContainer = self.create_random_name(prefix='output', length=14)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'westeurope',
'outputContainer': outputContainer,
'assetTrackFilePath': _get_test_data_file('assetTrack.ttml'),
'assetTrackFileName': 'assetTrack.ttml',
'sampleIsmFilePath': _get_test_data_file('sampleIsmFile.ism'),
'trackName': self.create_random_name(prefix='track', length=12)
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}')
self.cmd('az ams account storage add -a {amsname} -g {rg} -n {storageAccount}')
outputAssetName = self.create_random_name(prefix='asset', length=12)
self.kwargs.update({
'outputAssetName': outputAssetName
})
self.cmd('az ams asset create -a {amsname} -n {outputAssetName} -g {rg} --container {outputContainer}')
self.kwargs['storage_key'] = str(
self.cmd('az storage account keys list -n {storageAccount} -g {rg} --query "[0].value"').output)
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{assetTrackFilePath}" --name {assetTrackFileName} --account-key {storage_key}')
self.cmd(
'az storage blob upload --no-progress --account-name {storageAccount} --container {outputContainer} --file "{sampleIsmFilePath}" --account-key {storage_key}')
_RETRY_TIMES = 5
for retry_time in range(0, _RETRY_TIMES):
try:
self.cmd(
'az ams asset-track create -a {amsname} -g {rg} --track-name {trackName} --track-type Text --asset-name {outputAssetName} --file-name {assetTrackFileName}',
checks=[
self.check('name', '{trackName}'),
self.check('track.fileName', '{assetTrackFileName}')
])
self.kwargs.update({
'displayName': 'newDisplayName',
'playerVisibility': 'Hidden'
})
self.cmd(
'az ams asset-track update -a {amsname} -g {rg} --track-name {trackName} --asset-name {outputAssetName} --display-name {displayName} --player-visibility {playerVisibility}',
checks=[
self.check('track.displayName', '{displayName}'),
self.check('track.playerVisibility', '{playerVisibility}')
])
break
except Exception: # pylint: disable=broad-except
                if retry_time < _RETRY_TIMES - 1:
time.sleep(10)
else:
raise
| [
"[email protected]"
] | |
2ca8d537c01563e7b278dbb6e2b6594cbbc80763 | 99e494d9ca83ebafdbe6fbebc554ab229edcbacc | /.history/Day 1/Test/Answers/NegativeMarking_20210304211434.py | 57b1e9d86c4af6e1af80ae1355f111567af35e11 | [] | no_license | Datta2901/CCC | c0364caa1e4937bc7bce68e4847c8d599aef0f59 | 4debb2c1c70df693d0e5f68b5798bd9c7a7ef3dc | refs/heads/master | 2023-04-19T10:05:12.372578 | 2021-04-23T12:50:08 | 2021-04-23T12:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | t = int(input())
for i in range(t):
questions,requiredscore = map(int,input().split())
if questions * 4 < requiredscore:
print(-1)
continue
attempt = (requiredscore/questions) + 3
accuracy = attempt / 7
    print(format(accuracy * 100, '.2f'))
# Accuracy can be found by solving two linear equations:
#   Total Score (Required Score) = 4 * x - 3 * y
#   Total Questions = x + y
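#   Solving them: x = (Score + 3 * Questions) / 7 and accuracy = x / Questions,
#   which matches attempt = Score/Questions + 3 followed by accuracy = attempt / 7.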
# Here x is the total number of correct answers and y the number of wrong ones. | [
"[email protected]"
] | |
3987a84881cb00c259e5f634796e5624fed300d3 | 6674f4300961d9ca7fbfb667734fb91b26fc7881 | /cutter.py | 23a52d5ea2f98dd23c1e6879e0862a329c9e6fb2 | [] | no_license | seffka/sounds | 2d1232c5e2a0bf4ca5ab2fae06e515ec078aab4a | 84770ed73a47f42af847012cd987f0e3b6a15db6 | refs/heads/master | 2021-01-12T00:13:13.331170 | 2017-01-19T15:06:55 | 2017-01-19T15:06:55 | 78,686,903 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | import sys
sys.path.append('/Users/seffka/DSPMA/sms-tools/software/models/')
from utilFunctions import wavread, wavwrite
from scipy.signal import get_window
import matplotlib.pyplot as plt
import numpy as np
import os
from os import listdir
from os.path import isfile, join, splitext
import essentia
import essentia.standard
def processLength(l, x, instrument, pitch):
s = int(44100 * l / 1000.0)
_8ms = int(44100 * .008)
aw = np.ones(s)
dw = np.ones(s)
hw = get_window('hamming', _8ms)
    dw[:_8ms // 2] = hw[:_8ms // 2]
    dw[-(_8ms // 2):] = hw[-(_8ms // 2):]
    aw[-(_8ms // 2):] = hw[-(_8ms // 2):]
ax = x[:s] * aw
dx = x[int(44100 * 0.08): int(44100 * 0.08) + s] * dw
file_a = instrument + '_a_' + str(l) + '_' + pitch + '.wav'
file_d = instrument + '_d_' + str(l) + '_' + pitch + '.wav'
writer = essentia.standard.MonoWriter(filename=join('hacked', file_a))
writer(ax.astype(np.float32))
writer = essentia.standard.MonoWriter(filename=join('hacked', file_d))
writer(dx.astype(np.float32))
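# processLength writes two l-millisecond cuts per note (sketch of the layout above):
#   '<instrument>_a_<l>_<pitch>.wav' - the attack: the first l ms, faded out
#       over the final 8 ms;
#   '<instrument>_d_<l>_<pitch>.wav' - the body: l ms starting at 80 ms, with
#       8 ms fades on both edges.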
f = [f for f in listdir('raw') if isfile(join('raw', f)) and splitext(join('.', f))[1] == '.wav' and 'intro.wav' not in f]
for file in f:
loader = essentia.standard.MonoLoader(filename=join('raw', file))
x = loader()
parts = splitext(file)[0].split('_')
instrument = parts[0]
pitch = parts[1]
processLength(16, x, instrument, pitch)
processLength(24, x, instrument, pitch)
processLength(32, x, instrument, pitch)
processLength(64, x, instrument, pitch)
processLength(128, x, instrument, pitch)
processLength(500, x, instrument, pitch)
| [
"[email protected]"
] | |
012aaaa9e7fdbf6d4aeeaa7fa858790eab08e2eb | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/security/azure-mgmt-security/azure/mgmt/security/v2023_02_01_preview/aio/operations/_health_report_operations.py | 852329018d706ec02c3aa68d58b22afb9b4dd791 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,653 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._health_report_operations import build_get_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HealthReportOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.security.v2023_02_01_preview.aio.SecurityCenter`'s
:attr:`health_report` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(self, resource_id: str, health_report_name: str, **kwargs: Any) -> _models.HealthReport:
"""Get health report of resource.
:param resource_id: The identifier of the resource. Required.
:type resource_id: str
:param health_report_name: The health report Key - Unique key for the health report type.
Required.
:type health_report_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HealthReport or the result of cls(response)
:rtype: ~azure.mgmt.security.v2023_02_01_preview.models.HealthReport
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01-preview"))
cls: ClsType[_models.HealthReport] = kwargs.pop("cls", None)
request = build_get_request(
resource_id=resource_id,
health_report_name=health_report_name,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("HealthReport", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/{resourceId}/providers/Microsoft.Security/healthReports/{healthReportName}"}
| [
"[email protected]"
] | |
b90e6ea708395e48959ab1848de991922eb9a778 | adb6314474c49d3780005f110115c2323f3a343e | /hr_employee_updation/__manifest__.py | 4284e142f966d701c0fb80489b75c3f16dfa8b04 | [] | no_license | viethoang66666/seatek_viet | d86996a215ae426a5dce3054360f204e3d0867a1 | 5ebad7ede4690e1bb9e2c1063abf677e675631b4 | refs/heads/master | 2023-04-25T00:37:17.236513 | 2021-05-12T10:07:00 | 2021-05-12T10:07:00 | 366,660,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | # -*- coding: utf-8 -*-
###################################################################################
# A part of Open HRMS Project <https://www.openhrms.com>
#
# Cybrosys Technologies Pvt. Ltd.
# Copyright (C) 2018-TODAY Cybrosys Technologies (<https://www.cybrosys.com>).
# Author: Jesni Banu (<https://www.cybrosys.com>)
# Last modified 09Dec2020 by htkhoa - Seatek
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License (AGPL) as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###################################################################################
{
'name': 'OpenSea HRMS Employee Info 2.0.2',
'version': '12.0.2.0.2',
'summary': """Adding Advanced Fields In Employee Master""",
'description': 'Add more information in employee records. Last modified 06Dec2020 by htkhoa',
'category': 'Generic Modules/Human Resources',
'author': 'Cybrosys Techno Solutions',
'company': 'Cybrosys Techno Solutions',
'website': "https://www.openhrms.com",
'depends': ['base', 'hr', 'mail', 'hr_gamification'],
'data': [
],
'demo': [],
'images': ['static/description/banner.jpg'],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
'application': False,
}
| [
"[email protected]"
] | |
6b062341d5d9055c048a0f573b4535d9fdd25741 | 930bc970069d8cbcfb36725a90492eff50638ecc | /code/dk-iris-pipeline/airflow_home/dags/iris-dag.py | 293423870eb24c0bad97b6b4bc19c1f95bd27dbe | [
"MIT"
] | permissive | databill86/airflow4ds | 4770d856569c4db4b55b2d9dfda010e21c4cd790 | b5ae213f7169c54d31f4eca58d235ec6b09fd56f | refs/heads/master | 2021-09-25T17:26:43.340747 | 2018-10-24T16:09:49 | 2018-10-24T16:09:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | import sys
import os
PROJECT_DIRECTORY = os.getenv(key='AIRFLOW_HOME')
sys.path.append(PROJECT_DIRECTORY)
from src import get_raw_iris, get_clean_iris
import datetime as dt
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
default_args = {
'owner': 'me',
'depends_on_past': False,
'start_date': dt.datetime(2018, 8, 22),
'retries': 1,
'retry_delay': dt.timedelta(minutes=5),
}
with DAG('airflow_tutorial_v01',
default_args=default_args,
schedule_interval='0 0 * * *',
) as dag:
print_hello = BashOperator(task_id='print_hello', bash_command='echo "hello"')
sleep = BashOperator(task_id='sleep', bash_command='sleep 5')
get_data = PythonOperator(task_id='get_raw_iris', python_callable=get_raw_iris)
clean_data = PythonOperator(task_id='get_clean_iris', python_callable=get_clean_iris)
print_hello >> sleep >> get_data >> clean_data
| [
"[email protected]"
] | |
d25faa0b2ef3fc7ee416c23bf66da07d35197723 | 3f84ff1f506287bf0bb3b0840947e3ef23f22c87 | /04day/6-王者游戏私有方法.py | 8b73d2a76e870dc88eac560b0f4a097706823bd4 | [] | no_license | 2099454967/wbx | 34b61c0fc98a227562ea7822f2fa56c5d01d3654 | 316e7ac7351b532cb134aec0740e045261015920 | refs/heads/master | 2020-03-18T06:09:58.544919 | 2018-05-28T13:01:19 | 2018-05-28T13:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | class Game():
def __init__(self):
self.__size = 100
def getSize(self):
return self.__size
def setSize(self,size):
self.__size = size
    # The ultimate ("dazhao") can only be cast when there is enough mana (mp).
    # The double leading underscore triggers name mangling, so the method
    # cannot be called directly from outside the class.
    def __dazhao(self,mp):
        print("Kill one man every ten steps")
def fadazhao(self,mp):
if mp <= 80:
print("蓝不够")
else:
self.__dazhao(mp)
wangzhe = Game()
#wangzhe.__dazhao(100)
wangzhe.fadazhao(100)
print(wangzhe.getSize())  # accessing wangzhe.__size directly would raise AttributeError
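# Note: name mangling only renames the attribute (it remains reachable as
# wangzhe._Game__size), so this is privacy by convention, not enforcement.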
| [
"[email protected]"
] | |
95d1e9159392dcaf85675e26e0bdec4db28e9fea | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /nmoohLwP962r6P355_7.py | 0bc8cd0fa601e62247825d104fab5cc4f71290af | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | """
In this challenge, you have to establish if the digits of a given number form
a straight arithmetic sequence (either increasing or decreasing). A straight
sequence has an equal step between every pair of digits.
Given an integer `n`, implement a function that returns:
* `"Not Straight"` if `n` is lower than 100 or if its digits are not an arithmetic sequence.
* `"Trivial Straight"` if `n` has a single repeating digit.
* An integer being the step of the sequence if the `n` digits are a straight arithmetic sequence.
### Examples
straight_digital(123) ➞ 1
# 2 - 1 = 1 | 3 - 2 = 1
straight_digital(753) ➞ -2
# 5 - 7 = -2 | 3 - 5 = -2
straight_digital(666) ➞ "Trivial Straight"
# There's a single repeating digit (step = 0).
straight_digital(124) ➞ "Not Straight"
# 2 - 1 = 1 | 4 - 2 = 2
# A valid sequence has always the same step between its digits.
straight_digital(99) ➞ "Not Straight"
# The number is lower than 100.
### Notes
* The step of the sequence can be either positive or negative (see example #2).
* Trivia: there are infinite straight digital numbers, but only 96 of them are made of at least two different digits.
"""
def straight_digital(number):
x = ''.join(
n
for n in str(number)
if n.isdigit()
)
d = [
int(j) - int(i)
for i, j in zip(x, x[1:])
]
if len(set(d)) > 1 or number < 100:
return 'Not Straight'
elif len(set(x)) == 1:
return 'Trivial Straight'
else:
return d[0]
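# Minimal self-check (sketch; the expected values come from the docstring examples):
if __name__ == "__main__":
    assert straight_digital(123) == 1
    assert straight_digital(753) == -2
    assert straight_digital(666) == 'Trivial Straight'
    assert straight_digital(124) == 'Not Straight'
    assert straight_digital(99) == 'Not Straight'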
| [
"[email protected]"
] | |
772024ffe81f495ca9834e7489711dd25fcd010b | 7bb34b9837b6304ceac6ab45ce482b570526ed3c | /external/webkit/Source/WebCore/WebCore.gyp/scripts/action_makenames.py | ecf543f44980bab6edee79a875d0e89de645676e | [
"Apache-2.0",
"LGPL-2.0-only",
"BSD-2-Clause",
"LGPL-2.1-only",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LicenseRef-scancode-other-copyleft"
] | permissive | ghsecuritylab/android_platform_sony_nicki | 7533bca5c13d32a8d2a42696344cc10249bd2fd8 | 526381be7808e5202d7865aa10303cb5d249388a | refs/heads/master | 2021-02-28T20:27:31.390188 | 2013-10-15T07:57:51 | 2013-10-15T07:57:51 | 245,730,217 | 0 | 0 | Apache-2.0 | 2020-03-08T00:59:27 | 2020-03-08T00:59:26 | null | UTF-8 | Python | false | false | 6,871 | py | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# action_makenames.py is a harness script to connect actions sections of
# gyp-based builds to make_names.pl.
#
# usage: action_makenames.py OUTPUTS -- INPUTS [-- OPTIONS]
#
# Multiple OUTPUTS, INPUTS, and OPTIONS may be listed. The sections are
# separated by -- arguments.
#
# The directory name of the first output is chosen as the directory in which
# make_names will run. If the directory name for any subsequent output is
# different, those files will be moved to the desired directory.
#
# Multiple INPUTS may be listed. An input with a basename matching
# "make_names.pl" is taken as the path to that script. Inputs with names
# ending in TagNames.in or tags.in are taken as tag inputs. Inputs with names
# ending in AttributeNames.in or attrs.in are taken as attribute inputs. There
# may be at most one tag input and one attribute input. A make_names.pl input
# is required and at least one tag or attribute input must be present.
#
# OPTIONS is a list of additional options to pass to make_names.pl. This
# section need not be present.
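#
# For example, a WebKit-style invocation might look like this (the paths and
# the trailing option are hypothetical, shown for illustration only):
#
#   python action_makenames.py gen/HTMLNames.cpp gen/HTMLNames.h -- \
#       WebCore/dom/make_names.pl WebCore/html/HTMLTagNames.in \
#       WebCore/html/HTMLAttributeNames.in -- --factory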
import os
import posixpath
import shutil
import subprocess
import sys
def SplitArgsIntoSections(args):
sections = []
while len(args) > 0:
if not '--' in args:
# If there is no '--' left, everything remaining is an entire section.
dashes = len(args)
else:
dashes = args.index('--')
sections.append(args[:dashes])
# Next time through the loop, look at everything after this '--'.
if dashes + 1 == len(args):
# If the '--' is at the end of the list, we won't come back through the
# loop again. Add an empty section now corresponding to the nothingness
# following the final '--'.
args = []
sections.append(args)
else:
args = args[dashes + 1:]
return sections
def main(args):
sections = SplitArgsIntoSections(args[1:])
assert len(sections) == 2 or len(sections) == 3
(outputs, inputs) = sections[:2]
if len(sections) == 3:
options = sections[2]
else:
options = []
# Make all output pathnames absolute so that they can be accessed after
# changing directory.
for index in xrange(0, len(outputs)):
outputs[index] = os.path.abspath(outputs[index])
outputDir = os.path.dirname(outputs[0])
# Look at the inputs and figure out which ones are make_names.pl, tags, and
# attributes. There can be at most one of each, and those are the only
# input types supported. make_names.pl is required and at least one of tags
# and attributes is required.
makeNamesInput = None
tagInput = None
attrInput = None
for input in inputs:
# Make input pathnames absolute so they can be accessed after changing
# directory. On Windows, convert \ to / for inputs to the perl script to
# work around the intermix of activepython + cygwin perl.
inputAbs = os.path.abspath(input)
inputAbsPosix = inputAbs.replace(os.path.sep, posixpath.sep)
inputBasename = os.path.basename(input)
if inputBasename == 'make_names.pl':
assert makeNamesInput == None
makeNamesInput = inputAbs
elif inputBasename.endswith('TagNames.in') \
or inputBasename.endswith('tags.in'):
assert tagInput == None
tagInput = inputAbsPosix
elif inputBasename.endswith('AttributeNames.in') \
or inputBasename.endswith('attrs.in'):
assert attrInput == None
attrInput = inputAbsPosix
else:
assert False
assert makeNamesInput != None
assert tagInput != None or attrInput != None
# scriptsPath is a Perl include directory, located relative to
# makeNamesInput.
scriptsPath = os.path.normpath(
os.path.join(os.path.dirname(makeNamesInput), os.pardir, 'bindings', 'scripts'))
# Change to the output directory because make_names.pl puts output in its
# working directory.
os.chdir(outputDir)
# Build up the command.
command = ['perl', '-I', scriptsPath, makeNamesInput]
if tagInput != None:
command.extend(['--tags', tagInput])
if attrInput != None:
command.extend(['--attrs', attrInput])
command.extend(options)
# Do it. check_call is new in 2.5, so simulate its behavior with call and
# assert.
returnCode = subprocess.call(command)
assert returnCode == 0
# Go through the outputs. Any output that belongs in a different directory
# is moved. Do a copy and delete instead of rename for maximum portability.
# Note that all paths used in this section are still absolute.
for output in outputs:
thisOutputDir = os.path.dirname(output)
if thisOutputDir != outputDir:
outputBasename = os.path.basename(output)
src = os.path.join(outputDir, outputBasename)
dst = os.path.join(thisOutputDir, outputBasename)
shutil.copyfile(src, dst)
os.unlink(src)
return returnCode
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
d39c97d024cef112b1e44e961021e9d7cff0637d | 917c44bfb0b6fdcce7ad4148e6cbd89fd0e61901 | /tests/factories.py | ca87699b78e4d8b4e95ab6e339d458ceda503f02 | [] | no_license | onepercentclub/django-bb-salesforce | 39c3a0071d52b0c021c545aa32aeca310ad6a1ec | 6d00a9521271612a174d7e66dc65a6751f1636f4 | refs/heads/master | 2021-01-21T04:33:06.979227 | 2016-07-15T08:58:28 | 2016-07-15T08:58:28 | 32,444,339 | 0 | 1 | null | 2016-07-15T08:58:28 | 2015-03-18T07:31:47 | Python | UTF-8 | Python | false | false | 1,450 | py | import factory
from tests.models import Member, Country, SubRegion, Region, Address
class MemberFactory(factory.DjangoModelFactory):
FACTORY_FOR = Member
username = factory.Sequence(lambda n: u'jd_{0}'.format(n))
first_name = factory.Sequence(lambda f: u'John_{0}'.format(f))
last_name = factory.Sequence(lambda l: u'Doe_{0}'.format(l))
email = factory.Sequence(lambda l: u'user_{0}@gmail.com'.format(l))
class RegionFactory(factory.DjangoModelFactory):
FACTORY_FOR = Region
name = factory.Sequence(lambda n: u'Region{0}'.format(n))
class SubRegionFactory(factory.DjangoModelFactory):
FACTORY_FOR = SubRegion
name = factory.Sequence(lambda n: u'SubRegion{0}'.format(n))
region = factory.SubFactory(RegionFactory)
class CountryFactory(factory.DjangoModelFactory):
FACTORY_FOR = Country
name = factory.Sequence(lambda n: u'Country_{0}'.format(n))
subregion = factory.SubFactory(SubRegionFactory)
class AddressFactory(factory.DjangoModelFactory):
FACTORY_FOR = Address
user = factory.SubFactory(MemberFactory)
line1 = factory.Sequence(lambda n: u'street_{0}'.format(n))
line2 = factory.Sequence(lambda n: u'extra_{0}'.format(n))
city = factory.Sequence(lambda n: u'city_{0}'.format(n))
state = factory.Sequence(lambda n: u'state_{0}'.format(n))
postal_code = factory.Sequence(lambda n: u'zipcode_{0}'.format(n))
country = factory.SubFactory(CountryFactory)
| [
"[email protected]"
] | |
f0f11595eb6cb036039a579f42b8e3513cebbdf1 | 655c51822cc0484a7cfab9d0de6e9f1fde144dba | /autoserver/api/src/plugins/memory.py | 053f8b6098248b8bc598047631f4fbaf6a175412 | [] | no_license | summer93/CMDB | 6f1988749cc8078cd219d4dd44f5dfcb0f2cc8f7 | 62c714354870c9b984f722efdf62e0d78bcf1450 | refs/heads/master | 2021-01-15T18:26:45.928105 | 2017-08-10T09:24:33 | 2017-08-10T09:24:33 | 99,783,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,628 | py | from repository import models
class Memory(object):
def __init__(self):
pass
@classmethod
def initial(cls):
return cls()
def process(self,server_info,hostname,server_obj):
print('start')
        # ############### Process the memory information ##################
new_memory_dict = server_info['memory']['data']
old_memory_list = models.Memory.objects.filter(server_obj=server_obj)
        # Bookkeeping: intersection -> update; set differences -> create / delete
new_slot_list = list(new_memory_dict.keys())
old_slot_list = []
for item in old_memory_list:
old_slot_list.append(item.slot)
        # Intersection: slots present on both sides -> update
        update_list = set(new_slot_list).intersection(old_slot_list)
        # In the new report but not in the DB -> create
        create_list = set(new_slot_list).difference(old_slot_list)
        # In the DB but no longer reported -> delete
        del_list = set(old_slot_list).difference(new_slot_list)
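        # Worked example (slot ids are illustrative):
        #   new_slot_list = ['3', '5'], old_slot_list = ['4', '5']
        #   -> update_list == {'5'}, create_list == {'3'}, del_list == {'4'}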
if del_list:
            # Delete the rows that are no longer present
            models.Memory.objects.filter(server_obj=server_obj, slot__in=del_list).delete()
            # Write the change log
            models.AssetRecord.objects.create(asset_obj=server_obj.asset, content="Removed memory: %s" % (", ".join(del_list),))
        # Create
record_list = []
for slot in create_list:
memory_dict = new_memory_dict[slot]
memory_dict['server_obj'] = server_obj
models.Memory.objects.create(**memory_dict)
temp = "新增内存:位置{slot},容量{capacity},型号:{model},speed:{speed},manufacturer:{manufacturer},sn:{sn}".format(**memory_dict)
record_list.append(temp)
if record_list:
content = ";".join(record_list)
models.AssetRecord.objects.create(asset_obj=server_obj.asset, content=content)
        # ############ Update ############
record_list = []
        row_map = {'capacity': 'capacity', 'speed': 'speed', 'model': 'model'}  # field -> label used in the change log
print(update_list)
for slot in update_list:
new_memory_row = new_memory_dict[slot]
ol_memory_row = models.Memory.objects.filter(slot=slot, server_obj=server_obj).first()
for k, v in new_memory_row.items():
value = getattr(ol_memory_row, k)
if v != value:
record_list.append("槽位%s,%s由%s变更为%s" % (slot, row_map[k], value, v,))
setattr(ol_memory_row, k, v)
ol_memory_row.save()
if record_list:
content = ";".join(record_list)
models.AssetRecord.objects.create(asset_obj=server_obj.asset, content=content)
| [
"[email protected]"
] | |
9ac50c0ee92de53c33c21cabe6fe78b6597c7f90 | 30ee21b97e6105288101d1031ed7f96aaf6e141e | /lib/_org/stemma_soil_sensor/seesaw.py | 44e86e096d1fb9c2c0e9aa3883dd97e705d298c1 | [] | no_license | josmet52/micropython | b3d1d69ad7eb6832ce26c15e3580dae99b5bb87f | d8f2267b556ba3b15861898c2c2eb5e086dcf9ce | refs/heads/main | 2023-06-13T06:18:30.151497 | 2021-07-09T10:14:25 | 2021-07-09T10:14:25 | 353,124,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | """
This is a lightweight port from CircuitPython to MicroPython
of Dean Miller's https://github.com/adafruit/Adafruit_CircuitPython_seesaw/blob/master/adafruit_seesaw/seesaw.py
* Author(s): Mihai Dinculescu
Implementation Notes
--------------------
**Hardware:**
* Adafruit ATSAMD09 Breakout with SeeSaw: https://www.adafruit.com/product/3657
**Software and Dependencies:**
* MicroPython firmware: https://micropython.org
**Tested on:**
* Hardware: Adafruit HUZZAH32 - ESP32 Feather https://learn.adafruit.com/adafruit-huzzah32-esp32-feather/overview
* Firmware: MicroPython v1.12 https://micropython.org/resources/firmware/esp32-idf3-20191220-v1.12.bin
"""
import time
from micropython import const  # const() lives in the micropython module; it is not a builtin
STATUS_BASE = const(0x00)
TOUCH_BASE = const(0x0F)
_STATUS_HW_ID = const(0x01)
_STATUS_SWRST = const(0x7F)
_HW_ID_CODE = const(0x55)
class Seesaw:
"""Driver for SeeSaw I2C generic conversion trip.
:param I2C i2c: I2C bus the SeeSaw is connected to.
:param int addr: I2C address of the SeeSaw device."""
def __init__(self, i2c, addr):
self.i2c = i2c
self.addr = addr
self.sw_reset()
def sw_reset(self):
"""Trigger a software reset of the SeeSaw chip"""
self._write8(STATUS_BASE, _STATUS_SWRST, 0xFF)
time.sleep(.500)
chip_id = self._read8(STATUS_BASE, _STATUS_HW_ID)
if chip_id != _HW_ID_CODE:
raise RuntimeError("SeeSaw hardware ID returned (0x{:x}) is not "
"correct! Expected 0x{:x}. Please check your wiring."
.format(chip_id, _HW_ID_CODE))
def _write8(self, reg_base, reg, value):
self._write(reg_base, reg, bytearray([value]))
def _read8(self, reg_base, reg):
ret = bytearray(1)
self._read(reg_base, reg, ret)
return ret[0]
def _read(self, reg_base, reg, buf, delay=.005):
self._write(reg_base, reg)
time.sleep(delay)
self.i2c.readfrom_into(self.addr, buf)
def _write(self, reg_base, reg, buf=None):
full_buffer = bytearray([reg_base, reg])
if buf is not None:
full_buffer += buf
self.i2c.writeto(self.addr, full_buffer)
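# --- Illustrative usage (sketch, not part of the original driver) ---
# The bus number, pins and the 0x36 address below are assumptions for an
# ESP32 board; adjust them for your wiring.
#
# from machine import I2C, Pin
# i2c = I2C(0, scl=Pin(22), sda=Pin(23))
# sensor = Seesaw(i2c, addr=0x36)  # __init__ runs sw_reset() and verifies the HW id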
| [
"[email protected]"
] | |
0a1633e7f5726b1b1a413b9103d0b62f676f3cbd | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc050/A/4582561.py | 4957d0b097731037be60f433dc686459d0a3ffb7 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | C, c = input().split()
print('Yes' if C == c.upper() else 'No') | [
"[email protected]"
] | |
e497b5560b1432d4414794b40c5d8c8d8ce1e288 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_lifted.py | 40e546c7fcc541ef50d02efb32fc82ae8192b35f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.nouns._lift import _LIFT
# class header
class _LIFTED(_LIFT):
def __init__(self,):
_LIFT.__init__(self)
self.name = "LIFTED"
self.specie = 'nouns'
self.basic = "lift"
self.jsondata = {}
| [
"[email protected]"
] | |
d16b3fca1a54c3a45ff1c1042b286a16635babe0 | 6970cd9ca073ae7becabcbc58cbd9e9f567b9d23 | /imgaug/augmenters/__init__.py | 47bca888063c5b7432f56c9cb67b256237ed91d7 | [
"MIT"
] | permissive | Liuxiang0358/imgaug | 02c7eb57152ebdd7e92a8779a74c98c8ee041cc0 | 3a0c787ed32729dc47c06ea62c20c42997ad4305 | refs/heads/master | 2020-07-03T00:44:42.866083 | 2019-08-10T10:23:18 | 2019-08-10T10:23:18 | 201,729,987 | 1 | 0 | MIT | 2019-08-11T07:01:49 | 2019-08-11T07:01:49 | null | UTF-8 | Python | false | false | 598 | py | from __future__ import absolute_import
from imgaug.augmenters.arithmetic import *
from imgaug.augmenters.blend import *
from imgaug.augmenters.blur import *
from imgaug.augmenters.color import *
from imgaug.augmenters.contrast import *
from imgaug.augmenters.convolutional import *
from imgaug.augmenters.edges import *
from imgaug.augmenters.flip import *
from imgaug.augmenters.geometric import *
from imgaug.augmenters.meta import *
from imgaug.augmenters.pooling import *
from imgaug.augmenters.segmentation import *
from imgaug.augmenters.size import *
from imgaug.augmenters.weather import *
| [
"[email protected]"
] | |
44db22541fa13bb0514ef0a372f738e3da9b270b | 6933b96b9c10ca70da57b1b384126e20fa21d9b2 | /FTP全自动采集爆破/sqlite0.5/test.py | 05ef237848989a15f4732d62c26b3018bf0eb7c9 | [] | no_license | Dawson0x00/scan | 5bb2e85756b8e86ba43f6d63182a7e806c560bfc | e9f274e26ac924a47cf3216e707dc1a724937775 | refs/heads/master | 2021-01-19T14:12:59.107938 | 2017-02-22T02:51:27 | 2017-02-22T02:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | #!/usr/local/bin/python
#-*- coding: UTF-8 -*-
queue = []
def enQ():
queue.append(raw_input('Enter new string: ').strip())
# Use the list's built-in pop(); pop(0) removes the first element of the list
def deQ(): #www.2cto.com
if len(queue) == 0:
print 'Cannot pop from an empty queue!'
else:
print 'Removed [', queue.pop(0) ,']'
def viewQ():
print queue
CMDs = {'e': enQ, 'd': deQ, 'v': viewQ}
def showmenu():
pr = """
(E)nqueue
(D)equeue
(V)iew
(Q)uit
Enter choice: """
while True:
while True:
try:
choice = raw_input(pr).strip()[0].lower()
except (EOFError, KeyboardInterrupt, IndexError):
choice = 'q'
print '\nYou picked: [%s]' % choice
if choice not in 'devq':
print 'Invalid option, try again'
else:
break
if choice == 'q':
break
CMDs[choice]()
if __name__ == '__main__':
showmenu()
#def worker():
# while True:
# task = _queue.pop()
# ......
#
#
# _writequeue.push(.....)
# Python queue (producer-side sketch)
#if __name__=='__main__':
# while True:
# if threadpool.idle() > 0:
#            tasks = sql.fetch_task(threadpool.idle())
#            _queue.push(tasks)
#
# time.sleep(5)
#
#
# _writequeue.toitems().update() | [
"[email protected]"
] | |
521f6e601d211e16f6f2ec8d5e0818cd6b21b957 | 4d4485378bec02daa527c12a6051be4254a86589 | /usr/models/layerbylayer/tfnmt_model.py | cdbd294355c140c8c07dc7f2368279074b8e43af | [
"Apache-2.0"
] | permissive | colmantse/tensor2tensor-usr | 163f5bff5e94dea75760e76fa4b07b6b2657a74c | 8129cc7de2bb880dc328b4189ed613b26015c151 | refs/heads/master | 2021-07-13T01:22:29.626739 | 2017-10-10T00:53:54 | 2017-10-10T00:53:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # coding=utf-8
"""Layer-by-layer model definitions."""
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from tensor2tensor.layers import common_layers
from usr import utils as usr_utils
import tensorflow as tf
| [
"[email protected]"
] | |
d1b18b6f214a8f4dda769d317c09e88b0bae2d87 | 5830b76dda9c2d0b62b1929f3a5be26606089c8c | /alembic/env.py | 2b67e16b02665b1e8cf828f13f800f454cb0cd94 | [] | no_license | umatbro/minikametr | 0356eaff1c011b26065479cb417648ddf18f4796 | 4844e707c24559b3ad61b89f017a177fb52a5384 | refs/heads/master | 2023-08-25T02:30:25.753701 | 2021-10-11T10:18:10 | 2021-10-11T10:18:10 | 415,385,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from db import DATABASE_URL
from models import * # noqa
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
config.set_main_option("sqlalchemy.url", DATABASE_URL)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = [SQLModel.metadata]
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
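# Offline mode is what e.g. "alembic upgrade head --sql" exercises: the
# statements are emitted as SQL text instead of being run on a connection.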
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| [
"[email protected]"
] | |
b291e92f85bca69d74a58603ffcc38c2215c29b2 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py | 6ee3895763b1496c9b4c9cf358c1564d0a715a01 | [
"GPL-3.0-only",
"MIT",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 5,451 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) Seth Edwards, 2014
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: librato_annotation
short_description: create an annotation in librato
description:
- Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
author: "Seth Edwards (@Sedward)"
requirements: []
options:
user:
description:
- Librato account username
required: true
api_key:
description:
- Librato account api key
required: true
name:
description:
- The annotation stream name
- If the annotation stream does not exist, it will be created automatically
required: false
title:
description:
- The title of an annotation is a string and may contain spaces
- The title should be a short, high-level summary of the annotation e.g. v45 Deployment
required: true
source:
description:
- A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
required: false
description:
description:
- The description contains extra metadata about a particular annotation
- The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
required: false
start_time:
description:
- The unix timestamp indicating the time at which the event referenced by this annotation started
required: false
end_time:
description:
- The unix timestamp indicating the time at which the event referenced by this annotation ended
- For events that have a duration, this is a useful way to annotate the duration of the event
required: false
links:
description:
- See examples
    required: false
'''
EXAMPLES = '''
# Create a simple annotation event with a source
- librato_annotation:
user: [email protected]
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
source: foo.bar
description: This is a detailed description of the config change
# Create an annotation that includes a link
- librato_annotation:
user: [email protected]
api_key: XXXXXXXXXXXXXXXXXX
name: code.deploy
title: app code deploy
description: this is a detailed description of a deployment
links:
- rel: example
href: http://www.example.com/deploy
# Create an annotation with a start_time and end_time
- librato_annotation:
user: [email protected]
api_key: XXXXXXXXXXXXXXXXXX
name: maintenance
title: Maintenance window
description: This is a detailed description of maintenance
start_time: 1395940006
end_time: 1395954406
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def post_annotation(module):
user = module.params['user']
api_key = module.params['api_key']
name = module.params['name']
title = module.params['title']
url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
params = {}
params['title'] = title
if module.params['source'] is not None:
params['source'] = module.params['source']
if module.params['description'] is not None:
params['description'] = module.params['description']
if module.params['start_time'] is not None:
params['start_time'] = module.params['start_time']
if module.params['end_time'] is not None:
params['end_time'] = module.params['end_time']
if module.params['links'] is not None:
params['links'] = module.params['links']
json_body = module.jsonify(params)
headers = {}
headers['Content-Type'] = 'application/json'
# Hack send parameters the way fetch_url wants them
module.params['url_username'] = user
module.params['url_password'] = api_key
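    # fetch_url() picks url_username/url_password up from module.params and
    # sends them as HTTP basic-auth credentials (Librato authenticates with
    # the account email and API token).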
response, info = fetch_url(module, url, data=json_body, headers=headers)
response_code = str(info['status'])
response_body = info['body']
if info['status'] != 201:
if info['status'] >= 400:
module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
else:
module.fail_json(msg="Request Failed. Response code: " + response_code)
response = response.read()
module.exit_json(changed=True, annotation=response)
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
api_key=dict(required=True),
name=dict(required=False),
title=dict(required=True),
source=dict(required=False),
description=dict(required=False),
start_time=dict(required=False, default=None, type='int'),
end_time=dict(required=False, default=None, type='int'),
links=dict(type='list')
)
)
post_annotation(module)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
437e3b76c5db61174df0709315d5522b40f253a9 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /LSpPdiycJ75MiwvgQ_23.py | b36b16ff2450b87301651121df6fdfacf4586124 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py |
import math
def grid_pos(lst):
return (math.factorial((lst[0])+(lst[1])))/((math.factorial(lst[0]))*math.factorial(lst[1]))
| [
"[email protected]"
] | |
dabc8ad186e73d5e2493027f46098c5e57ecf998 | 9d74cbd676e629f8acdc68a4bac3dea0a98b9776 | /yc204/779.py | a67f3265cfaf192ec25967ebdf2dfdcd4f940743 | [
"MIT"
] | permissive | c-yan/yukicoder | 01166de35e8059eaa8e3587456bba52f35bd0e44 | dcfd89b0a03759156dcea8c2e61a7705543dc0d4 | refs/heads/master | 2022-03-20T06:50:48.225922 | 2022-02-25T15:48:50 | 2022-02-25T15:48:50 | 237,735,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | Y, M, D = map(int, input().split())
s = '%04d%02d%02d' % (Y, M, D)
if '19890108' <= s <= '20190430':
print('Yes')
else:
print('No')
| [
"[email protected]"
] | |
4e516a93303283e18e6694b9df114a2fffd8fce6 | 13b72e5c48f5f7213d9a569f699dc1554bc363dd | /demo/library/use_sf_2.py | a4a0a1fadf2c873065d272eabfe43c0dc0192d99 | [] | no_license | srikanthpragada/PYTHON_02_MAR_2021 | 6997fece4ad627bb767c0bca5a5e166369087e68 | 5dfd0c471378bd22379ac0d66f8785d4d315017b | refs/heads/master | 2023-04-04T20:28:22.738042 | 2021-04-17T14:19:48 | 2021-04-17T14:19:48 | 344,498,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from mylib import *
print(string_funs.hasupper('Abc')) | [
"[email protected]"
] | |
7e4616203464603cd0f1a2b181256c57a50b7d60 | b20b8858b5da05c60e7dac02feb1187f88cc3294 | /djangoproj/djangoapp/csc/nl/ja/utterance.py | f9d4798c1f3d2281a4dfe50b869a1ffb4b6df3d7 | [
"MIT"
] | permissive | pbarton666/buzz_bot | 18f15322e072d784e0916c5b8c147b53df5dc9d4 | 9f44c66e8ecb10e231f70989421f164d7a55029a | refs/heads/master | 2021-01-10T05:26:43.809377 | 2015-09-25T20:04:58 | 2015-09-25T20:04:58 | 43,027,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | from csc.nl.ja.debug import *
from csc.nl.ja.util import *
from csc.nl.ja.tree import *
import MeCab
import CaboCha
import re
class JaUtterance(JaTreeBranch, JaLanguageNode):
''' Represents an entire utterance '''
def __init__(self, children):
JaTreeBranch.__init__(self)
self.children = children
for child in self.children:
child.parent = self
dump_lines = JaDebug.dump_lines_utterance
def __str__(self):
return self.surface
@shared_property
def is_utterance(self):
return True
from csc.nl.ja.chunk import *
from csc.nl.ja.cabocha_token import *
from csc.nl.ja.parser import *
| [
"[email protected]"
] | |
b61371ddb6434f40ce292a1daa67942fd1dc6e2f | 80b545522375b2b8bbfdff0f540b1172e53b140c | /core/views.py | b4ff506ea7c62eb6707ebbbca84f665d297348f3 | [] | no_license | DuncanMoyo/django-ecommerce-website | 18e1e8dcf358de6758ad7974a703145ed5cab4db | 21783c3d4159adffabbfc522099cf9c55346bed8 | refs/heads/master | 2022-12-11T15:14:51.039929 | 2019-08-31T20:48:59 | 2019-08-31T20:48:59 | 196,033,607 | 0 | 0 | null | 2022-12-08T05:51:59 | 2019-07-09T15:14:11 | JavaScript | UTF-8 | Python | false | false | 17,248 | py | from django.shortcuts import render, get_object_or_404, redirect
from .models import Item, Order, OrderItem, Address, Payment, Coupon, Refund
from django.views.generic import ListView, View, DetailView
from django.utils import timezone
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import CheckoutForm, CouponForm, RefundForm
from django.conf import settings
import stripe
import random
import string
stripe.api_key = settings.STRIPE_SECRET_KEY
def create_ref_code():
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))
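# e.g. create_ref_code() -> 'q3x08kz1m2p7d9a4b6c0' (an illustrative 20-character value)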
def products(request):
context = {
'items': Item.objects.all()
}
return render(request, 'products.html', context)
def is_valid_form(values):
valid = True
for field in values:
if field == '':
valid = False
return valid
class CheckoutView(View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
form = CheckoutForm()
context = {
'form': form,
'couponform': CouponForm(),
'order': order,
'DISPLAY_COUPON_FORM': True
}
shipping_address_qs = Address.objects.filter(
user=self.request.user,
address_type='S',
default=True
)
if shipping_address_qs.exists():
context.update({'default_shipping_address': shipping_address_qs[0]})
billing_address_qs = Address.objects.filter(
user=self.request.user,
address_type='B',
default=True
)
if billing_address_qs.exists():
context.update({'default_billing_address': billing_address_qs[0]})
return render(self.request, 'checkout.html', context)
except ObjectDoesNotExist:
messages.warning(self.request, 'You do not have an active order')
return redirect('core:checkout')
def post(self, *args, **kwargs):
form = CheckoutForm(self.request.POST or None)
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if form.is_valid():
use_default_shipping = form.cleaned_data.get('use_default_shipping')
if use_default_shipping:
print('Using the default shipping address')
address_qs = Address.objects.filter(
user=self.request.user,
address_type='S',
default=True
)
if address_qs.exists():
shipping_address = address_qs[0]
order.shipping_address = shipping_address
order.save()
else:
messages.warning(self.request, 'No default shipping address available')
return redirect('core:checkout')
else:
print('User is entering a new shipping address')
shipping_address1 = form.cleaned_data.get('shipping_address')
shipping_address2 = form.cleaned_data.get('shipping_address2')
shipping_country = form.cleaned_data.get('shipping_country')
shipping_zip = form.cleaned_data.get('shipping_zip')
if is_valid_form([shipping_address1, shipping_country, shipping_zip]):
shipping_address = Address(
user=self.request.user,
street_address=shipping_address1,
apartment_address=shipping_address2,
country=shipping_country,
zip=shipping_zip,
address_type='S'
)
shipping_address.save()
order.shipping_address = shipping_address
order.save()
set_default_shipping = form.cleaned_data.get('set_default_shipping')
if set_default_shipping:
shipping_address.default = True
shipping_address.save()
else:
                        messages.warning(self.request, 'Please fill in the required shipping address fields')
use_default_billing = form.cleaned_data.get('use_default_billing')
same_billing_address = form.cleaned_data.get('same_billing_address')
if same_billing_address:
billing_address = shipping_address
billing_address.pk = None
billing_address.save()
billing_address.address_type = 'B'
billing_address.save()
order.billing_address = billing_address
order.save()
elif use_default_billing:
print('Using the default billing address')
address_qs = Address.objects.filter(
user=self.request.user,
address_type='B',
default=True
)
if address_qs.exists():
billing_address = address_qs[0]
order.billing_address = billing_address
order.save()
else:
messages.warning(self.request, 'No default billing address available')
return redirect('core:checkout')
else:
print('User is entering a new billing address')
billing_address1 = form.cleaned_data.get('billing_address')
billing_address2 = form.cleaned_data.get('billing_address2')
billing_country = form.cleaned_data.get('billing_country')
billing_zip = form.cleaned_data.get('billing_zip')
if is_valid_form([billing_address1, billing_country, billing_zip]):
billing_address = Address(
user=self.request.user,
street_address=billing_address1,
apartment_address=billing_address2,
country=billing_country,
zip=billing_zip,
address_type='B'
)
billing_address.save()
order.billing_address = billing_address
order.save()
set_default_billing = form.cleaned_data.get('set_default_billing')
if set_default_billing:
billing_address.default = True
billing_address.save()
else:
                        messages.warning(self.request, 'Please fill in the required billing address fields')
payment_option = form.cleaned_data.get('payment_option')
if payment_option == 'S':
return redirect('core:payment', payment_option='stripe')
elif payment_option == 'P':
return redirect('core:payment', payment_option='paypal')
else:
messages.warning(self.request, 'Invalid payment option selected')
return redirect('core:checkout')
else:
messages.warning(self.request, 'Form is invalid')
context = {
'form': form
}
return render(self.request, 'checkout.html', context)
except ObjectDoesNotExist:
messages.warning(self.request, 'You do not have an active order')
return redirect('core:order-summary')
class PaymentView(View):
def get(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
        if order.billing_address:  # a model instance (or None), so truthiness is the right test
context = {
'order': order,
'DISPLAY_COUPON_FORM': False
}
return render(self.request, 'payment.html', context)
else:
messages.warning(self.request, 'You have not added a billing address')
return redirect('core:checkout')
def post(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
token = self.request.POST.get('stripeToken')
        amount = int(order.get_total() * 100)  # Stripe expects the amount in cents
try:
charge = stripe.Charge.create(
amount=amount,
currency="usd",
source=token, # obtained with Stripe.js
)
# create the payment
payment = Payment()
payment.stripe_charge_id = charge['id']
payment.user = self.request.user
payment.amount = order.get_total()
payment.save()
# assign payment to the order
order_items = order.items.all()
order_items.update(ordered=True)
for item in order_items:
item.save()
order.ordered = True
order.payment = payment
order.ref_code = create_ref_code()
order.save()
messages.success(self.request, 'Your order was successful')
return redirect('/')
except stripe.error.CardError as e:
# Since it's a decline, stripe.error.CardError will be caught
body = e.json_body
err = body.get('error', {})
messages.warning(self.request, f"{err.get('message')}")
return redirect('/')
except stripe.error.RateLimitError as e:
# Too many requests made to the API too quickly
messages.warning(self.request, 'Rate Limit Error')
return redirect('/')
except stripe.error.InvalidRequestError as e:
# Invalid parameters were supplied to Stripe's API
messages.warning(self.request, 'Invalid Parameters')
return redirect('/')
except stripe.error.AuthenticationError as e:
# Authentication with Stripe's API failed
# (maybe you changed API keys recently)
messages.warning(self.request, 'Not authenticated')
return redirect('/')
except stripe.error.APIConnectionError as e:
# Network communication with Stripe failed
messages.warning(self.request, 'Network Error')
return redirect('/')
except stripe.error.StripeError as e:
# Display a very generic error to the user, and maybe send
# yourself an email
messages.warning(self.request, 'Something went wrong. You were not charged, please try again')
return redirect('/')
except Exception as e:
# Send email to ourselves
            messages.warning(self.request, 'A serious error has occurred; we have been notified')
return redirect('/')
class HomeView(ListView):
model = Item
paginate_by = 4
template_name = 'home.html'
class OrderSummaryView(LoginRequiredMixin, View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'object': order
}
return render(self.request, 'order_summary.html', context)
except ObjectDoesNotExist:
messages.warning(self.request, 'You do not have an active order')
return redirect('/')
class ItemDetailView(DetailView):
model = Item
template_name = 'product.html'
@login_required
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item.quantity += 1
order_item.save()
messages.info(request, 'This item quantity was updated')
return redirect('core:order-summary')
else:
order.items.add(order_item)
messages.info(request, 'This item was added to your cart')
return redirect('core:order-summary')
else:
ordered_date = timezone.now()
order = Order.objects.create(user=request.user, ordered_date=ordered_date)
order.items.add(order_item)
messages.info(request, 'This item was added to your cart')
return redirect('core:order-summary')
@login_required
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
order.items.remove(order_item)
messages.info(request, 'This item was removed from your cart.')
return redirect('core:order-summary')
else:
messages.info(request, 'This item was not in your cart')
return redirect('core:product', slug=slug)
else:
messages.info(request, 'You do not have an active order')
return redirect('core:product', slug=slug)
@login_required
def remove_single_item_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
messages.info(request, 'This item quantity was updated')
return redirect('core:order-summary')
else:
messages.info(request, 'This item was not in your cart')
return redirect('core:product', slug=slug)
else:
messages.info(request, 'You do not have an active order')
return redirect('core:product', slug=slug)
def get_coupon(request, code):
try:
coupon = Coupon.objects.get(code=code)
return coupon
except ObjectDoesNotExist:
messages.info(request, 'This coupon does not exist')
return redirect('core:checkout')
class AddCouponView(View):
def post(self, *args, **kwargs):
form = CouponForm(self.request.POST or None)
if form.is_valid():
try:
code = form.cleaned_data.get('code')
order = Order.objects.get(user=self.request.user, ordered=False)
order.coupon = get_coupon(self.request, code)
order.save()
                messages.success(self.request, 'Successfully added coupon')
return redirect('core:checkout')
except ObjectDoesNotExist:
messages.warning(self.request, 'You do not have an active order')
return redirect('core:checkout')
class RequestRefundView(View):
def get(self, *args, **kwargs):
form = RefundForm()
context = {
'form': form
}
return render(self.request, 'request_refund.html', context)
def post(self, *args, **kwargs):
form = RefundForm(self.request.POST)
if form.is_valid():
ref_code = form.cleaned_data.get('ref_code')
message = form.cleaned_data.get('message')
email = form.cleaned_data.get('email')
# edit the order
try:
order = Order.objects.get(ref_code=ref_code)
order.refund_requested = True
order.save()
# store the refund
refund = Refund()
refund.order = order
refund.reason = message
refund.email = email
refund.save()
messages.success(self.request, 'Your request has been received')
return redirect('core:request-refund')
except ObjectDoesNotExist:
messages.warning(self.request, 'This order does not exist')
return redirect('core:request-refund')
| [
"[email protected]"
] | |
2bfec92c85686a48a0bf480793637b9b2fbe0c90 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/voice-service/azext_voice_service/__init__.py | 74808adfe69dcf2b4492cb572043ecf8ee1296d8 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 1,563 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azext_voice_service._help import helps # pylint: disable=unused-import
class VoiceServiceCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
custom_command_type = CliCommandType(
operations_tmpl='azext_voice_service.custom#{}')
super().__init__(cli_ctx=cli_ctx,
custom_command_type=custom_command_type)
def load_command_table(self, args):
from azext_voice_service.commands import load_command_table
from azure.cli.core.aaz import load_aaz_command_table
try:
from . import aaz
except ImportError:
aaz = None
if aaz:
load_aaz_command_table(
loader=self,
aaz_pkg_name=aaz.__name__,
args=args
)
load_command_table(self, args)
return self.command_table
def load_arguments(self, command):
from azext_voice_service._params import load_arguments
load_arguments(self, command)
COMMAND_LOADER_CLS = VoiceServiceCommandsLoader
| [
"[email protected]"
] | |
1c9c05109df0cd972f65b8c29bd5c952deceb307 | c83bb15f5f4ec5c5d2b2e05a7222eaf1fd4f49e5 | /myops_run.py | ad4beb49c9217b542c766c3de0bd0d6f15327695 | [
"Apache-2.0"
] | permissive | Xinya-liuliu/MyoPS2020 | 47fd2a0a3dd050ae128a1ddbe3b9b966e0cb45e0 | 6ac6157070315c7917a59954476682c1144f3845 | refs/heads/main | 2023-07-31T22:27:31.485620 | 2021-09-24T05:39:33 | 2021-09-24T05:39:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import sys
from pymic.util.parse_config import parse_config
from pymic.net_run.agent_seg import SegmentationAgent
from path_config import path_dict
def main():
if(len(sys.argv) < 4):
print('Number of arguments should be 4. e.g.')
print(' python myops_run.py train config.cfg 1')
exit()
stage = str(sys.argv[1])
cfg_file = str(sys.argv[2])
fold = str(sys.argv[3])
if(not os.path.isfile(cfg_file)):
print("configure file does not exist: {0:} ".format(cfg_file))
exit()
# reset data dir of each fold for configure
config = parse_config(cfg_file)
data_dir = config['dataset']['root_dir']
data_dir = data_dir.replace('MyoPS_data_dir', path_dict['MyoPS_data_dir'])
config['dataset']['root_dir'] = data_dir
for item in ['train_csv', 'valid_csv', 'test_csv']:
config['dataset'][item] = config['dataset'][item].replace("foldi", "fold" + fold)
ckpt_save_dir = config['training']['ckpt_save_dir']
ckpt_save_dir = ckpt_save_dir.replace("fold_i", "fold_" + fold)
config['training']['ckpt_save_dir'] = ckpt_save_dir
agent = SegmentationAgent(config, stage)
agent.run()
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
50074aa47c4416235453ba945d4a0326e34c5eb4 | 06e359c19c14a549d28ce8ab62a6e1e5f40f0ea8 | /ScikitLearn/unsupervised/biClusteringL.py | 8b78e68f28cf461c745d7aebe4febc5c7c318e19 | [
"Apache-2.0"
] | permissive | thomasearnest/MDeepLearn | 90c6f4c6d5f148f91b1ce95471cad42baee26977 | 00eb7211a3a40a9da02114923647dfd6ac24f138 | refs/heads/master | 2021-10-20T07:27:33.168927 | 2019-02-26T13:37:48 | 2019-02-26T13:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:biClusteringL
   Description : Biclustering - cluster rows and columns simultaneously
Email : [email protected]
Date:2018/1/1
"""
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.datasets import make_biclusters
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(shape=(300, 300), n_clusters=5, noise=0.5, random_state=0)
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_, (rows, columns))
print('scores: {}'.format(score))
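# consensus_score() returns 1.0 when the recovered biclusters match the ground
# truth (rows, columns) exactly; on this synthetic data the model is expected
# to recover them.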
| [
"[email protected]"
] | |
b39073640b54f81c5cab62529f62b21ee9042550 | 57120090948f99de2258a6f01a0cc65443441ce9 | /hyperclass/exe/hyperclass/IndianaPines.py | 867c2bd892b75164b4d35fd74fe05a5cd177b27f | [] | no_license | MysteriousSonOfGod/hyperclass | c67eff91f6f0f64fa4a92f8567243ef5cd8fa3c8 | e8cec11b364e8b049e7432b95ce20a2c5de94235 | refs/heads/master | 2023-01-28T16:42:09.289664 | 2020-12-07T22:54:50 | 2020-12-07T22:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | from hyperclass.gui.application import HCApplication
from hyperclass.data.manager import dataManager
from hyperclass.gui.labels import labelsManager
from hyperclass.gui.spatial.application import SpatialAppConsole
import sys
ref_file = "/Users/tpmaxwel/Dropbox/Tom/Data/Aviris/IndianPines/documentation/Site3_Project_and_Ground_Reference_Files/19920612_AVIRIS_IndianPine_Site3_gr.tif"
classes = [ ('Alfalfa', [255, 254, 137] ),
('Corn-notill', [3,28,241] ),
('Corn-mintill', [255, 89, 1] ),
('Corn', [5, 255, 133] ),
('Grass/Pasture', [255, 2, 251] ),
('Grass/Trees', [89, 1, 255 ]),
('Grass/pasture-mowed', [3, 171, 255]),
('Hay-windrowed', [12, 255, 7 ]),
('Oats', [172, 175, 84 ]),
('Soybean-notill',[160, 78,158]),
('Soybean-mintill', [101, 173, 255]),
('Soybean-cleantill', [60, 91, 112] ),
('Wheat', [104, 192, 63]),
('Woods', [139, 69, 46]),
('Bldg-Grass-Tree-Drives', [119, 255, 172]),
('Stone/steel-towers', [254, 255, 3])
]
tabs = dict( Reference=dict( type="reference", classes=classes, path=ref_file ) )
default_settings = {}
app = HCApplication()
labelsManager.setLabels( classes )
dataManager.initProject( 'hyperclass', default_settings )
hyperclass = SpatialAppConsole( tabs=tabs )
hyperclass.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
5f44dbc1c1096fa46abc233b27ca9419b8772346 | 8a9ad387c58bfcda5806211397450d728aadbd80 | /myenv/bin/easy_install | 0320e8f6112973120de32ab5ca4d3ce46bb468cc | [] | no_license | miyago/my-1st-blog | 9a7f92799ac04785ff9e0a28199920af5ca88c19 | 5bc8d28823e1d9059600d2808efecf52e262aa2a | refs/heads/master | 2020-04-23T01:21:05.853519 | 2019-02-15T10:32:59 | 2019-02-15T10:32:59 | 170,809,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | #!/mnt/d/GitLab/testdjango/djangogirls/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
3dbbb9d3f258c8d80d132dd44156efd15a3ad0bb | 2af1489471d199861b2e7cd63676d842eb65ec5a | /channelstream/wsgi_app.py | 3af579f0067e57ade46e4412500c4da566c0c211 | [
"BSD-3-Clause"
] | permissive | webclinic017/channelstream | 637df12982817bd6c74fc1cb91c8571e5c551eef | 73434adca2812a682b739f86bf1ca320e1f1603c | refs/heads/master | 2023-05-12T15:55:13.453463 | 2020-10-21T19:08:08 | 2020-10-21T19:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | import datetime
import uuid
import importlib
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.renderers import JSON
from pyramid.security import NO_PERMISSION_REQUIRED
from channelstream import patched_json as json
from channelstream.wsgi_views.wsgi_security import APIFactory
def datetime_adapter(obj, request):
return obj.isoformat()
def uuid_adapter(obj, request):
return str(obj)
def make_app(server_config):
config = Configurator(
settings=server_config, root_factory=APIFactory, default_permission="access"
)
config.include("pyramid_jinja2")
module_, class_ = server_config["signature_checker"].rsplit(".", maxsplit=1)
signature_checker_cls = getattr(importlib.import_module(module_), class_)
config.registry.signature_checker = signature_checker_cls(server_config["secret"])
authn_policy = AuthTktAuthenticationPolicy(
server_config["cookie_secret"], max_age=2592000
)
authz_policy = ACLAuthorizationPolicy()
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
json_renderer = JSON(serializer=json.dumps, indent=4)
json_renderer.add_adapter(datetime.datetime, datetime_adapter)
json_renderer.add_adapter(uuid.UUID, uuid_adapter)
config.add_renderer("json", json_renderer)
config.add_subscriber(
"channelstream.subscribers.handle_new_request", "pyramid.events.NewRequest"
)
config.add_request_method("channelstream.utils.handle_cors", "handle_cors")
config.include("channelstream.wsgi_views")
config.scan("channelstream.wsgi_views.server")
config.scan("channelstream.wsgi_views.error_handlers")
config.scan("channelstream.events")
config.include("pyramid_apispec.views")
config.pyramid_apispec_add_explorer(
spec_route_name="openapi_spec",
script_generator="channelstream.utils:swagger_ui_script_template",
permission="admin",
route_args={
"factory": "channelstream.wsgi_views.wsgi_security:AdminAuthFactory"
},
)
app = config.make_wsgi_app()
return app
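

# Usage sketch (added; not part of the original module). `make_app` reads at
# least `signature_checker` (a dotted path), `secret`, and `cookie_secret`
# from the config above; the concrete values and the waitress server below
# are illustrative assumptions, not channelstream's documented defaults.
#
# from waitress import serve
# app = make_app({
#     "signature_checker": "itsdangerous.TimestampSigner",
#     "secret": "server-secret",
#     "cookie_secret": "cookie-secret",
# })
# serve(app, listen="127.0.0.1:8000")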
| [
"[email protected]"
] | |
f86cf73fb42b39100b549b4648351468bffff0c4 | 9672b0bd6c73f35fdcc04dcf884d2e8425e78359 | /resources/exceptions.py | 96182522c64fecb5138ccb6a30069f52aef07680 | [
"MIT"
] | permissive | surfedushare/pol-research | a91d5b6c95184719d721e3a8541e36b77a9ed1c6 | 5d07782fba0a894e934efb1dd199b6a4a19f678b | refs/heads/master | 2022-01-07T00:50:24.427941 | 2019-05-02T12:29:53 | 2019-05-02T12:29:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | class DGResourceException(Exception):
def __init__(self, message, resource):
super().__init__(message)
self.resource = resource
class DGShellError(DGResourceException):
pass
| [
"[email protected]"
] | |
fe7f50a000e9b56ff52ec396658796c65e6df426 | f2cc1dc87486833613fb83543c68784849fd7319 | /addons/random_data_generator/__init__.py | 50cf640fd111b0d52751d006390358b0da42d1dd | [] | no_license | EduardoUrzuaBo/platziChallenge | cc953e2615653d575cf079bceea4fdcad75a4da0 | a8f06c98f14ee58db47848ec287dcd105b685dcb | refs/heads/master | 2023-07-29T10:44:16.469765 | 2021-09-05T20:14:42 | 2021-09-05T20:14:42 | 403,379,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from .random_data_generator import RandomDataGenerator
| [
"[email protected]"
] | |
c161b56d5d336d5b33f873a135e6ff06d4a82968 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/931.py | 67218b9c71e79a373560e676c36322fa8eb846ab | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | import sys
class Runner(object):
def __init__(self, test_case):
self.test_case = test_case
self.state = {
"O": 1,
"B": 1,
}
    def run(self):
        # Simulate the walk one second at a time: each robot presses its
        # button (when it is standing on it and it is next in the global
        # sequence), waits, or moves one position toward its own next target.
        def done():
            return not bool(self.test_case.instructions)
        def next_target(robot):
            # Position of this robot's next button (0 if it has none left).
            next_buttons = [i['button'] for i in self.test_case.instructions if i['robot'] == robot]
            return next_buttons[0] if next_buttons else 0
        def next_button():
            return self.test_case.instructions[0]
        def press():
            self.test_case.instructions.pop(0)
        def wait():
            pass
        def move(robot, forward=True):
            if forward:
                self.state[robot] += 1
            else:
                self.state[robot] -= 1
        steps = 0
        while True:
            if done(): break
            n = next_button()
            for robot in self.state.keys():
                robots_next = next_target(robot)
                if robots_next == self.state[robot]:
                    if n['robot'] == robot:
                        press()
                    else:
                        wait()
                else:
                    move(robot, self.state[robot] < robots_next)
            steps += 1
        print('Case #%d: %d' % (self.test_case.number, steps))
class TestCase(object):
def __init__(self, spec_string, number):
self.number = number
self.instructions = []
spec = spec_string.strip().split(" ")
spec.pop(0) # discard count.
while spec:
robot = spec.pop(0)
button = int(spec.pop(0))
t = {'robot': robot, 'button': button}
self.instructions.append(t)
def read_input():
    lines = sys.stdin.readlines()
    # The first line holds the number of cases; each later line is one case.
    return [TestCase(test_case_spec, test_case_number)
            for test_case_spec, test_case_number in zip(lines[1:], range(1, len(lines)))]
if __name__ == '__main__':
test_cases = read_input()
for test_case in test_cases:
Runner(test_case).run()
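
# Added illustration: this appears to be the Google Code Jam 2011 "Bot Trust"
# problem. With the classic sample case the simulation reports 6 seconds
# ("solution.py" is a stand-in name for this file):
#   $ printf '1\n4 O 2 B 1 B 2 O 4\n' | python solution.py
#   Case #1: 6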
| [
"[email protected]"
] | |
3aa380bc45ee5078c114892476157413ad664058 | 908655251066427f654ee33ebdf804f9f302fcc3 | /Tests/CartPoleAST/Test/ast_reward.py | c8213b436bd92572cc9153556fabc64880fe402c | [] | no_license | maxiaoba/MCTSPO | be567f80f1dcf5c35ac857a1e6690e1ac599a59d | eedfccb5a94e089bd925b58f3d65eef505378bbc | refs/heads/main | 2023-07-05T02:20:16.752650 | 2021-07-06T06:04:40 | 2021-07-06T06:04:40 | 381,811,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py |
# useful packages for math and debugging
import numpy as np
import pdb
# Define the class, inherit from the base
class ASTReward(object):
def __init__(self):
pass
def give_reward(self, action, **kwargs):
"""
returns the reward for a given time step
Input
-----
kwargs : accepts relevant info for computing the reward
Outputs
-------
(observation, reward, done, info)
reward [Float] : amount of reward due to the previous action
"""
raise NotImplementedError | [
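

# Added sketch (not in the original file): a minimal concrete reward showing
# the expected subclassing pattern. The constant per-step penalty is an
# illustrative choice, not part of the AST interface.
class ExampleASTReward(ASTReward):
    def give_reward(self, action, **kwargs):
        # fixed negative reward for every elapsed time step (placeholder logic)
        return -1.0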
"[email protected]"
] | |
209da61669af27b11a8a5affd1de64c8fb12117a | d312ced5d6a06d35937b3f3d6a7415746ef06f71 | /weibo2rss/urls.py | 6db52f1cd121a8c315918fba590d36665f1c5d7c | [] | no_license | chu888chu888/Python-SAE-weibo2rss | 931ccaf2cd8438ebbfd6eff3b65a54b88e63e999 | 80f0c9e862505218a4284fcebd67276e8b372e31 | refs/heads/master | 2020-04-10T08:44:54.448809 | 2012-12-09T07:07:34 | 2012-12-09T07:07:34 | 9,112,168 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from weibo2rss.views import *
urlpatterns = patterns('',
    url(r'^$', getfeedurl),  # home page
    url(r'^callback/$', callback),  # Weibo OAuth callback page
    url(r'^timeline/(?P<uid>\d+)/$', timeline),  # RSS for a user's Weibo timeline, fetched by user id
    url(r'^favorites/(?P<uid>\d+)/$', favorites),  # RSS for a user's Weibo favorites, fetched by user id
    url(r'^admin/root/weibo/clean/$', clean),  # internal page that purges expired tokens from the database; the schedule is defined in the cron field of config.yaml
)
| [
"[email protected]"
] | |
34b2d24f65f93831c306c1d5e2fc2783c605cea3 | 3f48e3308674212408c3b6ca972eb4f793bf142b | /f7_chipseq/f7_diff_binding_on_UTX_sep_k4me1/f1_differential_binding_figs/py4c_heatmap_across_CellTypes_k27ac_increased.py | d8d3e09d9ee4e0d87d9e5c650b7f448484669bd7 | [] | no_license | zanglab/utx_code | 8497840ace81e0337f92f04fafbb691f0ed24865 | 32fc7851207f650b3cc78974ab798f8606099e56 | refs/heads/main | 2023-06-28T17:38:33.231877 | 2021-07-27T01:31:00 | 2021-07-27T01:31:00 | 388,648,753 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,138 | py | import sys,argparse
import os,glob,re
import numpy as np
import pandas as pd
import matplotlib
# matplotlib.use('Agg')
from matplotlib import gridspec
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=14
matplotlib.rcParams["font.sans-serif"] = ["Arial", "Liberation Sans", "Bitstream Vera Sans"]
matplotlib.rcParams["font.family"] = "sans-serif"
import seaborn as sns
sns.set_style("whitegrid", {'axes.grid' : False,'grid.color': 'grey'})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
from matplotlib.colors import LinearSegmentedColormap
#plus = re.compile('\+')
#minus = re.compile('\-')
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
from scipy.interpolate import interpn
def window_cumulative(df,half_window=7,step=1):
    # Smooth each row with a centered moving average of width 2*half_window+1,
    # truncating the window at both edges of the frame.
    smooth_df_columns = np.arange(0,len(df.columns),step)
    smooth_df = pd.DataFrame(index=df.index,columns=smooth_df_columns)
    for col in smooth_df_columns:
        window_left = max(col-half_window,0)
        window_right = min(col+half_window,len(df.columns)-1)
        smooth_df.loc[:,col] = df.iloc[:,window_left:window_right+1].mean(axis=1)
    return smooth_df
def signal_centered(df):
    # Shift every row so that its maximum lands on the center column,
    # zero-padding whichever side the values are shifted away from.
    center_position = int(df.shape[1]/2)
    for row in df.index:
        vals = df.loc[row]
        max_index = list(vals).index(vals.max())
        # move max to center
        if max_index<center_position:
            df.loc[row] = np.append(np.zeros(center_position-max_index),vals)[:df.shape[1]]
        elif max_index>center_position:
            df.loc[row] = np.append(vals[max_index-center_position:],np.zeros(max_index-center_position))
    return df
def return_vlim(factor,peak_file):
if re.search('islands',peak_file):
factor_match_clim = {'UTX':2,
'UTXFEB':2,
'H3K27me3':4,
'MLL4':4,
'H3K27ac':4,
'H3K4me1':4,
'H3K4me2':4,
'H3K4me3':4}
else:
factor_match_clim = {'UTX':3,
'UTXFEB':3,
'H3K27me3':5,
'MLL4':5,
'H3K27ac':5,
'H3K4me1':5,
'H3K4me2':5,
'H3K4me3':5}
cbar_vmax = factor_match_clim[factor]
return cbar_vmax*0.05,cbar_vmax
def prepare_each_subfig(df_tmp,gs,heatmap_pos,peak_file,factor,celltype):
# read the binding pattern for each factor/celltype
csv_file='../data_binding_patter_readCount/readCount_csv/{}_{}_on_{}_es2kb_bin200.csv'.format(celltype,factor,peak_file)
df = pd.read_csv(csv_file,sep='\t',index_col=0)
df = df.loc[df_tmp.index[:]]
df = window_cumulative(df)
# normalization by readcount
norm_df = pd.read_csv('{}/f0_data_process/chip_seq/final_chipseq/sicer2_islands/get_readCount_on_sicer_islads/total_reads_in_Islands.csv'.format(project_dir),index_col=0)
norm_col = 'total' if re.search('UTX',factor) else 'total_in_islads'
print(peak_file,factor,celltype,norm_col)
norm_factor = norm_df.loc['{}_{}'.format(celltype,factor),norm_col]/1000000
df = 50*df/norm_factor # per kb per million mapped reads
pal = sns.light_palette('red',as_cmap=True)
vmin,vmax = return_vlim(factor,peak_file)
# vmin=None;vmax=None
all_values = [i for col in df.columns for i in df[col].values]
df = df.clip(upper=np.percentile(all_values,98))
ax = plt.subplot(gs[0,heatmap_pos])
g = sns.heatmap(df,yticklabels=False,xticklabels=True,cbar=True,cmap=pal,\
vmin=vmin,vmax=vmax,cbar_kws={"shrink": 0.6})
ax.set_ylabel('')
cbar = g.collections[0].colorbar
cbar.ax.set_position([.9,0.36,.8,.5])
cbar.set_ticks([vmin,vmax])
cbar.set_ticklabels([0,vmax])
if heatmap_pos==0:
ax.set_ylabel('UTX binding sites \n (#{})\n'.format(df.shape[0]),va='baseline')
if not heatmap_pos==3:
cbar.remove()
xp = g.get_xticks()
ax.set_xticks([xp[0],xp[-1]])
ax.set_xticklabels(['-2kb','2kb'],rotation=45,fontsize=13)
ax.set_title('{}\n{}'.format(factor, cellType_labels[celltype]),fontsize=14)
return df.mean()
# ==== dictionary of matched colors/labels
cellType_colors = {'Vector':'tab:blue',\
'WT':'tab:red',\
'DEL':'k',\
'EIF':'tab:purple',\
'TPR':'tab:green',\
'MT2':'tab:orange',\
'FUS':'tab:gray'}
cellType_labels= {'Vector':'Vector',\
'WT':'WT',\
'DEL':'$\Delta$cIDR',\
'EIF':'UTX-eIF$_{IDR}$',\
'TPR':'$\Delta$TPR',\
'MT2':'MT2',\
'FUS':'UTX-FUS$_{IDR}$'}
cellTypes = ['Vector','WT','DEL','EIF']
factors = ['UTX','UTXFEB','H3K27me3','MLL4','H3K4me1','H3K4me2','H3K4me3','H3K27ac']
peak_files = ['UTX_peaks','UTX_islands','UTXFEB_islands','UTXFEB_peaks']
project_dir='/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang/'
# project_dir='/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang/'
indir='../f0_data_integration/f2_combined_data/'
outdir = 'f4c_heatmap_across_CellTypes_k27ac_increased'
os.makedirs(outdir,exist_ok=True)
# k4me1_log2fc_col='H3K4me1_WT_over_H3K4me1_Vector_log2FC'
# k4me1_log2avg_col = 'H3K4me1_WT_over_H3K4me1_Vector_log2AVG'
hm_log2fc_col='H3K27ac_WT_over_H3K27ac_Vector_log2FC'
hm_log2avg_col = 'H3K27ac_WT_over_H3K27ac_Vector_log2AVG'
fc_thres = [1.5,2]
log2avg_thre = 0
# == rank value by UTX signal
# csv_file='{}//f7_chipseq/f2_differential_binding_on_202011_UTX_WT_peaks/data_binding_pattern/rpkm_csv/Vector_UTX_es2kb_bin200_on_202011_UTX_WT_peaks.csv'.format(project_dir)
# index_df = pd.read_csv(csv_file,sep='\t',index_col=0)
# ranked_index = index_df.sum(axis=1).sort_values(ascending=False).index
for peak_file in peak_files[:]:
master_df = pd.read_csv('{}/combined_DiffNormReads_on_{}.csv'.format(indir,peak_file),index_col=0)
for fc_thre in fc_thres:
master_df_tmp = master_df[(master_df[hm_log2fc_col]> np.log2(fc_thre)) & (master_df[hm_log2avg_col]>log2avg_thre)]
for factor in factors[:]:
# data for composite plot
composite_data = {}
# heatmap of each factor
fig = plt.figure(figsize = (6,2))
width_ratio = [1,1,1,1]
gs = gridspec.GridSpec(1,4,width_ratios=width_ratio,wspace=.1)
heatmap_pos=0
for celltype in cellTypes[:]:
avg_binding = prepare_each_subfig(master_df_tmp,gs,heatmap_pos,peak_file,factor,celltype)
composite_data[celltype]=avg_binding
heatmap_pos+=1
plt.savefig(outdir+os.sep+'fcthre_{}_{}_{}_binding.png'.format(fc_thre,peak_file,factor,),bbox_inches='tight',pad_inches=0.1,transparent=True,dpi=600)
plt.show()
plt.close()
# == composite plot
fig = plt.figure(figsize = (3,2))
for celltype in cellTypes[:]:
plt.plot(composite_data[celltype],
label = cellType_labels[celltype],
color = cellType_colors[celltype])
plt.ylabel('{} signal'.format(factor))
# plt.ylim(ymax=9 if norm_pattern=='RPKM' else 5)
plt.axes().set_xticks([0,100,200])
plt.axes().set_xticklabels(['-2kb','0','2kb'])
plt.legend(fontsize=12,borderaxespad=0.1,labelspacing=.2,handletextpad=0.2,
handlelength=1,loc="upper right",
bbox_to_anchor=[1.65,1],
frameon=False)
plt.savefig(outdir+os.sep+'composite_fcthre_{}_{}_{}_binding.png'.format(fc_thre,peak_file,factor),bbox_inches='tight',pad_inches=0.1,transparent=True,dpi=600)
plt.show()
plt.close()
| [
"[email protected]"
] | |
d536b7df1b4b365bb585e612059f09bc083f533b | a2d44f3c89acb7424cc2771f5c0a926e2d902c77 | /transformers/src/transformers/image_utils.py | 951d682c944f2d94717a44fa909c6a0151e442a2 | [
"Apache-2.0"
] | permissive | amazon-science/masked-diffusion-lm | 94845ff123eb586fca0247b0db7baf12dfee6a6d | 16b0294398d596198bc9f75375eaa6814f792dcb | refs/heads/main | 2023-08-03T02:23:14.301531 | 2023-05-04T19:54:58 | 2023-05-04T19:54:58 | 626,021,474 | 38 | 0 | Apache-2.0 | 2023-08-14T22:24:30 | 2023-04-10T16:19:44 | Python | UTF-8 | Python | false | false | 12,839 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
from .file_utils import _is_torch, is_torch_available
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]
IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5]
ImageInput = Union[
PIL.Image.Image, np.ndarray, "torch.Tensor", List[PIL.Image.Image], List[np.ndarray], List["torch.Tensor"] # noqa
]
def is_torch_tensor(obj):
return _is_torch(obj) if is_torch_available() else False
def load_image(image: Union[str, "PIL.Image.Image"]) -> "PIL.Image.Image":
"""
Loads `image` to a PIL Image.
Args:
image (`str` or `PIL.Image.Image`):
The image to convert to the PIL Image format.
Returns:
`PIL.Image.Image`: A PIL Image.
"""
if isinstance(image, str):
if image.startswith("http://") or image.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
image = PIL.Image.open(requests.get(image, stream=True).raw)
elif os.path.isfile(image):
image = PIL.Image.open(image)
else:
raise ValueError(
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
)
elif isinstance(image, PIL.Image.Image):
image = image
else:
raise ValueError(
"Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
# In the future we can add a TF implementation here when we have TF models.
class ImageFeatureExtractionMixin:
"""
Mixin that contain utilities for preparing image features.
"""
def _ensure_format_supported(self, image):
if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
raise ValueError(
f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and "
"`torch.Tensor` are."
)
def to_pil_image(self, image, rescale=None):
"""
Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
needed.
Args:
image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
The image to convert to the PIL Image format.
rescale (`bool`, *optional*):
Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
default to `True` if the image type is a floating type, `False` otherwise.
"""
self._ensure_format_supported(image)
if is_torch_tensor(image):
image = image.numpy()
if isinstance(image, np.ndarray):
if rescale is None:
# rescale default to the array being of floating type.
rescale = isinstance(image.flat[0], np.floating)
# If the channel as been moved to first dim, we put it back at the end.
if image.ndim == 3 and image.shape[0] in [1, 3]:
image = image.transpose(1, 2, 0)
if rescale:
image = image * 255
image = image.astype(np.uint8)
return PIL.Image.fromarray(image)
return image
def to_numpy_array(self, image, rescale=None, channel_first=True):
"""
Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
dimension.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to convert to a NumPy array.
rescale (`bool`, *optional*):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
channel_first (`bool`, *optional*, defaults to `True`):
Whether or not to permute the dimensions of the image to put the channel dimension first.
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = np.array(image)
if is_torch_tensor(image):
image = image.numpy()
if rescale is None:
rescale = isinstance(image.flat[0], np.integer)
if rescale:
image = image.astype(np.float32) / 255.0
if channel_first and image.ndim == 3:
image = image.transpose(2, 0, 1)
return image
def normalize(self, image, mean, std):
"""
Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
if it's a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to normalize.
mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
The mean (per channel) to use for normalization.
std (`List[float]` or `np.ndarray` or `torch.Tensor`):
The standard deviation (per channel) to use for normalization.
"""
self._ensure_format_supported(image)
if isinstance(image, PIL.Image.Image):
image = self.to_numpy_array(image)
if isinstance(image, np.ndarray):
if not isinstance(mean, np.ndarray):
mean = np.array(mean).astype(image.dtype)
if not isinstance(std, np.ndarray):
std = np.array(std).astype(image.dtype)
elif is_torch_tensor(image):
import torch
if not isinstance(mean, torch.Tensor):
mean = torch.tensor(mean)
if not isinstance(std, torch.Tensor):
std = torch.tensor(std)
if image.ndim == 3 and image.shape[0] in [1, 3]:
return (image - mean[:, None, None]) / std[:, None, None]
else:
return (image - mean) / std
def resize(self, image, size, resample=PIL.Image.BILINEAR, default_to_square=True, max_size=None):
"""
Resizes `image`. Note that this will trigger a conversion of `image` to a PIL Image.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`int` or `Tuple[int, int]`):
The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
matched to this.
If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
`size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to
this number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`):
The filter to user for resampling.
default_to_square (`bool`, *optional*, defaults to `True`):
How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
square (`size`,`size`). If set to `False`, will replicate
[`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
with support for resizing only the smallest edge and providing an optional `max_size`.
max_size (`int`, *optional*, defaults to `None`):
The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
greater than `max_size` after being resized according to `size`, then the image is resized again so
that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller
edge may be shorter than `size`. Only used if `default_to_square` is `False`.
"""
self._ensure_format_supported(image)
if not isinstance(image, PIL.Image.Image):
image = self.to_pil_image(image)
if isinstance(size, list):
size = tuple(size)
if isinstance(size, int) or len(size) == 1:
if default_to_square:
size = (size, size) if isinstance(size, int) else (size[0], size[0])
else:
width, height = image.size
# specified size only for the smallest edge
short, long = (width, height) if width <= height else (height, width)
requested_new_short = size if isinstance(size, int) else size[0]
if short == requested_new_short:
return image
new_short, new_long = requested_new_short, int(requested_new_short * long / short)
if max_size is not None:
if max_size <= requested_new_short:
raise ValueError(
f"max_size = {max_size} must be strictly greater than the requested "
f"size for the smaller edge size = {size}"
)
if new_long > max_size:
new_short, new_long = int(max_size * new_short / new_long), max_size
size = (new_short, new_long) if width <= height else (new_long, new_short)
return image.resize(size, resample=resample)
def center_crop(self, image, size):
"""
Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
size given, it will be padded (so the returned result has the size asked).
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`int` or `Tuple[int, int]`):
The size to which crop the image.
"""
self._ensure_format_supported(image)
if not isinstance(size, tuple):
size = (size, size)
# PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
image_shape = (image.size[1], image.size[0]) if isinstance(image, PIL.Image.Image) else image.shape[-2:]
top = (image_shape[0] - size[0]) // 2
bottom = top + size[0] # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
left = (image_shape[1] - size[1]) // 2
right = left + size[1] # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.
# For PIL Images we have a method to crop directly.
if isinstance(image, PIL.Image.Image):
return image.crop((left, top, right, bottom))
# Check if all the dimensions are inside the image.
if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
return image[..., top:bottom, left:right]
# Otherwise, we may need to pad if the image is too small. Oh joy...
new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
if isinstance(image, np.ndarray):
new_image = np.zeros_like(image, shape=new_shape)
elif is_torch_tensor(image):
new_image = image.new_zeros(new_shape)
top_pad = (new_shape[-2] - image_shape[0]) // 2
bottom_pad = top_pad + image_shape[0]
left_pad = (new_shape[-1] - image_shape[1]) // 2
right_pad = left_pad + image_shape[1]
new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image
top += top_pad
bottom += top_pad
left += left_pad
right += left_pad
return new_image[
..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
]
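

if __name__ == "__main__":
    # Added usage sketch (not part of the upstream module): exercise the mixin
    # on a synthetic image so the demo needs no files or network access.
    mixin = ImageFeatureExtractionMixin()
    fake = np.random.randint(0, 256, size=(64, 48, 3), dtype=np.uint8)
    pil_image = mixin.to_pil_image(fake)   # uint8 HWC array -> PIL Image
    resized = mixin.resize(pil_image, 32)  # int size resizes to a 32x32 square
    array = mixin.to_numpy_array(resized)  # float32 in [0, 1], channel-first
    normed = mixin.normalize(array, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD)
    print(normed.shape)  # (3, 32, 32)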
| [
"[email protected]"
] | |
02455e17cb15c2f1da50bd50f0afd2b4b54e1341 | ed81cc186915e55cd0fbf3ba7717193f68290c46 | /mcq_v2/quiz_qus/migrations/0003_auto_20181001_2205.py | 6e738716db91f38e7b7068f599b4a9b1aeb263ec | [] | no_license | MMIL/MCQ_Module_V2 | ea07daf8845afd5023edbea716b0f3808f921e06 | 02586652971eb8d5b952ac8542172a57ab909c3e | refs/heads/master | 2022-12-11T22:34:12.547492 | 2020-03-02T17:23:20 | 2020-03-02T17:23:20 | 146,754,594 | 1 | 4 | null | 2022-11-22T02:52:48 | 2018-08-30T13:34:56 | CSS | UTF-8 | Python | false | false | 387 | py | # Generated by Django 2.1.1 on 2018-10-01 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz_qus', '0002_question_qus_id'),
]
operations = [
migrations.AlterField(
model_name='question',
name='qus_id',
field=models.IntegerField(unique=True),
),
]
| [
"[email protected]"
] | |
14a76995fb36ee801799c35e39e7112c522601c9 | 7b13e6acb2a1f26936462ed795ee4508b4088042 | /算法题目/算法题目/动态规划/最长公共子序列/最长公共子序列.py | 57720483fde66ff1f0318f2fcec1681da5c3ea10 | [] | no_license | guojia60180/algorithm | ed2b0fd63108f30cd596390e64ae659666d1c2c6 | ea81ff2722c7c350be5e1f0cd6d4290d366f2988 | refs/heads/master | 2020-04-19T08:25:55.110548 | 2019-05-13T13:29:39 | 2019-05-13T13:29:39 | 168,076,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | #Author guo
'''
Dynamic-programming idea:
define a 2-D table dp[i][j] to store longest-common-subsequence lengths,
where dp[i][j] is the LCS length of the first i characters of S1 and the
first j characters of S2.
Split on whether S1[i] equals S2[j]:
  equal:     dp[i][j] = dp[i-1][j-1] + 1
  not equal: dp[i][j] = max(dp[i-1][j], dp[i][j-1])
'''
# Plain recursion: longest common *subsequence* length (exponential time).
def recursive_lcs(stra,strb):
if len(stra)==0 or len(strb)==0:
return 0
if stra[0]==strb[0]:
return recursive_lcs(stra[1:],strb[1:])+1
else:
return max([recursive_lcs(stra[1:],strb),recursive_lcs(stra,strb[1:])])
# Dynamic programming.
# NOTE: despite the file name, this tabulation actually finds the longest
# common *substring* (contiguous match), because a mismatch leaves the cell
# at 0 instead of carrying the best-so-far value forward.
def lcs(stra,strb):
m=[[0 for i in range(len(strb)+1)]for j in range(len(stra)+1)]
mmax=0
    p=0  # index in stra just past the end of the best match
for i in range(len(stra)):
for j in range(len(strb)):
if stra[i]==strb[j]:
m[i+1][j+1]=m[i][j]+1
if m[i+1][j+1]>mmax:
mmax=m[i+1][j+1]
p=i+1
return stra[p-mmax:p],mmax
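
if __name__ == '__main__':
    # Added sanity check, not in the original file.
    print(recursive_lcs('ABCBDAB', 'BDCABA'))  # 4, e.g. subsequence 'BCBA'
    print(lcs('helloworld', 'loop'))           # ('lo', 2), longest common substring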
| [
"[email protected]"
] | |
1a46bb5ed824ced4f62fc30a3ac0f058c451d445 | 5368c5c131da8ab226015cfd561cc3016c60fc82 | /venv/bin/chardetect | 8ae64b45acdfbfbd24ea506daac71d53d66b8ec8 | [] | no_license | ojudsonleo/DevCode | d621eed2d6555fa9c3fc37314edfc0646c4d1710 | ee22b9ed560275f276672d62e219c8d429726c23 | refs/heads/main | 2023-05-27T10:50:15.593840 | 2021-06-08T05:53:45 | 2021-06-08T05:53:45 | 374,292,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/home/admin/Desktop/Python/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cb76a7dfc791e4ea861199bb4573e6a0bed1781d | 5545d3c3e910ccb5b45b2277a71ad3c3ea3caedc | /jamenson/runtime/bases.py | 0f10210b6209ad6eb4bc9aa0a786f28149df0744 | [
"Apache-2.0"
] | permissive | matthagy/Jamenson | 61de19c71da6e133bf7d8efbb933a1036cf1e6f5 | 18a0fdd60b3d56ed4a6d4e792132535324490634 | refs/heads/master | 2016-09-11T04:31:28.895242 | 2013-04-04T00:14:44 | 2013-04-04T00:14:44 | 1,781,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py |
class CachingBase(object):
    # Interns instances: constructor calls whose arguments map to the same
    # key return the same cached object. Subclasses supply `get_key` (builds
    # a hashable key from the constructor arguments) and `_init_cached`
    # (one-time initialization, run only on a cache miss).
    class __metaclass__(type):  # Python 2 metaclass hook
        def __new__(cls, name, bases, dct):
            # give every class its own private cache dict
            dct = dict(dct)
            dct['_cache'] = dict()
            return type.__new__(cls, name, bases, dct)
def __new__(cls, *args):
key = cls.get_key(*args)
try:
return cls._cache[key]
except KeyError:
self = cls._cache[key] = object.__new__(cls)
self._init_cached(*key)
return self
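

if __name__ == '__main__':
    # Added usage sketch, not in the original module. Python 2 only: the
    # module relies on the old-style __metaclass__ hook, which Python 3
    # ignores.
    class Point(CachingBase):
        @classmethod
        def get_key(cls, x, y):
            return (x, y)

        def _init_cached(self, x, y):
            self.x, self.y = x, y

    assert Point(1, 2) is Point(1, 2)  # equal args yield the same cached instance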
| [
"[email protected]"
] | |
126228681daae08e9c75e7866d9198d157b849a6 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/quantopian_zipline/zipline-master/zipline/testing/fixtures.py | 01cecdbce22e170f4469abd7318caa878cb6f902 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 51,181 | py | import sqlite3
from unittest import TestCase
from contextlib2 import ExitStack
from logbook import NullHandler, Logger
from six import with_metaclass, iteritems
from toolz import flip
import pandas as pd
import responses
from .core import (
create_daily_bar_data,
create_minute_bar_data,
tmp_dir,
)
from ..data.data_portal import (
DataPortal,
DEFAULT_MINUTE_HISTORY_PREFETCH,
DEFAULT_DAILY_HISTORY_PREFETCH,
)
from ..data.resample import (
minute_frame_to_session_frame,
MinuteResampleSessionBarReader
)
from ..data.us_equity_pricing import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from ..data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
FUTURES_MINUTES_PER_DAY,
)
from ..finance.trading import TradingEnvironment
from ..utils import factory
from ..utils.classproperty import classproperty
from ..utils.final import FinalMeta, final
from .core import tmp_asset_finder, make_simple_equity_info
from zipline.assets import Equity, Future
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.pipeline import SimplePipelineEngine
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.protocol import BarData
from zipline.utils.calendars import (
get_calendar,
register_calendar)
class ZiplineTestCase(with_metaclass(FinalMeta, TestCase)):
"""
Shared extensions to core unittest.TestCase.
Overrides the default unittest setUp/tearDown functions with versions that
use ExitStack to correctly clean up resources, even in the face of
exceptions that occur during setUp/setUpClass.
Subclasses **should not override setUp or setUpClass**!
Instead, they should implement `init_instance_fixtures` for per-test-method
resources, and `init_class_fixtures` for per-class resources.
Resources that need to be cleaned up should be registered using
    either `enter_{class,instance}_context` or `add_{class,instance}_callback`.
"""
_in_setup = False
@final
@classmethod
def setUpClass(cls):
# Hold a set of all the "static" attributes on the class. These are
# things that are not populated after the class was created like
# methods or other class level attributes.
cls._static_class_attributes = set(vars(cls))
cls._class_teardown_stack = ExitStack()
try:
cls._base_init_fixtures_was_called = False
cls.init_class_fixtures()
assert cls._base_init_fixtures_was_called, (
"ZiplineTestCase.init_class_fixtures() was not called.\n"
"This probably means that you overrode init_class_fixtures"
" without calling super()."
)
except:
cls.tearDownClass()
raise
@classmethod
def init_class_fixtures(cls):
"""
Override and implement this classmethod to register resources that
should be created and/or torn down on a per-class basis.
Subclass implementations of this should always invoke this with super()
to ensure that fixture mixins work properly.
"""
if cls._in_setup:
raise ValueError(
'Called init_class_fixtures from init_instance_fixtures.'
'Did you write super(..., self).init_class_fixtures() instead'
' of super(..., self).init_instance_fixtures()?',
)
cls._base_init_fixtures_was_called = True
@final
@classmethod
def tearDownClass(cls):
# We need to get this before it's deleted by the loop.
stack = cls._class_teardown_stack
for name in set(vars(cls)) - cls._static_class_attributes:
# Remove all of the attributes that were added after the class was
# constructed. This cleans up any large test data that is class
# scoped while still allowing subclasses to access class level
# attributes.
delattr(cls, name)
stack.close()
@final
@classmethod
def enter_class_context(cls, context_manager):
"""
Enter a context manager to be exited during the tearDownClass
"""
if cls._in_setup:
raise ValueError(
'Attempted to enter a class context in init_instance_fixtures.'
'\nDid you mean to call enter_instance_context?',
)
return cls._class_teardown_stack.enter_context(context_manager)
@final
@classmethod
def add_class_callback(cls, callback, *args, **kwargs):
"""
Register a callback to be executed during tearDownClass.
Parameters
----------
callback : callable
The callback to invoke at the end of the test suite.
"""
if cls._in_setup:
raise ValueError(
'Attempted to add a class callback in init_instance_fixtures.'
'\nDid you mean to call add_instance_callback?',
)
return cls._class_teardown_stack.callback(callback, *args, **kwargs)
@final
def setUp(self):
type(self)._in_setup = True
self._pre_setup_attrs = set(vars(self))
self._instance_teardown_stack = ExitStack()
try:
self._init_instance_fixtures_was_called = False
self.init_instance_fixtures()
assert self._init_instance_fixtures_was_called, (
"ZiplineTestCase.init_instance_fixtures() was not"
" called.\n"
"This probably means that you overrode"
" init_instance_fixtures without calling super()."
)
except:
self.tearDown()
raise
finally:
type(self)._in_setup = False
def init_instance_fixtures(self):
self._init_instance_fixtures_was_called = True
@final
def tearDown(self):
# We need to get this before it's deleted by the loop.
stack = self._instance_teardown_stack
for attr in set(vars(self)) - self._pre_setup_attrs:
delattr(self, attr)
stack.close()
@final
def enter_instance_context(self, context_manager):
"""
Enter a context manager that should be exited during tearDown.
"""
return self._instance_teardown_stack.enter_context(context_manager)
@final
def add_instance_callback(self, callback):
"""
Register a callback to be executed during tearDown.
Parameters
----------
callback : callable
The callback to invoke at the end of each test.
"""
return self._instance_teardown_stack.callback(callback)
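
# Added usage sketch (not part of upstream zipline): subclasses extend the
# init_* hooks, never setUp/setUpClass directly, and register cleanup through
# the context/callback helpers. `open_resource` is a hypothetical context
# manager, not a zipline API:
#
#     class ExampleTestCase(ZiplineTestCase):
#         @classmethod
#         def init_class_fixtures(cls):
#             super(ExampleTestCase, cls).init_class_fixtures()
#             cls.shared = cls.enter_class_context(open_resource())
#
#         def init_instance_fixtures(self):
#             super(ExampleTestCase, self).init_instance_fixtures()
#             self.scratch = self.enter_instance_context(open_resource())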
def alias(attr_name):
"""Make a fixture attribute an alias of another fixture's attribute by
default.
Parameters
----------
attr_name : str
The name of the attribute to alias.
Returns
-------
p : classproperty
A class property that does the property aliasing.
Examples
--------
>>> class C(object):
... attr = 1
...
>>> class D(C):
... attr_alias = alias('attr')
...
>>> D.attr
1
>>> D.attr_alias
1
>>> class E(D):
... attr_alias = 2
...
>>> E.attr
1
>>> E.attr_alias
2
"""
return classproperty(flip(getattr, attr_name))
class WithDefaultDateBounds(object):
"""
ZiplineTestCase mixin which makes it possible to synchronize date bounds
across fixtures.
This fixture should always be the last fixture in bases of any fixture or
test case that uses it.
Attributes
----------
START_DATE : datetime
END_DATE : datetime
The date bounds to be used for fixtures that want to have consistent
dates.
"""
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-12-29', tz='utc')
class WithLogger(object):
"""
ZiplineTestCase mixin providing cls.log_handler as an instance-level
fixture.
After init_instance_fixtures has been called `self.log_handler` will be a
new ``logbook.NullHandler``.
Methods
-------
make_log_handler() -> logbook.LogHandler
A class method which constructs the new log handler object. By default
this will construct a ``NullHandler``.
"""
make_log_handler = NullHandler
@classmethod
def init_class_fixtures(cls):
super(WithLogger, cls).init_class_fixtures()
cls.log = Logger()
cls.log_handler = cls.enter_class_context(
cls.make_log_handler().applicationbound(),
)
class WithAssetFinder(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture.
After init_class_fixtures has been called, `cls.asset_finder` is populated
with an AssetFinder.
Attributes
----------
ASSET_FINDER_EQUITY_SIDS : iterable[int]
The default sids to construct equity data for.
ASSET_FINDER_EQUITY_SYMBOLS : iterable[str]
The default symbols to use for the equities.
ASSET_FINDER_EQUITY_START_DATE : datetime
The default start date to create equity data for. This defaults to
``START_DATE``.
ASSET_FINDER_EQUITY_END_DATE : datetime
The default end date to create equity data for. This defaults to
``END_DATE``.
Methods
-------
make_equity_info() -> pd.DataFrame
A class method which constructs the dataframe of equity info to write
to the class's asset db. By default this is empty.
make_futures_info() -> pd.DataFrame
A class method which constructs the dataframe of futures contract info
to write to the class's asset db. By default this is empty.
make_exchanges_info() -> pd.DataFrame
A class method which constructs the dataframe of exchange information
to write to the class's assets db. By default this is empty.
make_root_symbols_info() -> pd.DataFrame
A class method which constructs the dataframe of root symbols
information to write to the class's assets db. By default this is
empty.
make_asset_finder_db_url() -> string
A class method which returns the URL at which to create the SQLAlchemy
engine. By default provides a URL for an in-memory database.
    make_asset_finder() -> AssetFinder
A class method which constructs the actual asset finder object to use
for the class. If this method is overridden then the ``make_*_info``
methods may not be respected.
See Also
--------
zipline.testing.make_simple_equity_info
zipline.testing.make_jagged_equity_info
zipline.testing.make_rotating_equity_info
zipline.testing.make_future_info
zipline.testing.make_commodity_future_info
"""
ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C')
ASSET_FINDER_EQUITY_SYMBOLS = None
ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE')
ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE')
@classmethod
def _make_info(cls):
return None
make_futures_info = _make_info
make_exchanges_info = _make_info
make_root_symbols_info = _make_info
make_equity_supplementary_mappings = _make_info
del _make_info
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
return make_simple_equity_info(
cls.ASSET_FINDER_EQUITY_SIDS,
cls.ASSET_FINDER_EQUITY_START_DATE,
cls.ASSET_FINDER_EQUITY_END_DATE,
cls.ASSET_FINDER_EQUITY_SYMBOLS,
)
@classmethod
def make_asset_finder_db_url(cls):
return 'sqlite:///:memory:'
@classmethod
def make_asset_finder(cls):
"""Returns a new AssetFinder
Returns
-------
asset_finder : zipline.assets.AssetFinder
"""
return cls.enter_class_context(tmp_asset_finder(
url=cls.make_asset_finder_db_url(),
equities=cls.make_equity_info(),
futures=cls.make_futures_info(),
exchanges=cls.make_exchanges_info(),
root_symbols=cls.make_root_symbols_info(),
equity_supplementary_mappings=(
cls.make_equity_supplementary_mappings()
),
))
@classmethod
def init_class_fixtures(cls):
super(WithAssetFinder, cls).init_class_fixtures()
cls.asset_finder = cls.make_asset_finder()
class WithTradingCalendars(object):
"""
    ZiplineTestCase mixin providing cls.trading_calendar and
    cls.trading_calendars as class-level fixtures.
    After ``init_class_fixtures`` has been called:
    - `cls.trading_calendar` is populated with a default of the nyse trading
        calendar for compatibility with existing tests
    - `cls.trading_calendars` is populated with the trading calendars keyed
       by name, by the asset type which uses the respective calendar, and by
       exchange.
Attributes
----------
TRADING_CALENDAR_STRS : iterable
iterable of identifiers of the calendars to use.
TRADING_CALENDAR_FOR_ASSET_TYPE : dict
A dictionary which maps asset type names to the calendar associated
with that asset type.
"""
TRADING_CALENDAR_STRS = ('NYSE',)
TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures'}
TRADING_CALENDAR_FOR_EXCHANGE = {}
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_calendar` with the assumption that the value is the NYSE
# calendar.
TRADING_CALENDAR_PRIMARY_CAL = 'NYSE'
@classmethod
def init_class_fixtures(cls):
super(WithTradingCalendars, cls).init_class_fixtures()
cls.trading_calendars = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
# Set name to allow aliasing.
calendar = get_calendar(cal_str)
setattr(cls,
'{0}_calendar'.format(cal_str.lower()), calendar)
cls.trading_calendars[cal_str] = calendar
for asset_type, cal_str in iteritems(
cls.TRADING_CALENDAR_FOR_ASSET_TYPE):
calendar = get_calendar(cal_str)
cls.trading_calendars[asset_type] = calendar
for exchange, cal_str in iteritems(cls.TRADING_CALENDAR_FOR_EXCHANGE):
register_calendar(exchange, get_calendar(cal_str))
cls.trading_calendars[exchange] = get_calendar(cal_str)
cls.trading_calendar = cls.trading_calendars[
cls.TRADING_CALENDAR_PRIMARY_CAL]
class WithTradingEnvironment(WithAssetFinder,
WithTradingCalendars,
WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.env as a class-level fixture.
After ``init_class_fixtures`` has been called, `cls.env` is populated
with a trading environment whose `asset_finder` is the result of
`cls.make_asset_finder`.
Attributes
----------
TRADING_ENV_MIN_DATE : datetime
The min_date to forward to the constructed TradingEnvironment.
TRADING_ENV_MAX_DATE : datetime
The max date to forward to the constructed TradingEnvironment.
TRADING_ENV_TRADING_CALENDAR : pd.DatetimeIndex
The trading calendar to use for the class's TradingEnvironment.
TRADING_ENV_FUTURE_CHAIN_PREDICATES : dict
The roll predicates to apply when creating contract chains.
Methods
-------
make_load_function() -> callable
A class method that returns the ``load`` argument to pass to the
constructor of ``TradingEnvironment`` for this class.
The signature for the callable returned is:
``(datetime, pd.DatetimeIndex, str) -> (pd.Series, pd.DataFrame)``
make_trading_environment() -> TradingEnvironment
A class method that constructs the trading environment for the class.
If this is overridden then ``make_load_function`` or the class
attributes may not be respected.
See Also
--------
:class:`zipline.finance.trading.TradingEnvironment`
"""
TRADING_ENV_FUTURE_CHAIN_PREDICATES = None
@classmethod
def make_load_function(cls):
return None
@classmethod
def make_trading_environment(cls):
return TradingEnvironment(
load=cls.make_load_function(),
asset_db_path=cls.asset_finder.engine,
trading_calendar=cls.trading_calendar,
future_chain_predicates=cls.TRADING_ENV_FUTURE_CHAIN_PREDICATES,
)
@classmethod
def init_class_fixtures(cls):
super(WithTradingEnvironment, cls).init_class_fixtures()
cls.env = cls.make_trading_environment()
class WithSimParams(WithTradingEnvironment):
"""
ZiplineTestCase mixin providing cls.sim_params as a class level fixture.
    The arguments used to construct the trading environment may be overridden
by putting ``SIM_PARAMS_{argname}`` in the class dict except for the
trading environment which is overridden with the mechanisms provided by
``WithTradingEnvironment``.
Attributes
----------
SIM_PARAMS_YEAR : int
SIM_PARAMS_CAPITAL_BASE : float
SIM_PARAMS_NUM_DAYS : int
SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'}
SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'}
Forwarded to ``factory.create_simulation_parameters``.
SIM_PARAMS_START : datetime
SIM_PARAMS_END : datetime
Forwarded to ``factory.create_simulation_parameters``. If not
explicitly overridden these will be ``START_DATE`` and ``END_DATE``
See Also
--------
zipline.utils.factory.create_simulation_parameters
"""
SIM_PARAMS_YEAR = None
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_NUM_DAYS = None
SIM_PARAMS_DATA_FREQUENCY = 'daily'
SIM_PARAMS_EMISSION_RATE = 'daily'
SIM_PARAMS_START = alias('START_DATE')
SIM_PARAMS_END = alias('END_DATE')
@classmethod
def make_simparams(cls):
return factory.create_simulation_parameters(
year=cls.SIM_PARAMS_YEAR,
start=cls.SIM_PARAMS_START,
end=cls.SIM_PARAMS_END,
num_days=cls.SIM_PARAMS_NUM_DAYS,
capital_base=cls.SIM_PARAMS_CAPITAL_BASE,
data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY,
emission_rate=cls.SIM_PARAMS_EMISSION_RATE,
trading_calendar=cls.trading_calendar,
)
@classmethod
def init_class_fixtures(cls):
super(WithSimParams, cls).init_class_fixtures()
cls.sim_params = cls.make_simparams()
class WithTradingSessions(WithTradingCalendars, WithDefaultDateBounds):
"""
    ZiplineTestCase mixin providing cls.trading_days and cls.trading_sessions
    as class-level fixtures.
    After init_class_fixtures has been called, `cls.trading_sessions`
is populated with a dictionary of calendar name to the DatetimeIndex
containing the calendar trading days ranging from:
(DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY)
`cls.trading_days`, for compatibility with existing tests which make the
assumption that trading days are equity only, defaults to the nyse trading
sessions.
Attributes
----------
DATA_MAX_DAY : datetime
The most recent trading day in the calendar.
TRADING_DAY_COUNT : int
The number of days to put in the calendar. The default value of
``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can
override TRADING_DAY_COUNT to request more or less data.
"""
DATA_MIN_DAY = alias('START_DATE')
DATA_MAX_DAY = alias('END_DATE')
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_days` with the assumption that the value is days of the NYSE
# calendar.
trading_days = alias('nyse_sessions')
@classmethod
def init_class_fixtures(cls):
super(WithTradingSessions, cls).init_class_fixtures()
cls.trading_sessions = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
# Set name for aliasing.
setattr(cls,
'{0}_sessions'.format(cal_str.lower()), sessions)
cls.trading_sessions[cal_str] = sessions
for exchange, cal_str in iteritems(cls.TRADING_CALENDAR_FOR_EXCHANGE):
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
cls.trading_sessions[exchange] = sessions
class WithTmpDir(object):
"""
    ZiplineTestCase mixin providing cls.tmpdir as a class-level fixture.
After init_class_fixtures has been called, `cls.tmpdir` is populated with
a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`.
Attributes
----------
TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
TMP_DIR_PATH = None
@classmethod
def init_class_fixtures(cls):
super(WithTmpDir, cls).init_class_fixtures()
cls.tmpdir = cls.enter_class_context(
tmp_dir(path=cls.TMP_DIR_PATH),
)
class WithInstanceTmpDir(object):
"""
ZiplineTestCase mixing providing self.tmpdir as an instance-level fixture.
After init_instance_fixtures has been called, `self.tmpdir` is populated
with a `testfixtures.TempDirectory` object whose path is
`cls.TMP_DIR_PATH`.
Attributes
----------
INSTANCE_TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
INSTANCE_TMP_DIR_PATH = None
def init_instance_fixtures(self):
super(WithInstanceTmpDir, self).init_instance_fixtures()
self.instance_tmpdir = self.enter_instance_context(
tmp_dir(path=self.INSTANCE_TMP_DIR_PATH),
)
class WithEquityDailyBarData(WithTradingEnvironment):
"""
ZiplineTestCase mixin providing cls.make_equity_daily_bar_data.
Attributes
----------
EQUITY_DAILY_BAR_START_DATE : Timestamp
The date at to which to start creating data. This defaults to
``START_DATE``.
EQUITY_DAILY_BAR_END_DATE = Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bars defined by `WithEquityMinuteBarData`.
The current default is `False`, but could be `True` in the future.
Methods
-------
make_equity_daily_bar_data() -> iterable[(int, pd.DataFrame)]
A class method that returns an iterator of (sid, dataframe) pairs
which will be written to the bcolz files that the class's
``BcolzDailyBarReader`` will read from. By default this creates
        some simple synthetic data with
:func:`~zipline.testing.create_daily_bar_data`
See Also
--------
WithEquityMinuteBarData
zipline.testing.create_daily_bar_data
"""
EQUITY_DAILY_BAR_USE_FULL_CALENDAR = False
EQUITY_DAILY_BAR_START_DATE = alias('START_DATE')
EQUITY_DAILY_BAR_END_DATE = alias('END_DATE')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classmethod
def _make_equity_daily_bar_from_minute(cls):
assert issubclass(cls, WithEquityMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
minute_data = dict(cls.make_equity_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Equity])
@classmethod
def make_equity_daily_bar_data(cls):
# Requires a WithEquityMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_equity_daily_bar_from_minute()
else:
return create_daily_bar_data(
cls.equity_daily_bar_days,
cls.asset_finder.sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithEquityDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
if cls.EQUITY_DAILY_BAR_USE_FULL_CALENDAR:
days = trading_calendar.all_sessions
else:
if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE):
first_session = cls.EQUITY_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE)
)
if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session,
-1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.EQUITY_DAILY_BAR_END_DATE,
)
cls.equity_daily_bar_days = days
class WithBcolzEquityDailyBarReader(WithEquityDailyBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_daily_bar_path,
cls.bcolz_daily_bar_ctable, and cls.bcolz_equity_daily_bar_reader
class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`.
- `cls.bcolz_daily_bar_ctable` is populated with data returned from
`cls.make_equity_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`.
- `cls.bcolz_equity_daily_bar_reader` is a daily bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
EQUITY_DAILY_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day. This is used
when a test needs to use history, in which case this should be set to
        the largest history window that will be requested.
EQUITY_DAILY_BAR_USE_FULL_CALENDAR : bool
If this flag is set the ``equity_daily_bar_days`` will be the full
set of trading days from the trading environment. This flag overrides
``EQUITY_DAILY_BAR_LOOKBACK_DAYS``.
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD : int
If this flag is set, use the value as the `read_all_threshold`
parameter to BcolzDailyBarReader, otherwise use the default
value.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bar reader defined by a `WithBcolzEquityMinuteBarReader`.
Methods
-------
make_bcolz_daily_bar_rootdir_path() -> string
A class method that returns the path for the rootdir of the daily
bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in
the shared temp directory.
See Also
--------
WithBcolzEquityMinuteBarReader
WithDataPortal
zipline.testing.create_daily_bar_data
"""
BCOLZ_DAILY_BAR_PATH = 'daily_equity_pricing.bcolz'
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = None
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = False
# allows WithBcolzEquityDailyBarReaderFromCSVs to call the
    # `write_csvs` method without needing to reimplement `init_class_fixtures`
_write_method_name = 'write'
@classmethod
def make_bcolz_daily_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityDailyBarReader, cls).init_class_fixtures()
cls.bcolz_daily_bar_path = p = cls.make_bcolz_daily_bar_rootdir_path()
days = cls.equity_daily_bar_days
trading_calendar = cls.trading_calendars[Equity]
cls.bcolz_daily_bar_ctable = t = getattr(
BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]),
cls._write_method_name,
)(cls.make_equity_daily_bar_data())
if cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD is not None:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(
t, cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD)
else:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(t)
class WithBcolzEquityDailyBarReaderFromCSVs(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin that provides
cls.bcolz_equity_daily_bar_reader from a mapping of sids to CSV
file paths.
"""
_write_method_name = 'write_csvs'
def _trading_days_for_minute_bars(calendar,
start_date,
end_date,
lookback_days):
first_session = calendar.minute_to_session_label(start_date)
if lookback_days > 0:
first_session = calendar.sessions_window(
first_session,
-1 * lookback_days
)[0]
return calendar.sessions_in_range(first_session, end_date)
class _WithMinuteBarDataBase(WithTradingEnvironment):
MINUTE_BAR_LOOKBACK_DAYS = 0
MINUTE_BAR_START_DATE = alias('START_DATE')
MINUTE_BAR_END_DATE = alias('END_DATE')
class WithEquityMinuteBarData(_WithMinuteBarDataBase):
"""
ZiplineTestCase mixin providing cls.equity_minute_bar_days.
After init_class_fixtures has been called:
- `cls.equity_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
EQUITY_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
EQUITY_MINUTE_BAR_START_DATE : Timestamp
The date at to which to start creating data. This defaults to
``START_DATE``.
EQUITY_MINUTE_BAR_END_DATE = Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_equity_minute_bar_data() -> iterable[(int, pd.DataFrame)]
Classmethod producing an iterator of (sid, minute_data) pairs.
The default implementation invokes
zipline.testing.core.create_minute_bar_data.
See Also
--------
WithEquityDailyBarData
zipline.testing.create_minute_bar_data
"""
EQUITY_MINUTE_BAR_LOOKBACK_DAYS = alias('MINUTE_BAR_LOOKBACK_DAYS')
EQUITY_MINUTE_BAR_START_DATE = alias('MINUTE_BAR_START_DATE')
EQUITY_MINUTE_BAR_END_DATE = alias('MINUTE_BAR_END_DATE')
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
),
cls.asset_finder.equities_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithEquityMinuteBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
cls.equity_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
pd.Timestamp(cls.EQUITY_MINUTE_BAR_START_DATE),
pd.Timestamp(cls.EQUITY_MINUTE_BAR_END_DATE),
cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
)
class WithFutureMinuteBarData(_WithMinuteBarDataBase):
"""
ZiplineTestCase mixin providing cls.future_minute_bar_days.
After init_class_fixtures has been called:
- `cls.future_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
FUTURE_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
FUTURE_MINUTE_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    FUTURE_MINUTE_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_future_minute_bar_data() -> iterable[(int, pd.DataFrame)]
        A class method that returns an iterable of (sid, dataframe) pairs
        which will be written out in the format used by the inherited
        class which writes the minute bar data for use by a reader.
        By default this creates some simple synthetic data with
:func:`~zipline.testing.create_minute_bar_data`
See Also
--------
zipline.testing.create_minute_bar_data
"""
FUTURE_MINUTE_BAR_LOOKBACK_DAYS = alias('MINUTE_BAR_LOOKBACK_DAYS')
FUTURE_MINUTE_BAR_START_DATE = alias('MINUTE_BAR_START_DATE')
FUTURE_MINUTE_BAR_END_DATE = alias('MINUTE_BAR_END_DATE')
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = get_calendar('CME')
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
),
cls.asset_finder.futures_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithFutureMinuteBarData, cls).init_class_fixtures()
# To be replaced by quanto calendar.
trading_calendar = get_calendar('CME')
cls.future_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
pd.Timestamp(cls.FUTURE_MINUTE_BAR_START_DATE),
pd.Timestamp(cls.FUTURE_MINUTE_BAR_END_DATE),
cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS
)
class WithBcolzEquityMinuteBarReader(WithEquityMinuteBarData, WithTmpDir):
"""
    ZiplineTestCase mixin providing cls.bcolz_equity_minute_bar_path and
    cls.bcolz_equity_minute_bar_reader class level fixtures.
    After init_class_fixtures has been called:
    - `cls.bcolz_equity_minute_bar_path` is populated with
      `cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)`.
    - the data returned from `cls.make_equity_minute_bar_data` is written to
      that directory by a :class:`BcolzMinuteBarWriter`.
    - `cls.bcolz_equity_minute_bar_reader` is a minute bar reader
      pointing to the directory that was just written to.
Attributes
----------
    BCOLZ_EQUITY_MINUTE_BAR_PATH : str
The path inside the tmpdir where this will be written.
Methods
-------
    make_bcolz_equity_minute_bar_rootdir_path() -> string
        A class method that returns the path for the directory that contains
        the minute bar ctables. By default this is a subdirectory
        BCOLZ_EQUITY_MINUTE_BAR_PATH in the shared temp directory.
See Also
--------
WithBcolzEquityDailyBarReader
WithDataPortal
zipline.testing.create_minute_bar_data
"""
BCOLZ_EQUITY_MINUTE_BAR_PATH = 'minute_equity_pricing'
@classmethod
def make_bcolz_equity_minute_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityMinuteBarReader, cls).init_class_fixtures()
cls.bcolz_equity_minute_bar_path = p = \
cls.make_bcolz_equity_minute_bar_rootdir_path()
days = cls.equity_minute_bar_days
writer = BcolzMinuteBarWriter(
p,
cls.trading_calendars[Equity],
days[0],
days[-1],
US_EQUITIES_MINUTES_PER_DAY
)
writer.write(cls.make_equity_minute_bar_data())
cls.bcolz_equity_minute_bar_reader = \
BcolzMinuteBarReader(p)
class WithBcolzFutureMinuteBarReader(WithFutureMinuteBarData, WithTmpDir):
"""
    ZiplineTestCase mixin providing cls.bcolz_future_minute_bar_path and
    cls.bcolz_future_minute_bar_reader class level fixtures.
    After init_class_fixtures has been called:
    - `cls.bcolz_future_minute_bar_path` is populated with
      `cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH)`.
    - the data returned from `cls.make_future_minute_bar_data` is written to
      that directory by a :class:`BcolzMinuteBarWriter`.
    - `cls.bcolz_future_minute_bar_reader` is a minute bar reader
      pointing to the directory that was just written to.
Attributes
----------
BCOLZ_FUTURE_MINUTE_BAR_PATH : str
The path inside the tmpdir where this will be written.
Methods
-------
    make_bcolz_future_minute_bar_rootdir_path() -> string
        A class method that returns the path for the directory that contains
        the minute bar ctables. By default this is a subdirectory
        BCOLZ_FUTURE_MINUTE_BAR_PATH in the shared temp directory.
See Also
--------
WithBcolzEquityDailyBarReader
WithDataPortal
zipline.testing.create_minute_bar_data
"""
BCOLZ_FUTURE_MINUTE_BAR_PATH = 'minute_future_pricing'
@classmethod
def make_bcolz_future_minute_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzFutureMinuteBarReader, cls).init_class_fixtures()
trading_calendar = get_calendar('CME')
cls.bcolz_future_minute_bar_path = p = \
cls.make_bcolz_future_minute_bar_rootdir_path()
days = cls.future_minute_bar_days
writer = BcolzMinuteBarWriter(
p,
trading_calendar,
days[0],
days[-1],
FUTURES_MINUTES_PER_DAY,
)
writer.write(cls.make_future_minute_bar_data())
cls.bcolz_future_minute_bar_reader = \
BcolzMinuteBarReader(p)
class WithConstantEquityMinuteBarData(WithEquityMinuteBarData):
EQUITY_MINUTE_CONSTANT_LOW = 3.0
EQUITY_MINUTE_CONSTANT_OPEN = 4.0
EQUITY_MINUTE_CONSTANT_CLOSE = 5.0
EQUITY_MINUTE_CONSTANT_HIGH = 6.0
EQUITY_MINUTE_CONSTANT_VOLUME = 100.0
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
sids = cls.asset_finder.equities_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': cls.EQUITY_MINUTE_CONSTANT_OPEN,
'high': cls.EQUITY_MINUTE_CONSTANT_HIGH,
'low': cls.EQUITY_MINUTE_CONSTANT_LOW,
'close': cls.EQUITY_MINUTE_CONSTANT_CLOSE,
'volume': cls.EQUITY_MINUTE_CONSTANT_VOLUME,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
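# --- Editor-added illustrative sketch (assumption, not original source):
# customizing the constant bars only requires overriding the class
# attributes consumed by make_equity_minute_bar_data above.
class _ExampleConstantBars(WithConstantEquityMinuteBarData):
    # Every generated minute bar now closes at 10.0 instead of 5.0.
    EQUITY_MINUTE_CONSTANT_CLOSE = 10.0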
class WithAdjustmentReader(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin providing cls.adjustment_reader as a class level
fixture.
After init_class_fixtures has been called, `cls.adjustment_reader` will be
populated with a new SQLiteAdjustmentReader object. The data that will be
    written can be passed by overriding `make_{field}_data` where field may
    be `splits`, `mergers`, `dividends`, or `stock_dividends`.
The daily bar reader used for this adjustment reader may be customized
by overriding `make_adjustment_writer_equity_daily_bar_reader`.
    This is useful for providing a `MockDailyBarReader`.
Methods
-------
make_splits_data() -> pd.DataFrame
A class method that returns a dataframe of splits data to write to the
class's adjustment db. By default this is empty.
make_mergers_data() -> pd.DataFrame
A class method that returns a dataframe of mergers data to write to the
class's adjustment db. By default this is empty.
make_dividends_data() -> pd.DataFrame
A class method that returns a dataframe of dividends data to write to
the class's adjustment db. By default this is empty.
make_stock_dividends_data() -> pd.DataFrame
A class method that returns a dataframe of stock dividends data to
write to the class's adjustment db. By default this is empty.
make_adjustment_db_conn_str() -> string
A class method that returns the sqlite3 connection string for the
database in to which the adjustments will be written. By default this
is an in-memory database.
    make_adjustment_writer_equity_daily_bar_reader() -> BcolzDailyBarReader
A class method that returns the daily bar reader to use for the class's
adjustment writer. By default this is the class's actual
``bcolz_equity_daily_bar_reader`` as inherited from
``WithBcolzEquityDailyBarReader``. This should probably not be
overridden; however, some tests used a ``MockDailyBarReader``
for this.
make_adjustment_writer(conn: sqlite3.Connection) -> AdjustmentWriter
        A class method that constructs the adjustment writer which will be
to write the data into the connection to be used by the class's
adjustment reader.
See Also
--------
zipline.testing.MockDailyBarReader
"""
@classmethod
def _make_data(cls):
return None
make_splits_data = _make_data
make_mergers_data = _make_data
make_dividends_data = _make_data
make_stock_dividends_data = _make_data
del _make_data
@classmethod
def make_adjustment_writer(cls, conn):
return SQLiteAdjustmentWriter(
conn,
cls.make_adjustment_writer_equity_daily_bar_reader(),
cls.equity_daily_bar_days,
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return cls.bcolz_equity_daily_bar_reader
@classmethod
def make_adjustment_db_conn_str(cls):
return ':memory:'
@classmethod
def init_class_fixtures(cls):
super(WithAdjustmentReader, cls).init_class_fixtures()
conn = sqlite3.connect(cls.make_adjustment_db_conn_str())
cls.make_adjustment_writer(conn).write(
splits=cls.make_splits_data(),
mergers=cls.make_mergers_data(),
dividends=cls.make_dividends_data(),
stock_dividends=cls.make_stock_dividends_data(),
)
cls.adjustment_reader = SQLiteAdjustmentReader(conn)
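# --- Editor-added illustrative sketch (hypothetical, not original source):
# feeding split adjustments into the fixture database by overriding
# make_splits_data. The sid, date, and ratio are made up, and the column
# layout is an assumption about what SQLiteAdjustmentWriter.write expects.
class _ExampleWithSplits(WithAdjustmentReader, ZiplineTestCase):
    @classmethod
    def make_splits_data(cls):
        return pd.DataFrame({
            # effective_date is assumed to be seconds since the epoch.
            'effective_date': [int(pd.Timestamp('2014-01-15').value // 10**9)],
            'ratio': [0.5],
            'sid': [1],
        })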
class WithSeededRandomPipelineEngine(WithTradingSessions, WithAssetFinder):
"""
ZiplineTestCase mixin providing class-level fixtures for running pipelines
against deterministically-generated random data.
Attributes
----------
SEEDED_RANDOM_PIPELINE_SEED : int
Fixture input. Random seed used to initialize the random state loader.
seeded_random_loader : SeededRandomLoader
Fixture output. Loader capable of providing columns for
zipline.pipeline.data.testing.TestingDataSet.
seeded_random_engine : SimplePipelineEngine
Fixture output. A pipeline engine that will use seeded_random_loader
as its only data provider.
Methods
-------
run_pipeline(start_date, end_date)
Run a pipeline with self.seeded_random_engine.
See Also
--------
zipline.pipeline.loaders.synthetic.SeededRandomLoader
zipline.pipeline.loaders.testing.make_seeded_random_loader
zipline.pipeline.engine.SimplePipelineEngine
"""
SEEDED_RANDOM_PIPELINE_SEED = 42
@classmethod
def init_class_fixtures(cls):
super(WithSeededRandomPipelineEngine, cls).init_class_fixtures()
cls._sids = cls.asset_finder.sids
cls.seeded_random_loader = loader = make_seeded_random_loader(
cls.SEEDED_RANDOM_PIPELINE_SEED,
cls.trading_days,
cls._sids,
)
cls.seeded_random_engine = SimplePipelineEngine(
get_loader=lambda column: loader,
calendar=cls.trading_days,
asset_finder=cls.asset_finder,
)
def raw_expected_values(self, column, start_date, end_date):
"""
Get an array containing the raw values we expect to be produced for the
given dates between start_date and end_date, inclusive.
"""
all_values = self.seeded_random_loader.values(
column.dtype,
self.trading_days,
self._sids,
)
row_slice = self.trading_days.slice_indexer(start_date, end_date)
return all_values[row_slice]
def run_pipeline(self, pipeline, start_date, end_date):
"""
Run a pipeline with self.seeded_random_engine.
"""
if start_date not in self.trading_days:
raise AssertionError("Start date not in calendar: %s" % start_date)
if end_date not in self.trading_days:
raise AssertionError("End date not in calendar: %s" % end_date)
return self.seeded_random_engine.run_pipeline(
pipeline,
start_date,
end_date,
)
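# --- Editor-added illustrative sketch (hypothetical, not original source):
# a trivial pipeline run over the deterministic random data. The local
# imports and the TestingDataSet column name are assumptions.
class _ExampleSeededPipeline(WithSeededRandomPipelineEngine, ZiplineTestCase):
    def test_roundtrip(self):
        from zipline.pipeline import Pipeline
        from zipline.pipeline.data.testing import TestingDataSet
        pipe = Pipeline({'close': TestingDataSet.float_col.latest})
        result = self.run_pipeline(
            pipe, self.trading_days[1], self.trading_days[-1],
        )
        assert not result.empty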
class WithDataPortal(WithAdjustmentReader,
# Ordered so that bcolz minute reader is used first.
WithBcolzEquityMinuteBarReader,
WithBcolzFutureMinuteBarReader):
"""
ZiplineTestCase mixin providing self.data_portal as an instance level
fixture.
After init_instance_fixtures has been called, `self.data_portal` will be
populated with a new data portal created by passing in the class's
trading env, `cls.bcolz_equity_minute_bar_reader`,
`cls.bcolz_equity_daily_bar_reader`, and `cls.adjustment_reader`.
Attributes
----------
DATA_PORTAL_USE_DAILY_DATA : bool
Should the daily bar reader be used? Defaults to True.
DATA_PORTAL_USE_MINUTE_DATA : bool
Should the minute bar reader be used? Defaults to True.
DATA_PORTAL_USE_ADJUSTMENTS : bool
Should the adjustment reader be used? Defaults to True.
Methods
-------
make_data_portal() -> DataPortal
Method which returns the data portal to be used for each test case.
If this is overridden, the ``DATA_PORTAL_USE_*`` attributes may not
be respected.
"""
DATA_PORTAL_USE_DAILY_DATA = True
DATA_PORTAL_USE_MINUTE_DATA = True
DATA_PORTAL_USE_ADJUSTMENTS = True
DATA_PORTAL_FIRST_TRADING_DAY = None
DATA_PORTAL_LAST_AVAILABLE_SESSION = None
DATA_PORTAL_LAST_AVAILABLE_MINUTE = None
DATA_PORTAL_MINUTE_HISTORY_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
DATA_PORTAL_DAILY_HISTORY_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
def make_data_portal(self):
if self.DATA_PORTAL_FIRST_TRADING_DAY is None:
if self.DATA_PORTAL_USE_MINUTE_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_equity_minute_bar_reader.
first_trading_day)
elif self.DATA_PORTAL_USE_DAILY_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_equity_daily_bar_reader.
first_trading_day)
return DataPortal(
self.env.asset_finder,
self.trading_calendar,
first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY,
equity_daily_reader=(
self.bcolz_equity_daily_bar_reader
if self.DATA_PORTAL_USE_DAILY_DATA else
None
),
equity_minute_reader=(
self.bcolz_equity_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
adjustment_reader=(
self.adjustment_reader
if self.DATA_PORTAL_USE_ADJUSTMENTS else
None
),
future_minute_reader=(
self.bcolz_future_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
future_daily_reader=(
MinuteResampleSessionBarReader(
self.bcolz_future_minute_bar_reader.trading_calendar,
self.bcolz_future_minute_bar_reader)
if self.DATA_PORTAL_USE_MINUTE_DATA else None
),
last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION,
last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE,
minute_history_prefetch_length=self.
DATA_PORTAL_MINUTE_HISTORY_PREFETCH,
daily_history_prefetch_length=self.
DATA_PORTAL_DAILY_HISTORY_PREFETCH,
)
def init_instance_fixtures(self):
super(WithDataPortal, self).init_instance_fixtures()
self.data_portal = self.make_data_portal()
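# --- Editor-added illustrative sketch (assumption, not original source):
# requesting a daily-only portal by flipping the class-level flags that
# make_data_portal consults above.
class _ExampleDailyOnlyPortal(WithDataPortal, ZiplineTestCase):
    DATA_PORTAL_USE_MINUTE_DATA = False
    def test_portal_is_built(self):
        assert self.data_portal is not None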
class WithResponses(object):
"""
ZiplineTestCase mixin that provides self.responses as an instance
fixture.
After init_instance_fixtures has been called, `self.responses` will be
a new `responses.RequestsMock` object. Users may add new endpoints to this
with the `self.responses.add` method.
"""
def init_instance_fixtures(self):
super(WithResponses, self).init_instance_fixtures()
self.responses = self.enter_instance_context(
responses.RequestsMock(),
)
class WithCreateBarData(WithDataPortal):
CREATE_BARDATA_DATA_FREQUENCY = 'minute'
def create_bardata(self, simulation_dt_func, restrictions=None):
return BarData(
self.data_portal,
simulation_dt_func,
self.CREATE_BARDATA_DATA_FREQUENCY,
self.trading_calendar,
restrictions or NoRestrictions()
)
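# --- Editor-added illustrative sketch (hypothetical, not original source):
# create_bardata takes a callable returning the current simulation minute.
class _ExampleBarDataUsage(WithCreateBarData, ZiplineTestCase):
    def test_current_close(self):
        first_minute = self.trading_calendar.minutes_for_session(
            self.equity_minute_bar_days[0])[0]
        bar_data = self.create_bardata(
            simulation_dt_func=lambda: first_minute,
        )
        asset = self.asset_finder.retrieve_asset(
            self.asset_finder.equities_sids[0])
        assert bar_data.current(asset, 'close') is not None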
| [
"[email protected]"
] | |
2acee63d49602f2e3ab752718b4bc360ec9853c8 | 5c03660a8a1ac0bba95a6135173f86340a6a12fb | /backend/devforum_25636/urls.py | 2bc18982be14afb0e5607b2f4455270537dfb0e3 | [] | no_license | crowdbotics-apps/devforum-25636 | c6bfaaab79f4b0113ac3d3522ba7196f19682f39 | dbea60ca19e1aca0ede436666ef82a54ed1f1c9e | refs/heads/master | 2023-04-03T06:39:23.212304 | 2021-04-12T05:08:26 | 2021-04-12T05:08:26 | 357,064,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | """devforum_25636 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "DevForum"
admin.site.site_title = "DevForum Admin Portal"
admin.site.index_title = "DevForum Admin"
# swagger
api_info = openapi.Info(
title="DevForum API",
default_version="v1",
description="API documentation for DevForum App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
a04c7206a90f059fc1e7a64f2ee14889c8afdec6 | 4a74ec1b7e299540b924bce4928537a51fc00ff5 | /day24_day30/day30/최소비용.py | d89cf0abe4576789fbcb40888e9bf1e4dbc0130c | [] | no_license | yeonlang/algorithm | ef74b2592818495f29f6de5f44f81ccf307efa59 | ab788658bb781773c489cac8c6e8d2bea48fda07 | refs/heads/master | 2020-04-22T20:25:46.243355 | 2019-05-08T15:17:45 | 2019-05-08T15:17:45 | 170,641,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import sys
sys.stdin = open("최소비용.txt")  # redirect stdin to the sample input file
def ispass(ny, nx, y, x):
    """Relax cell (ny, nx) from (y, x); return True if its cost improved."""
    if 0 <= ny < N and 0 <= nx < N:
        if data[ny][nx] - data[y][x] > 0:
            # Climbing costs the height difference plus 1 for the step.
            if visited[ny][nx] == -1 or visited[ny][nx] > data[ny][nx] - data[y][x] + visited[y][x] + 1:
                visited[ny][nx] = data[ny][nx] - data[y][x] + visited[y][x] + 1
                return True
        else:
            # Flat or downhill moves cost exactly 1.
            if visited[ny][nx] == -1 or visited[ny][nx] > visited[y][x] + 1:
                visited[ny][nx] = visited[y][x] + 1
                return True
    return False
def BFS(y, x):
    # Queue-based relaxation (SPFA-style): any cell whose cost improves is
    # re-enqueued until no further improvement is possible.
    que = [(y, x)]
    visited[y][x] = 0
    while que:
        y, x = que.pop(0)
        for i in range(4):
            ny = y + dy[i]
            nx = x + dx[i]
            if ispass(ny, nx, y, x):
                que.append((ny, nx))
dy = [1, 0, -1, 0]  # down, right, up, left
dx = [0, 1, 0, -1]
for tc in range(int(input())):
    N = int(input())
    data = [list(map(int, input().split())) for _ in range(N)]
    visited = [[-1] * N for _ in range(N)]  # -1 marks an unreached cell
    BFS(0, 0)
    print("#{} {}".format(tc + 1, visited[N - 1][N - 1]))
"[email protected]"
] | |
0051cc3d11fc72f806d6a37142f0100c2f5e6541 | 34965549da4706d3c8da21d06840452a35eef383 | /dff_drfcn/config/config.py | e9038e61e56fa4fb9835ad460e68437ad5080162 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | xvjiarui/Deep-Feature-Flow | 50410b650b4ae2754530a4e64891c04a0a3106fe | a6a860dbd76660260b461a7fbb3a674765f52b0c | refs/heads/master | 2020-03-19T16:01:27.637599 | 2018-07-05T15:38:10 | 2018-07-05T15:38:10 | 136,696,520 | 0 | 0 | null | 2018-06-09T06:20:30 | 2018-06-09T06:20:30 | null | UTF-8 | Python | false | false | 6,561 | py | # --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Xizhou Zhu, Yuwen Xiong, Bin Xiao
# --------------------------------------------------------
import yaml
import numpy as np
from easydict import EasyDict as edict
import os
config = edict()
config.MXNET_VERSION = ''
config.output_path = ''
config.symbol = ''
config.gpus = ''
config.CLASS_AGNOSTIC = True
config.SCALES = [(600, 1000)] # first is scale (the shorter side); second is max size
config.USE_PHILLY = False
# default training
config.default = edict()
config.default.frequent = 20
config.default.kvstore = 'device'
# network related params
config.network = edict()
config.network.pretrained_dir = ''
config.network.pretrained_resnet = ''
config.network.pretrained_flow = ''
config.network.pretrained_epoch = 0
config.network.PIXEL_MEANS = np.array([0, 0, 0])
config.network.IMAGE_STRIDE = 0
config.network.RPN_FEAT_STRIDE = 16
config.network.RCNN_FEAT_STRIDE = 16
config.network.FIXED_PARAMS = ['gamma', 'beta']
config.network.ANCHOR_SCALES = (8, 16, 32)
config.network.ANCHOR_RATIOS = (0.5, 1, 2)
config.network.NORMALIZE_RPN = True
config.network.ANCHOR_MEANS = (0.0, 0.0, 0.0, 0.0)
config.network.ANCHOR_STDS = (0.1, 0.1, 0.4, 0.4)
config.network.NUM_ANCHORS = len(config.network.ANCHOR_SCALES) * len(config.network.ANCHOR_RATIOS)
config.network.DFF_FEAT_DIM = 1024
config.network.ROIDispatch = False
config.network.USE_NONGT_INDEX = False
config.network.NMS_TARGET_THRESH = '0.5'
# dataset related params
config.dataset = edict()
config.dataset.dataset = 'ImageNetVID'
config.dataset.image_set = 'DET_train_30classes+VID_train_15frames'
config.dataset.test_image_set = 'VID_val_videos'
config.dataset.root_path = './data'
config.dataset.dataset_path = './data/ILSVRC2015'
config.dataset.NUM_CLASSES = 31
config.TRAIN = edict()
config.TRAIN.lr = 0
config.TRAIN.lr_step = ''
config.TRAIN.lr_factor = 0.1
config.TRAIN.warmup = False
config.TRAIN.warmup_lr = 0
config.TRAIN.warmup_step = 0
config.TRAIN.momentum = 0.9
config.TRAIN.wd = 0.0005
config.TRAIN.begin_epoch = 0
config.TRAIN.end_epoch = 0
config.TRAIN.model_prefix = ''
config.TRAIN.rpn_loss_scale = 3.0
config.TRAIN.nms_loss_scale = 1.0
config.TRAIN.nms_pos_scale = 4.0
# whether resume training
config.TRAIN.RESUME = False
# whether auto resume training
config.TRAIN.AUTO_RESUME = True
# whether flip image
config.TRAIN.FLIP = True
# whether shuffle image
config.TRAIN.SHUFFLE = True
# whether use OHEM
config.TRAIN.ENABLE_OHEM = False
# size of images for each device, 2 for rcnn, 1 for rpn and e2e
config.TRAIN.BATCH_IMAGES = 2
# e2e changes behavior of anchor loader and metric
config.TRAIN.END2END = False
# group images with similar aspect ratio
config.TRAIN.ASPECT_GROUPING = True
# R-CNN
# rcnn rois batch size
config.TRAIN.BATCH_ROIS = 128
config.TRAIN.BATCH_ROIS_OHEM = 128
# rcnn rois sampling params
config.TRAIN.FG_FRACTION = 0.25
config.TRAIN.FG_THRESH = 0.5
config.TRAIN.BG_THRESH_HI = 0.5
config.TRAIN.BG_THRESH_LO = 0.0
# rcnn bounding box regression params
config.TRAIN.BBOX_REGRESSION_THRESH = 0.5
config.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])
# RPN anchor loader
# rpn anchors batch size
config.TRAIN.RPN_BATCH_SIZE = 256
# rpn anchors sampling params
config.TRAIN.RPN_FG_FRACTION = 0.5
config.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
config.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
config.TRAIN.RPN_CLOBBER_POSITIVES = False
# rpn bounding box regression params
config.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
config.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# used for end2end training
# RPN proposal
config.TRAIN.CXX_PROPOSAL = True
config.TRAIN.RPN_NMS_THRESH = 0.7
config.TRAIN.RPN_PRE_NMS_TOP_N = 12000
config.TRAIN.RPN_POST_NMS_TOP_N = 2000
config.TRAIN.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE
# approximate bounding box regression
config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = True
config.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)
config.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)
# Learn NMS
config.TRAIN.LEARN_NMS = False
config.TRAIN.JOINT_TRAINING = False
config.TRAIN.FIRST_N = 100
# DFF, trained image sampled from [min_offset, max_offset]
config.TRAIN.MIN_OFFSET = -9
config.TRAIN.MAX_OFFSET = 0
config.TEST = edict()
# R-CNN testing
# use rpn to generate proposal
config.TEST.HAS_RPN = False
# size of images for each device
config.TEST.BATCH_IMAGES = 1
# RPN proposal
config.TEST.CXX_PROPOSAL = True
config.TEST.RPN_NMS_THRESH = 0.7
config.TEST.RPN_PRE_NMS_TOP_N = 6000
config.TEST.RPN_POST_NMS_TOP_N = 300
config.TEST.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE
# whether to use softnms
config.TEST.SOFTNMS = False
# whether to use LEARN_NMS
config.TEST.LEARN_NMS = False
config.TEST.FIRST_N = 0
config.TEST.MERGE_METHOD = -1
# RCNN nms
config.TEST.NMS = 0.3
# DFF
config.TEST.KEY_FRAME_INTERVAL = 10
config.TEST.max_per_image = 300
# Test Model Epoch
config.TEST.test_epoch = 0
def update_config(config_file):
exp_config = None
with open(config_file) as f:
        exp_config = edict(yaml.load(f))  # NOTE: newer PyYAML versions require an explicit Loader here (e.g. yaml.safe_load)
for k, v in exp_config.items():
if k in config:
if isinstance(v, dict):
if k == 'TRAIN':
if 'BBOX_WEIGHTS' in v:
v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
elif k == 'network':
if 'PIXEL_MEANS' in v:
v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
for vk, vv in v.items():
config[k][vk] = vv
else:
if k == 'SCALES':
config[k][0] = (tuple(v))
else:
config[k] = v
else:
raise ValueError("key must exist in config.py")
def update_philly_config(model_dir, data_dir):
def _update_to_abs(prefix, basename):
if not os.path.isabs(basename):
print("Update {} with {}".format(basename, prefix))
return os.path.join(prefix, basename)
else:
return basename
config.output_path = _update_to_abs(model_dir, config.output_path)
config.dataset.dataset_path = _update_to_abs(data_dir, config.dataset.dataset_path)
config.dataset.root_path = _update_to_abs(data_dir, config.dataset.root_path)
config.network.pretrained_dir = _update_to_abs(data_dir, config.network.pretrained_dir)
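# Editor-added illustrative sketch (hypothetical, not original source):
# the usual call order when driving this config from an experiment yaml.
# The yaml path and philly directories below are placeholders.
#
#   update_config('./experiments/dff_rfcn/cfgs/example_vid.yaml')
#   if config.USE_PHILLY:
#       update_philly_config('/philly/model_dir', '/philly/data_dir')
#   print(config.TRAIN.lr, config.dataset.dataset_path)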
| [
"[email protected]"
] | |
c5c82c9f5560f4c2e4856e167c89d545433bb57d | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-ververica/aliyunsdkververica/request/v20200501/TableExistsRequest.py | 137bb321ebbe22b34e009f2bfc35f45f452013f6 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,144 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkververica.endpoint import endpoint_data
class TableExistsRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'ververica', '2020-05-01', 'TableExists')
self.set_uri_pattern('/pop/workspaces/[workspace]/catalog/v1beta2/namespaces/[namespace]/catalogs/[cat]:tableExists')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_workspace(self):
return self.get_path_params().get('workspace')
def set_workspace(self,workspace):
self.add_path_param('workspace',workspace)
def get_database(self):
return self.get_query_params().get('database')
def set_database(self,database):
self.add_query_param('database',database)
def get_cat(self):
return self.get_path_params().get('cat')
def set_cat(self,cat):
self.add_path_param('cat',cat)
def get_namespace(self):
return self.get_path_params().get('namespace')
def set_namespace(self,namespace):
self.add_path_param('namespace',namespace)
def get_table(self):
return self.get_query_params().get('table')
def set_table(self,table):
self.add_query_param('table',table) | [
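# Editor-added illustrative sketch (hypothetical usage, not part of the
# generated SDK file): sending the request through an AcsClient. The
# credentials, region, and all argument values are placeholders.
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = TableExistsRequest()
#   request.set_workspace('<workspace>')
#   request.set_namespace('<namespace>')
#   request.set_cat('<catalog>')
#   request.set_database('<database>')
#   request.set_table('<table>')
#   response = client.do_action_with_exception(request)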
"[email protected]"
] | |
58499b0b8766249579f224c8aae66b0468d1c391 | 24d9f077593b33c707b12d3a00cf91750f740729 | /src/114. Flatten Binary Tree to Linked List.py | ca38f01ee8f682f594eaea6f2db7d9fe6f7ac343 | [
"Apache-2.0"
] | permissive | xiaonanln/myleetcode-python | 274c8b8d7c29fd74dd11beb845180fb4e415dcd1 | 95d282f21a257f937cd22ef20c3590a69919e307 | refs/heads/master | 2021-01-22T21:45:59.786543 | 2019-04-21T15:24:23 | 2019-04-21T15:24:23 | 85,474,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | """
Given a binary tree, flatten it to a linked list in-place.
For example,
Given
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def flatten(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
self.flattenHelper(root)
    def flattenHelper(self, root):
        # Flatten the subtree rooted at `root` in place and return the
        # (head, tail) pair of the resulting right-spine list.
        if not root:
            return None, None
        leftHead, leftTail = self.flattenHelper(root.left)
        rightHead, rightTail = self.flattenHelper(root.right)
        if leftHead:
            # Splice the flattened left list between root and the
            # flattened right list.
            root.left = None
            root.right = leftHead
            leftTail.right = rightHead
        else:
            # root.right is already set to rightHead
            pass
        # The list's tail is the deepest non-empty piece.
        tail = rightTail or leftTail or root
        return root, tail
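# Editor-added alternative (a common O(1) extra-space rewiring, not part of
# the original submission): splice each left subtree between its root and
# the root's right subtree while walking down the right spine.
class SolutionIterative(object):
    def flatten(self, root):
        node = root
        while node:
            if node.left:
                # Find the rightmost node of the left subtree ...
                rightmost = node.left
                while rightmost.right:
                    rightmost = rightmost.right
                # ... and splice the left subtree into the right spine.
                rightmost.right = node.right
                node.right = node.left
                node.left = None
            node = node.right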
from utils import *
t = maketree([1, 2, 5, 3, 4, None, 6])
printtree(t)
Solution().flatten(t)
printlist(t, nextKey='right') | [
"[email protected]"
] | |
a698ed601f3b430730ee2f2a7f75671b0eee1904 | 4992af29261214cb1e74375fc36dd51fd95db703 | /sparkmagic/sparkmagic/kernels/sparkrkernel/sparkrkernel.py | 12553364bb11624f8a652b7828b98250bb92c1f2 | [
"BSD-3-Clause"
] | permissive | logicalclocks/sparkmagic | 9ef7ec0c124a8a317f14bb39b6a2b041ed1b8151 | 4f14b6ca54ac5fa54451392eafd5dd10721c000c | refs/heads/master | 2023-05-31T23:30:52.208643 | 2023-02-28T08:21:16 | 2023-02-28T08:21:16 | 134,001,978 | 1 | 11 | NOASSERTION | 2023-03-02T10:57:00 | 2018-05-18T20:38:38 | Python | UTF-8 | Python | false | false | 980 | py | # Copyright (c) 2015 [email protected]
# Distributed under the terms of the Modified BSD License.
from sparkmagic.utils.constants import LANG_R
from sparkmagic.kernels.wrapperkernel.sparkkernelbase import SparkKernelBase
class SparkRKernel(SparkKernelBase):
def __init__(self, **kwargs):
implementation = 'SparkR'
implementation_version = '1.0'
language = 'no-op'
language_version = '0.1'
language_info = {
'name': 'sparkR',
'mimetype': 'text/x-rsrc',
'codemirror_mode': 'text/x-rsrc',
'pygments_lexer': 'r'
}
session_language = LANG_R
super(SparkRKernel, self).__init__(implementation, implementation_version, language, language_version,
language_info, session_language, **kwargs)
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=SparkRKernel)
| [
"[email protected]"
] | |
de77c75c5f975b4bbb813f8ff2747bef83345417 | ca2c6bb4435138eae83d9776a672239651aac9bc | /week03/pymysql_insert.py | 3b59c21070bb191d0f72fbf83794156c1865dcda | [] | no_license | git-guozhijia/Python006-006 | 83b48180229d5be661cb2c3f12944b300a90db5a | 99642c351bc5ebe6dab4a7287bfa3234c37d1a90 | refs/heads/main | 2023-04-08T17:31:00.003482 | 2021-04-16T03:15:35 | 2021-04-16T03:15:35 | 323,208,392 | 0 | 0 | null | 2020-12-21T02:15:12 | 2020-12-21T02:15:12 | null | UTF-8 | Python | false | false | 662 | py | import pymysql
def insert_func(id, name):
    db = pymysql.connect(host='localhost', port=3306, user='root', password='guozhijia123', db='test_db')
    try:
        with db.cursor() as cursor:
            mysql = "insert into book (`id`, `name`) values (%s, %s)"
            data = (id, name)
            cursor.execute(mysql, data)
            # cursor.close(): opening the cursor in a `with` block closes it
            # automatically on exit, so no manual close is needed here.
        db.commit()
    except Exception as e:
        print(f"insert error:{e}")
    finally:
        db.close()
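# Editor-added illustrative sketch (assumption, not original code): the same
# insert done in bulk via cursor.executemany.
def insert_many_func(rows):
    db = pymysql.connect(host='localhost', port=3306, user='root', password='guozhijia123', db='test_db')
    try:
        with db.cursor() as cursor:
            sql = "insert into book (`id`, `name`) values (%s, %s)"
            cursor.executemany(sql, rows)  # rows: iterable of (id, name) tuples
        db.commit()
    except Exception as e:
        print(f"insert error:{e}")
    finally:
        db.close()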
if __name__ == '__main__':
    insert_func(1002, '西游记')  # "西游记" = "Journey to the West"
"[email protected]"
] | |
18050b373866ff46bc02391fb395be2e900b67cf | 81c8aaec0ca8d9b345943d1f2324ace61eb034c6 | /backend/markly/actions/models.py | ea833b1f906ff8abe47b7fca3af8265506c5991f | [
"MIT"
] | permissive | salmanAndroidDev/markly | ba1c2e0107e79e4940ab2b5dd9455b877e044e25 | c2b8c4a2fd99b6e2c374e127f62b10adbf143b7c | refs/heads/main | 2023-06-02T21:18:03.710345 | 2021-06-25T18:28:54 | 2021-06-25T18:28:54 | 377,372,093 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
class Action(models.Model):
"""Action model to store activities"""
user = models.ForeignKey('auth.User',
related_name='actions',
db_index=True,
on_delete=models.CASCADE)
verb = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True,
db_index=True)
    # Making a generic relationship: `target` may point to any model instance.
target_ct = models.ForeignKey(ContentType,
blank=True,
null=True,
related_name='target_obj',
on_delete=models.CASCADE)
target_id = models.PositiveIntegerField(blank=True,
null=True,
db_index=True)
target = GenericForeignKey('target_ct', 'target_id')
class Meta:
ordering = ('-created',)
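# Editor-added illustrative sketch (hypothetical, not part of the app): the
# GenericForeignKey can be assigned directly and Django fills in
# target_ct/target_id; `some_user` and `some_bookmark` are placeholders.
#
#   Action.objects.create(user=some_user,
#                         verb='bookmarked',
#                         target=some_bookmark)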
| [
"[email protected]"
] | |
5347a031cf1d724a4364c563f6146a7b1fd9d704 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /atcoder/abc/abc099_a.py | 265c0dc1e76bf1d57b077606affa1275787475aa | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | N = int(input())
if N >= 1000:
print("ABD")
else:
print("ABC")
| [
"[email protected]"
] | |
a664186b38b7feff544ed14e7958602120136940 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1729+371/sdB_pg_1729+371_coadd.py | 568443ae3e489c9b6fd876c2f3788f0e49d3b805 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[262.892625,37.091864], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1729+371/sdB_pg_1729+371_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1729+371/sdB_pg_1729+371_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
577715b397b24f4ec51a97efd4e327fa70364739 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190120130831.py | a494cbe4ba2060bce865f8dd670ec855a9bc01b7 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,346 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
exec pyc in frame
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
from maya import cmds
class Cam_Item_Layout(form_class,base_class):
def __init__(self,MainWindow):
super(Cam_Item_Layout,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
self.Cam_Item_Num = 0
self.Cam_Item_Scroll.verticalScrollBar().valueChanged.connect(self.Scroll_Fn)
self.Scroll_Offset = 0
self.Attr = {}
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Name"] = ""
        # Note: action buttons
self.Batch_Keyframe_BTN.clicked.connect(self.Batch_Keyframe_Fn)
self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)
self.Batch_Position_BTN.clicked.connect(self.Batch_Position_Fn)
self.Batch_Constraint_BTN.clicked.connect(self.Batch_Constraint_Fn)
# Note spliter
splitter = QSplitter(Qt.Vertical)
splitter.setHandleWidth(5)
splitter.addWidget(self.Cam_Item_Scroll)
splitter.addWidget(self.Button_Layout)
num = len(self.VBox_Widget.children())
print self.VBox_Widget.children()
self.VBox_Widget.layout().insertWidget(num-2,splitter)
def Batch_Constraint_Fn(self):
Cam_Grp = self.Attr["Add_CamGrp_LE"]
Loc = self.Attr["Add_Loc_LE"]
if not cmds.objExists(Cam_Grp): return
if not cmds.objExists(Loc): return
cmds.select(cl=1)
cmds.select(Loc,add=1)
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Cam_Loc = child.Attr["Add_Loc_LE"]
if not cmds.objExists(Cam_Loc): continue
cmds.select(Cam_Loc,add=1)
child.Cam_Con_CB.setEnabled(True)
cmds.select(Cam_Grp,add=1)
orCns = cmds.orientConstraint(Loc,Cam_Grp,mo=0)[0]
pnCns = cmds.pointConstraint(mo=0)[0]
Attr_List = cmds.listAttr(pnCns,k=1,string="*W*")
cmds.setAttr("%s.%s" % (pnCns,Attr_List[1]),1)
for i,child in enumerate(ChildrenList):
if i != 0:
cmds.setAttr("%s.%s" % (pnCns,Attr_List[i+1]),0)
try :
child.Cam_Con_CB.stateChanged.disconnect()
except:
pass
child.Cam_Con_CB.stateChanged.connect(partial(self.Cam_Con_CB_Fn,child,pnCns,Attr_List,i))
self.Con_Keyframe_BTN.setEnabled(True)
self.Con_Keyframe_BTN.clicked.connect(partial(self.Con_Keyframe_Fn,pnCns,Attr_List))
def Cam_Con_CB_Fn(self,CB,pnCns,Attr_List,num,state):
"""
Cam_Con_CB_Fn - CheckBox Signal
        # Note: checkbox state-change event handler
        Arguments:
            CB {CheckBox} -- the checkbox item
            pnCns {PointConstraint} -- the point constraint node
            Attr_List {Attribute List} -- weight attributes on the constraint node
            num {number} -- index of this item in the attribute list
            state {CheckBox state} -- new state of the checkbox
"""
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
if child != CB:
child.Cam_Con_CB.blockSignals(True)
child.Cam_Con_CB.setChecked(False)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[i+1]),0)
if state == 2:
CB.Cam_Con_CB.setChecked(True)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[num+1]),1)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[1]),0)
else:
CB.Cam_Con_CB.setChecked(False)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[num+1]),0)
cmds.setAttr("%s.%s" % (pnCns,Attr_List[1]),1)
for i,child in enumerate(ChildrenList):
if i != 0:
if child != CB:
child.Cam_Con_CB.blockSignals(False)
def Con_Keyframe_Fn(self,pnCns,Attr_List):
for i,Attr in enumerate(Attr_List):
if i != 0:
cmds.setKeyframe ("%s.%s" % (pnCns,Attr))
def Batch_Position_Fn(self):
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Base_Curve = self.Attr["Add_Crv_LE"]
CamGrp = child.Attr["Add_CamGrp_LE"]
if not cmds.objExists(Base_Curve): continue
if not cmds.objExists(CamGrp): continue
cmds.setAttr("%s.tx" % CamGrp,0)
cmds.setAttr("%s.ty" % CamGrp,0)
cmds.setAttr("%s.tz" % CamGrp,0)
cmds.setAttr("%s.rx" % CamGrp,0)
cmds.setAttr("%s.ry" % CamGrp,0)
cmds.setAttr("%s.rz" % CamGrp,0)
cmds.xform( CamGrp,cp=1 )
cmds.delete(cmds.parentConstraint( Base_Curve,CamGrp ))
Target_Curve = child.Attr["Add_Crv_LE"]
if not cmds.objExists(Target_Curve): continue
cmds.xform( Target_Curve,cp=1 )
                # Note: unlock the curve's transform attributes
cmds.setAttr("%s.tx" % Target_Curve,lock=False)
cmds.setAttr("%s.ty" % Target_Curve,lock=False)
cmds.setAttr("%s.tz" % Target_Curve,lock=False)
cmds.setAttr("%s.rx" % Target_Curve,lock=False)
cmds.setAttr("%s.ry" % Target_Curve,lock=False)
cmds.setAttr("%s.rz" % Target_Curve,lock=False)
cmds.delete(cmds.parentConstraint( Base_Curve,Target_Curve ))
        cmds.headsUpMessage(u"Position matching complete")
def Batch_Keyframe_Fn(self):
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Path = child.Attr["Add_Motion_Path_LE"]
if cmds.objExists(Path):
offset = cmds.keyframe(Path,q=1)[0]
cmds.keyframe("%s.uValue"% Path,e=1,iub=1,r=1,o="over",tc=-offset)
def Select_Path_Fn(self):
cmds.select(cl=1)
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
if cmds.objExists(child.Attr["Add_Motion_Path_LE"]):
cmds.select(child.Attr["Add_Motion_Path_LE"],add=1)
def Item_Add_Fn(self):
self.Cam_Item_Num += 1
return Cam_Item(self,self.MainWindow)
def Item_Clear_Fn(self):
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Name"] = ""
for i,child in enumerate(self.Item_Layout.children()):
if i != 0:
child.deleteLater()
def Scroll_Fn(self):
self.Scroll_Offset = self.Cam_Item_Scroll.verticalScrollBar().value()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
def __init__(self,parent,MainWindow):
super(Cam_Item,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
# self.Cam_Con_CB.stateChanged.connect(self.Cam_Con_CB_Fn)
        # Note: initialize creation parameters
TotalCount = len(parent.Item_Layout.children())
parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
self.Cam_LE.setText("Cam_Item_%s" % parent.Cam_Item_Num)
        self.Cam_Num_Label.setText(u"Camera %s" % TotalCount)
self.setObjectName("Cam_Item_%s" % TotalCount)
self.Num = TotalCount
self.Attr = {}
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = 0
self.Attr["End_Time_SB"] = 0
self.MainWindow.Save_Json_Fun()
def Cam_Del_BTN_Fn(self):
self.deleteLater()
ChildrenList = self.parent().children()
for i,child in enumerate(ChildrenList):
if i != 0:
if i > self.Num:
                    # Note: re-number the remaining children after removal
                    child.Num -= 1
                    child.Cam_Num_Label.setText(u"Camera %s" % (i-1))
                    child.setObjectName("Cam_Item_%s" % (i-1))
                else:
                    child.Cam_Num_Label.setText(u"Camera %s" % i)
                    child.setObjectName("Cam_Item_%s" % i)
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = ""
self.Attr["End_Time_SB"] = ""
self.MainWindow.Save_Json_Fun()
| [
"[email protected]"
] | |
ef25f3a063b414c81d5e3c7eb6c51c98a5144a94 | ae0f37ebb76bce44c5e366d62424b5ef411f94b3 | /assignment_BKIT/assignment1/src/test/LexerSuite.py | 643aa0ec780c469e53b31073bdf7945e3634103f | [] | no_license | khangsk/PPL | a30b656a8a70b8f6dd96ce39f57d3540495a5a26 | b8e3a04210796e03ff257c05cd1e60923f016d2f | refs/heads/master | 2023-02-21T09:28:25.216162 | 2021-01-18T09:35:15 | 2021-01-18T09:35:15 | 306,542,959 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,383 | py | import unittest
from TestUtils import TestLexer
class LexerSuite(unittest.TestCase):
def test1_lower_identifier(self):
"""test identifiers"""
self.assertTrue(TestLexer.checkLexeme("abc","abc,<EOF>",101))
def test2_lower_upper_id(self):
self.assertTrue(TestLexer.checkLexeme("Var","Var,<EOF>",102))
def test3_wrong_token(self):
self.assertTrue(TestLexer.checkLexeme("ab?svn","ab,Error Token ?",103))
def test4_integer(self):
"""test integers"""
self.assertTrue(TestLexer.checkLexeme("Var x;","Var,x,;,<EOF>",104))
def test5_illegal_escape(self):
"""test illegal escape"""
self.assertTrue(TestLexer.checkLexeme(""" "abc\\h def" ""","""Illegal Escape In String: abc\\h""",105))
def test6_unterminated_string(self):
"""test unclosed string"""
self.assertTrue(TestLexer.checkLexeme(""" "abc def ""","""Unclosed String: abc def """,106))
def test7_normal_string_with_escape(self):
"""test normal string with escape"""
self.assertTrue(TestLexer.checkLexeme(""" "ab'"c\\n def" ""","""ab'"c\\n def,<EOF>""",107))
def test8_global_variable1(self):
self.assertTrue(TestLexer.checkLexeme(""" Var: a = 5; ""","""Var,:,a,=,5,;,<EOF>""",108))
def test9_global_variable2(self):
self.assertTrue(TestLexer.checkLexeme(""" Var: b[2][3] = {{2,3,4},{4,5,6}}; ""","""Var,:,b,[,2,],[,3,],=,{,{,2,,,3,,,4,},,,{,4,,,5,,,6,},},;,<EOF>""",109))
def test10_global_variable3(self):
self.assertTrue(TestLexer.checkLexeme(""" Var: c, d = 6, e, f; ""","""Var,:,c,,,d,=,6,,,e,,,f,;,<EOF>""",110))
def test11_global_variable4(self):
self.assertTrue(TestLexer.checkLexeme(""" Var: m, n[10]; ""","""Var,:,m,,,n,[,10,],;,<EOF>""",111))
def test12_single_comment(self):
self.assertTrue(TestLexer.checkLexeme("** This is a single-line comment. **","""<EOF>""",112))
def test13_multi_comment(self):
self.assertTrue(TestLexer.checkLexeme(r"""** This is a
* multi-line
* comment.
**""","""<EOF>""",113))
def test14_unterminated_comment(self):
self.assertTrue(TestLexer.checkLexeme("""** This is a single-line comment. ****as""","""Unterminated Comment""",114))
def test15_unterminated_comment2(self):
self.assertTrue(TestLexer.checkLexeme(r"""** This is a
* multi-line
** comment.
**""","""comment,.,Unterminated Comment""",115))
def test16_comment(self):
self.assertTrue(TestLexer.checkLexeme("*****","""*,<EOF>""",116))
def test17_identifiers(self):
self.assertTrue(TestLexer.checkLexeme(""" Hoang Gia Khang ""","""Error Token H""",117))
def test18_identifiers2(self):
self.assertTrue(TestLexer.checkLexeme(""" hoangGia Khang ""","""hoangGia,Error Token K""",118))
def test19_identifiers3(self):
self.assertTrue(TestLexer.checkLexeme(""" _khangasdas ""","""Error Token _""",119))
def test20_identifiers4(self):
self.assertTrue(TestLexer.checkLexeme(""" hoangGiakh1231231____FAsdf ""","""hoangGiakh1231231____FAsdf,<EOF>""",120))
def test21_keywords(self):
self.assertTrue(TestLexer.checkLexeme(r""" Body Break Continue Do
Else ElseIf EndBody EndIf EndFor EndWhile For Function
If Parameter Return Then
Var While True False EndDo ""","""Body,Break,Continue,Do,Else,ElseIf,EndBody,EndIf,EndFor,EndWhile,For,Function,If,Parameter,Return,Then,Var,While,True,False,EndDo,<EOF>""",121))
def test22_Error_keywords(self):
self.assertTrue(TestLexer.checkLexeme(r""" Body Break Continue Do
Else ElseIf EndBody EndIf EndFor EndWhile For Function
IF Parameter Return Then
Var While True False EndDo ""","""Body,Break,Continue,Do,Else,ElseIf,EndBody,EndIf,EndFor,EndWhile,For,Function,Error Token I""",122))
def test23_float_array(self):
self.assertTrue(TestLexer.checkLexeme(""" {2.0,3,4} ""","""{,2.0,,,3,,,4,},<EOF>""",123))
def test24_operators(self):
self.assertTrue(TestLexer.checkLexeme("""+ +. - -. * *. \\ \\. % ! && || == != < > <= >= =/= <. >. <=. >=. ""","""+,+.,-,-.,*,*.,\\,\\.,%,!,&&,||,==,!=,<,>,<=,>=,=/=,<.,>.,<=.,>=.,<EOF>""",124))
def test25_error_operators(self):
self.assertTrue(TestLexer.checkLexeme("""+ +. - -. * *. ? \\ \\. % ! &&""","""+,+.,-,-.,*,*.,Error Token ?""",125))
def test26_error_separators(self):
self.assertTrue(TestLexer.checkLexeme("""()[]:.,;{}""","""(,),[,],:,.,,,;,{,},<EOF>""",126))
def test27_literal_interger_decimal(self):
self.assertTrue(TestLexer.checkLexeme(""" 0 10 0 5 123123123""","""0,10,0,5,123123123,<EOF>""",127))
def test28_literal_interger_decimal(self):
self.assertTrue(TestLexer.checkLexeme(""" 0123456789""","""0,123456789,<EOF>""",128))
def test29_literal_interger_hex(self):
self.assertTrue(TestLexer.checkLexeme(""" 0x12902""","""0x12902,<EOF>""",129))
def test30_literal_interger_hex(self):
self.assertTrue(TestLexer.checkLexeme(""" 0XAAF231""","""0XAAF231,<EOF>""",130))
def test31_not_literal_interger_hex(self):
self.assertTrue(TestLexer.checkLexeme(""" 0XAGF231""","""0XA,Error Token G""",131))
def test32_not_literal_interger_hex(self):
self.assertTrue(TestLexer.checkLexeme(""" 0xgAF231""","""0,xgAF231,<EOF>""",132))
def test33_literal_interger_oc(self):
self.assertTrue(TestLexer.checkLexeme(""" 0o5620""","""0o5620,<EOF>""",133))
def test34_literal_interger_oc(self):
self.assertTrue(TestLexer.checkLexeme(""" 0O375283""","""0O3752,83,<EOF>""",134))
def test35_not_literal_interger_oc(self):
self.assertTrue(TestLexer.checkLexeme(""" 0O98765""","""0,Error Token O""",135))
def test36_not_literal_interger_oc(self):
self.assertTrue(TestLexer.checkLexeme(""" 0oABC123""","""0,oABC123,<EOF>""",136))
def test37_literal_interger(self):
self.assertTrue(TestLexer.checkLexeme(""" 0 199 0xFF 0XABC 0o567 0O77""","""0,199,0xFF,0XABC,0o567,0O77,<EOF>""",137))
def test38_float(self):
self.assertTrue(TestLexer.checkLexeme(""" 12.0e3 12e3 12.e5 12.0e3 12000. 120000e-1""","""12.0e3,12e3,12.e5,12.0e3,12000.,120000e-1,<EOF>""",138))
def test39_float(self):
self.assertTrue(TestLexer.checkLexeme("""000.0 0. 000E0 000.0E0 00.E0""","""000.0,0.,000E0,000.0E0,00.E0,<EOF>""",139))
def test40_not_float(self):
self.assertTrue(TestLexer.checkLexeme("""0.^0""","""0.,Error Token ^""",140))
def test41_not_float(self):
self.assertTrue(TestLexer.checkLexeme("""-2.E3""","""-,2.E3,<EOF>""",141))
def test42_not_float(self):
self.assertTrue(TestLexer.checkLexeme("""-5Abx0.^0""","""-,5,Error Token A""",142))
def test43_boolean(self):
self.assertTrue(TestLexer.checkLexeme("""True False""","""True,False,<EOF>""",143))
def test44_boolean(self):
self.assertTrue(TestLexer.checkLexeme("""TRUE""","""Error Token T""",144))
def test45_boolean(self):
self.assertTrue(TestLexer.checkLexeme("""false FalsE""","""false,Error Token F""",145))
def test46_array(self):
self.assertTrue(TestLexer.checkLexeme("""{"hoang","gia","khang"}""","""{,hoang,,,gia,,,khang,},<EOF>""",146))
def test47_array(self):
self.assertTrue(TestLexer.checkLexeme("""{{1.e3,1.},{2.3,5e0}}""","""{,{,1.e3,,,1.,},,,{,2.3,,,5e0,},},<EOF>""",147))
def test48_array(self):
self.assertTrue(TestLexer.checkLexeme("""{1,2""","""{,1,,,2,<EOF>""",148))
def test49_array(self):
self.assertTrue(TestLexer.checkLexeme("""{True,False}""","""{,True,,,False,},<EOF>""",149))
def test50_array(self):
self.assertTrue(TestLexer.checkLexeme("""{true,False}""","""{,true,,,False,},<EOF>""",150))
def test51_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "Toi la Hoang Gia Khang!." ""","""Toi la Hoang Gia Khang!.,<EOF>""",151))
def test52_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "This is a string containing tab \\t" ""","""This is a string containing tab \\t,<EOF>""",152))
def test53_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "He asked me: '"Where is John?'"" ""","""He asked me: '"Where is John?'",<EOF>""",153))
def test54_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "He asked me: \\'Where is John?'"" ""","""He asked me: \\'Where is John?'",<EOF>""",154))
def test55_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "He asked me: 'Where is John?'"" ""","""Illegal Escape In String: He asked me: 'W""",155))
def test56_return_stmt(self):
self.assertTrue(TestLexer.checkLexeme(""" Return True ; ""","""Return,True,;,<EOF>""",156))
def test57_comment_in_array(self):
self.assertTrue(TestLexer.checkLexeme("""{12 ** 23asd&%^#$*()**, 3} ; ""","""{,12,,,3,},;,<EOF>""",157))
def test58_assign_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""k__HANmn = foo(g(x + 3) * 4) ;""","""k__HANmn,=,foo,(,g,(,x,+,3,),*,4,),;,<EOF>""",158))
def test59_if_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
If (a > b) && (!c) Then
Return True;
Else Return False;
EndIf.""",
"""If,(,a,>,b,),&&,(,!,c,),Then,Return,True,;,Else,Return,False,;,EndIf,.,<EOF>""",159))
def test60_for_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
For (step = 10, step < 100, step * step) Do
writeln(step);
EndDo.""",
"""For,(,step,=,10,,,step,<,100,,,step,*,step,),Do,writeln,(,step,),;,EndDo,.,<EOF>""",160))
def test61_while_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
While n < 10 Do
writeln(n * n + foo(n));
EndWhile.""",
"""While,n,<,10,Do,writeln,(,n,*,n,+,foo,(,n,),),;,EndWhile,.,<EOF>""",161))
def test62_Do_While_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
Do
i = i + 1;
input(a[i]);
While (i < 10) EndDo.""",
"""Do,i,=,i,+,1,;,input,(,a,[,i,],),;,While,(,i,<,10,),EndDo,.,<EOF>""",162))
def test63_Break_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
Do
i = i + 1;
input(a[i]);
If (i == 6) Then
Break ;
EndIf.
While (i < 10) EndDo.""",
"""Do,i,=,i,+,1,;,input,(,a,[,i,],),;,If,(,i,==,6,),Then,Break,;,EndIf,.,While,(,i,<,10,),EndDo,.,<EOF>""",163))
def test64_Continue_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
While n < 10 Do
If n == 7 Then
Continue;
ElseIf n > 7 Then
Break;
EndIf.
writeln(n * n + foo(n));
EndWhile.""",
"""While,n,<,10,Do,If,n,==,7,Then,Continue,;,ElseIf,n,>,7,Then,Break,;,EndIf,.,writeln,(,n,*,n,+,foo,(,n,),),;,EndWhile,.,<EOF>""",164))
def test65_Call_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
Var: a, b = 10;
a = foo(b * 3) + 10;
writeln(a);""",
"""Var,:,a,,,b,=,10,;,a,=,foo,(,b,*,3,),+,10,;,writeln,(,a,),;,<EOF>""",165))
def test66_return_stmt(self):
self.assertTrue(TestLexer.checkLexeme("""
Function: nothing
Parameter: a, b
Body:
If a == b Then
Return 1;
Else
Return 0;
EndIf.
EndBody.""",
"""Function,:,nothing,Parameter,:,a,,,b,Body,:,If,a,==,b,Then,Return,1,;,Else,Return,0,;,EndIf,.,EndBody,.,<EOF>""",166))
def test67(self):
self.assertTrue(TestLexer.checkLexeme("""
Var: x;
Function: fact
Parameter: n
Body:
If n == 0 Then
Return 1;
Else
Return n * fact(n - 1);
EndIf.
EndBody.
Function: main
Body:
x = 10;
fact(x);
EndBody.""",
"""Var,:,x,;,Function,:,fact,Parameter,:,n,Body,:,If,n,==,0,Then,Return,1,;,Else,Return,n,*,fact,(,n,-,1,),;,EndIf,.,EndBody,.,Function,:,main,Body,:,x,=,10,;,fact,(,x,),;,EndBody,.,<EOF>""",167))
def test68_illegal_esc(self):
self.assertTrue(TestLexer.checkLexeme(""" "bfasdf\h" """, """Illegal Escape In String: bfasdf\h""",168))
def test69_unterminated_comment(self):
self.assertTrue(TestLexer.checkLexeme(""" **asdn899*(&(*US))***** """, """Unterminated Comment""",169))
def test70_Unclosed_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "Im Khang\n SK" """, """Unclosed String: Im Khang""",170))
def test71(self):
self.assertTrue(TestLexer.checkLexeme(""" "Ho Chi Minh University\\n of \\t Technology" """, """Ho Chi Minh University\\n of \\t Technology,<EOF>""",171))
def test72_illegal_esc(self):
self.assertTrue(TestLexer.checkLexeme(""" "Dai. Hoc. Bach' Khoa" """, """Illegal Escape In String: Dai. Hoc. Bach' """,172))
def test73_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "Thanh Pho'" Ho_ Chi???Minh" """, """Thanh Pho'" Ho_ Chi???Minh,<EOF>""",173))
def test74_error_char(self):
self.assertTrue(TestLexer.checkLexeme(""" th&anh """, """th,Error Token &""",174))
def test75_comment(self):
self.assertTrue(TestLexer.checkLexeme("""*****""", """*,<EOF>""",175))
def test76(self):
self.assertTrue(TestLexer.checkLexeme(""" "What should i \\b \\f \\r \\n test now, huh?" """, """What should i \\b \\f \\r \\n test now, huh?,<EOF>""",176))
def test77_illegal_esc(self):
self.assertTrue(TestLexer.checkLexeme(""" "Really \\t DON\'T know" """, """Illegal Escape In String: Really \\t DON'T""",177))
def test78(self):
self.assertTrue(TestLexer.checkLexeme(""" "\\\\ DenDi said: '"MMR is just a number'"" """, """\\\\ DenDi said: '"MMR is just a number'",<EOF>""",178))
def test79_unclosed_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "BKCSE """, """Unclosed String: BKCSE """,179))
def test80_array(self):
self.assertTrue(TestLexer.checkLexeme(""" {1,{1.e,3e},{{{}}}} """, """{,1,,,{,1.,e,,,3,e,},,,{,{,{,},},},},<EOF>""",180))
def test81_comment_in_array(self):
self.assertTrue(TestLexer.checkLexeme(""" {{123,**das** "SK",3}} """, """{,{,123,,,SK,,,3,},},<EOF>""",181))
def test82_array(self):
self.assertTrue(TestLexer.checkLexeme(""" {{{{{}{{2}}}}}} """, """{,{,{,{,{,},{,{,2,},},},},},},<EOF>""",182))
def test83_array(self):
self.assertTrue(TestLexer.checkLexeme(""" {{}{}} """, """{,{,},{,},},<EOF>""",183))
def test84_error_keyword_var(self):
self.assertTrue(TestLexer.checkLexeme("""
Function: test
Body:
VaR: a = 1;
writeln(a);
EndBody. """, """Function,:,test,Body,:,Error Token V""",184))
def test85(self):
self.assertTrue(TestLexer.checkLexeme("""
Function: test
Body:
Var: a = 1, b = 1.0;
writeln(a \\. b);
EndBody. """, """Function,:,test,Body,:,Var,:,a,=,1,,,b,=,1.0,;,writeln,(,a,\\.,b,),;,EndBody,.,<EOF>""",185))
def test86_error_token(self):
self.assertTrue(TestLexer.checkLexeme(""" a &&& b""", """a,&&,Error Token &""",186))
def test87(self):
self.assertTrue(TestLexer.checkLexeme(""" abc \\\\\\""", """abc,\\,\\,\\,<EOF>""",187))
def test88(self):
self.assertTrue(TestLexer.checkLexeme("""
**The best football club
FC Barcelona**
"abcd'a" ""","""Illegal Escape In String: abcd'a""",188))
def test89_string_teencode(self):
self.assertTrue(TestLexer.checkLexeme(""" "c0' c0ng m4`j s4't c0' ng4`ij n3n kjm" ""","""Illegal Escape In String: c0' """,189))
def test90(self):
self.assertTrue(TestLexer.checkLexeme(""" **"c0' c0ng m4`j s4't c0' \\h ng4`ij n3n kjm"** ""","""<EOF>""",190))
def test91_unterminated_comment(self):
self.assertTrue(TestLexer.checkLexeme(""" Var: a = 0X7FA; **Python is a programming language* ""","""Var,:,a,=,0X7FA,;,Unterminated Comment""",191))
def test92(self):
self.assertTrue(TestLexer.checkLexeme(""" 0 199 0xFF 0XABC 0o567 0O77 True FAlse""","""0,199,0xFF,0XABC,0o567,0O77,True,Error Token F""",192))
def test93(self):
self.assertTrue(TestLexer.checkLexeme("""++.-.- ""","""+,+.,-.,-,<EOF>""",193))
def test94_array(self):
self.assertTrue(TestLexer.checkLexeme("""{{-1.1, -0e123},{-"abc", -True}} ""","""{,{,-,1.1,,,-,0e123,},,,{,-,abc,,,-,True,},},<EOF>""",194))
def test95_UNCLOSE_STRING(self):
self.assertTrue(TestLexer.checkLexeme("""**** "Hello sir!""","""Unclosed String: Hello sir!""",195))
def test96_ILLEGAL_ESCAPE(self):
self.assertTrue(TestLexer.checkLexeme(""" "Talented Engineering - 'Computer Science" ""","""Illegal Escape In String: Talented Engineering - 'C""",196))
def test97_url_string(self):
self.assertTrue(TestLexer.checkLexeme(""" "This is my facebook: https://www.facebook.com/hgkhang" ""","""This is my facebook: https://www.facebook.com/hgkhang,<EOF>""",197))
def test98_comment(self):
self.assertTrue(TestLexer.checkLexeme("""
**
* nothing to say \t
* I'm currently \h learning everything \\n
**focusing""","""focusing,<EOF>""",198))
def test99(self):
self.assertTrue(TestLexer.checkLexeme(
"""Function: reverseString
Parameter: num
Body:
For (i = num * 2, i < num * num, i + 1) Do
If i % 2 == 0 Then
writeln(i);
EndIf.
EndFor.
EndBody.""",
"""Function,:,reverseString,Parameter,:,num,Body,:,For,(,i,=,num,*,2,,,i,<,num,*,num,,,i,+,1,),Do,If,i,%,2,==,0,Then,writeln,(,i,),;,EndIf,.,EndFor,.,EndBody,.,<EOF>""",199))
def test100_index_operator(self):
self.assertTrue(TestLexer.checkLexeme("""
a[3 + foo(2)] = a[b[2][3]] + 4;""",
"""a,[,3,+,foo,(,2,),],=,a,[,b,[,2,],[,3,],],+,4,;,<EOF>""",200)) | [
"[email protected]"
] | |
a4d5f9dbd7ff9760239211ade45c0106a72a2774 | 5ff8f807d6318f41843c645f6da60a9bc43ede80 | /fabfile.py | be5aa937ae9f3288eeceff30d3bcb3d4effda0db | [] | no_license | notmissingout/notmissingout_old | e0de2484ad68083aa892f96e44c900ff09f59a40 | ed2521205679da61345a3335b99151bc0b952689 | refs/heads/master | 2021-06-18T03:06:28.420937 | 2017-05-14T09:37:34 | 2017-05-14T09:37:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 7,302 | py | """Manage a remotely-installed WSGI app on a Unix-like system, with
environment variables controlling a lot of the WSGI app's
functionality (12 Factor style).
All remote access to the app is done via an `invoke` script, created
during setup, which contains the environment variables.
Sensitive ones should be passed through fab's env rather than being
put directly into this script. (So this should remain valid for the
lifetime of an open source project. Hostnames, db credentials and the
like do not belong in here. We do have a default username and home
directory, for convenience.)
We create a virtual environment for every release. You probably want
to delete them after a while (but you probably want to delete the
releases as well). This is "better" than sharing a virtualenv, because
of the way pip upgrades packages (otherwise you will get periods where
the app will not work if it needs non-code files or just
previously-unused packages). It is, however, slower.
(Heroku's slug compilation is a better approach. It'd be nice to
detect differences and re-use virtualenvs using symlinking or
copy-and-upgrade in future. However we're not really here to build a
cheap PaaS.)
Getting started:
* with a remote user & host you have access to
$ fab -H HOST setup
$ fab -H HOST setup:restart=false
* subsequently, to put the latest master live
$ fab -H HOST deploy
* if something goes wrong, roll back to a specific version
$ fab -H HOST switch_to:version=<VERS> restart_appserver
deploy will apply migrations; switch_to will not. Also, migrations are
applied while the site is still running, so should be backwards
compatible.
(deploy also runs compilestatic and compilemessages)
Remote server layout (setup makes these):
media upload media (shared between all releases)
archives tgz archives of code versions
releases unpacked, versioned by datetime of fabric invocation
releases/current symlink to current version
releases/previous symlink to previous version
releases/next symlink to version being upgraded to
releases/<>/ENV virtualenv per release
userv/rc userv script for starting app server
invoke environment-setting invocation script (acts both
as an init.d script and a way of invoking app tasks
such as migration, compilestatic
"""
from fabric.api import *
from fabric.contrib.files import exists
import os
import time
from deployment import fabhelpers
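# For reference, a minimal sketch of what `fabhelpers.substitute_and_put`
# is assumed to do (the real helper lives in deployment/fabhelpers.py and
# may differ): read a local template, replace each @KEY@ placeholder from
# the substitutions tuple, and upload the result with the given mode.
#
#   from io import BytesIO
#
#   def substitute_and_put(local_template, remote_path, substitutions, mode):
#       with open(local_template) as f:
#           text = f.read()
#       for key, value in substitutions:
#           text = text.replace('@%s@' % key, str(value))
#       put(BytesIO(text.encode('utf-8')), remote_path, mode=mode)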
env.remote = '[email protected]:notmissingout/notmissingout.git'
env.branch = 'master'
env.project = 'notmissingout'
env.user = 'notmissingout'
env.path = '/home/%s' % env.user
def deploy(restart='true'):
"""
Deploy the latest version of the site to the servers.
"""
restart = (restart in ('true', 'True'))
# installs any required third party modules, compiles static files
# and messages, migrates the database and then restarts the
# appserver
env.release = time.strftime('%Y-%m-%dT%H.%M.%S')
# github doesn't support upload-archive, so work from local repo
fabhelpers.export_and_upload_tar_from_git_local()
prep_release(env.release)
switch_to(env.release)
if restart:
restart_appserver()
else:
invoke(command="start")
fabhelpers.substitute_and_put(
"deployment/crontab.in",
"%s/crontab" % env.path,
(
('TOPDIR', env.path),
),
mode=0700,
)
run("crontab < %(path)s/crontab" % { 'path': env.path })
def switch_to(version):
"""Switch the current (ie live) version."""
require('hosts')
previous_path = os.path.join(env.path, 'releases', 'previous')
current_path = os.path.join(env.path, 'releases', 'current')
if exists(previous_path):
run('rm %s' % previous_path)
if exists(current_path):
run('mv %s %s' % (current_path, previous_path))
# ln -s doesn't actually take a path relative to cwd as its first
# argument; it's actually relative to its second argument
run('ln -s %s %s' % (version, current_path))
# tidy up the next marker if there was one
run('rm -f %s' % os.path.join(env.path, 'releases', 'next'))
env.release = version # in case anything else wants to use it after us
def prep_release(version):
"""Compile static, make messages and migrate."""
require('hosts')
current_path = os.path.join(env.path, 'releases', 'current')
next_path = os.path.join(env.path, 'releases', 'next')
if exists(next_path):
run('rm %s' % next_path)
run('ln -s %s %s' % (version, next_path))
run(
"cd %(next_path)s; "
"if [ -d %(current_path)s/ENV ]; then "
" cp -a %(current_path)s/ENV %(next_path)s/ENV; "
"else "
" virtualenv ENV; "
" ENV/bin/pip install --upgrade pip; "
"fi; "
"ENV/bin/pip install -r requirements/live.txt" % {
'path': env.path,
'next_path': next_path,
'current_path': current_path,
'release': env.release
}
)
run('invoke prep')
# leave the next marker (symlink) in place in case something
# goes wrong before the end of switch_to, since it will provide
# useful state on the remote machine
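# Illustrative on-disk state while prep_release runs (timestamps assumed):
#
#   releases/2016-01-02T03.04.05    old code, still live
#   releases/2016-01-09T10.11.12    new code, being prepared
#   releases/current -> 2016-01-02T03.04.05
#   releases/next    -> 2016-01-09T10.11.12
#
# switch_to() then moves current to previous, points current at the new
# release and removes the next marker.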
def app_shell():
"""Get an app shell on the current release."""
require('hosts')
run("invoke shell")
def restart_appserver():
"""Restart the (gunicorn) app server."""
require('hosts')
run("invoke restart")
def invoke(command):
"""Run an init command (or shell or prep) via the invoker."""
require('hosts')
run("invoke %s" % command)
def setup():
"""Set up the initial structure for the given user."""
require('hosts', 'path')
require(
'database_url',
'django_secret_key',
'allowed_hosts',
'listen_port',
used_for="configuring the application.",
)
# make our directory structure
run("mkdir -pm 711 %s/media" % env.path)
run("mkdir -pm 711 %s/releases" % env.path)
run("mkdir -pm 700 %s/archives" % env.path)
# make the userv rc script
run("mkdir -pm 700 %s/.userv" % env.path)
put("deployment/userv.rc.in", "%s/.userv/rc" % env.path, mode=0600)
# and the script it points to
# @TOPDIR@ -> env.path
# @WSGI@ -> $(env.project).wsgi (python path to WSGI app)
# @DATABASE_URL@ -> syntax postgresql://USER:PASSWORD@localhost:5432/DBNAME
# (or postgis://...)
# @DJANGO_SECRET_KEY@ -> what it says (make it long and gnarly)
# @ALLOWED_HOSTS@ -> semicolon separated (eg loose-end.in;www.loose-end.in)
# @PORT@ -> that gunicorn should listen on
#
# The last four should be passed into the env in a fab-ish manner.
# (Hence the require statements above.)
substitutions = (
('TOPDIR', env.path),
('WSGI', '%s.wsgi' % env.project),
('DATABASE_URL', env.database_url),
('DJANGO_SECRET_KEY', env.django_secret_key),
('ALLOWED_HOSTS', env.allowed_hosts),
('PORT', env.listen_port),
)
fabhelpers.substitute_and_put(
"deployment/invoke.in",
"%s/invoke" % env.path,
substitutions,
mode=0700,
)
| [
"[email protected]"
] | |
76a08fcfefed1b045ec3b43c0f851dffda21bfbd | 357fefa288745c9ab3bc276a7ef0bc815f3fec2a | /src/gui/coverage.py | c483bb12db046aecb856f12f3f34c4b03eb1372e | [
"MIT"
] | permissive | jdvelasq/techminer | 61da47f44719e462732627edcc1094fab6c173f1 | 7a34a9fd684ce56cfbab583fa1bb71c1669035f9 | main | 2023-03-15T23:26:22.876051 | 2023-03-13T21:47:24 | 2023-03-13T21:47:24 | 204,352,276 | 0 | 1 | MIT | 2019-12-09T02:37:11 | 2019-08-25T21:34:19 | Jupyter Notebook | UTF-8 | Python | false | false | 1,198 | py | import pandas as pd
import ipywidgets as widgets
from ipywidgets import GridspecLayout, Layout
from IPython.display import display
class App:
def __init__(self) -> None:
self.app_layout = GridspecLayout(9, 4, height="870px")
self.output = widgets.Output().add_class("output_color")
self.app_layout[0:, 0:] = widgets.VBox(
[self.output],
layout=Layout(margin="10px 10px 10px 10px", border="1px solid gray"),
)
def run(self):
x = pd.read_csv("corpus.csv")
columns = sorted(x.columns)
with self.output:
display(
pd.DataFrame(
{
"Column": columns,
"Number of items": [
len(x) - x[col].isnull().sum() for col in columns
],
"Coverage (%)": [
"{:5.2%}".format((len(x) - x[col].isnull().sum()) / len(x))
for col in columns
],
}
)
)
return self.app_layout
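# Example usage in a Jupyter notebook (assumes a corpus.csv with one record
# per row in the working directory; the import path mirrors this module's
# location in the repo):
#
#   from techminer.gui.coverage import App
#   App().run()  # shows items per column and coverage as a percentage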
| [
"[email protected]"
] | |
44fef314424eaf2f58f5213dbe42ff75bdeb0352 | 4eddc1ba3a0a207e70bfb7addf73c18b1a0e19fc | /benchmark.py | 77fb873d01b719f263403ec6c853ffa6754beff9 | [
"MIT"
] | permissive | vibiu/validater | 558284b1811a1bdadbcb072620245cd1e78fd33d | 687791f4d234d379f392fdb64064276833d08666 | refs/heads/master | 2020-12-24T10:39:29.655705 | 2016-05-24T22:41:33 | 2016-05-24T22:41:33 | 62,855,916 | 0 | 0 | null | 2016-07-08T03:29:12 | 2016-07-08T03:29:12 | null | UTF-8 | Python | false | false | 1,830 | py | from timeit import timeit
setup = """
from io import BytesIO
import json
from ijson.backends.python import basic_parse
from ijson.backends.yajl2_cffi import basic_parse as cbasic_parse
from validater import parse, validate
schema = parse([{"userid": "int"}])
data_normal = json.dumps([{"userid": "123"}], ensure_ascii=False)
data_deep = '[' * 8000 + ']' * 8000
obj_normal = BytesIO(data_normal.encode('utf-8'))
obj_deep = BytesIO(data_deep.encode('utf-8'))
"""
print('ijson python'.center(60, '-'))
s = """
obj_normal.seek(0)
for event, value in basic_parse(obj_normal):
pass
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
try:
for event, value in basic_parse(obj_deep):
pass
except RecursionError:
pass
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
print('ijson yajl2_cffi'.center(60, '-'))
s = """
obj_normal.seek(0)
for event, value in cbasic_parse(obj_normal):
pass
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
try:
for event, value in cbasic_parse(obj_deep):
pass
except RecursionError:
pass
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
print('validater'.center(60, '-'))
s = """
obj_normal.seek(0)
err, val = validate(obj_normal, schema)
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
err, val = validate(obj_deep, schema)
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
print('standard json'.center(60, '-'))
s = """
obj_normal.seek(0)
json.loads(data_normal)
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
try:
json.loads(data_deep)
except RecursionError:
pass
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
| [
"[email protected]"
] | |
934c8ece44a8065aa3beb29e3efc8fe2b8064a3f | b01b2f5662cf94e63763b83ddd62194414429447 | /model/recognition_model/CAPSOCR/capsule.py | affd36346ba279b42283b041c313cc168c150b23 | [
"MIT"
] | permissive | TBSuperMan/FudanOCR | 8e4f946c9eac5d98583ff0c366c3cc78c6da0baa | e6b18b0eefaf832b2eb7198f5df79e00bd4cee36 | refs/heads/master | 2023-04-20T18:36:05.871982 | 2020-04-29T09:46:25 | 2020-04-29T09:46:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,491 | py | import torch
import torch.nn as nn
import math
class PrimaryCaps(nn.Module):
r"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
    Note that for computational convenience, the pose
    matrices are stored in the first part while the activations are
stored in the second part.
Args:
A: output of the normal conv layer
B: number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
Shape:
input: (*, A, h, w)
output: (*, h', w', B*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCaps, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B*P*P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B,
kernel_size=K, stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
p = self.pose(x)
a = self.a(x)
a = self.sigmoid(a)
out = torch.cat([p, a], dim=1)
out = out.permute(0, 2, 3, 1)
return out
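# A minimal shape sanity-check for PrimaryCaps (illustrative values; the
# 14x14 map echoes the CapsNet docstring below). It is not called anywhere;
# run it manually if wanted:
def _primary_caps_shape_demo():
    caps = PrimaryCaps(A=32, B=32, K=1, P=4, stride=1)
    x = torch.randn(2, 32, 14, 14)
    out = caps(x)
    # pose part: 32*4*4 = 512 channels, activation part: 32 channels
    assert out.shape == (2, 14, 14, 32 * (4 * 4 + 1))  # (2, 14, 14, 544)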
class ConvCaps(nn.Module):
r"""Create a convolutional capsule layer
    that transfers capsule layer L to capsule layer L+1
by EM routing.
Args:
B: input number of types of capsules
        C: output number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
iters: number of EM iterations
coor_add: use scaled coordinate addition or not
w_shared: share transformation matrix across w*h.
Shape:
input: (*, h, w, B*(P*P+1))
output: (*, h', w', C*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*B*C*P*P + B*P*P
"""
def __init__(self, B=32, C=32, K=3, P=4, stride=2, iters=3,
coor_add=False, w_shared=False):
super(ConvCaps, self).__init__()
# TODO: lambda scheduler
# Note that .contiguous() for 3+ dimensional tensors is very slow
self.B = B
self.C = C
self.K = K
self.P = P
self.psize = P*P
self.stride = stride
self.iters = iters
self.coor_add = coor_add
self.w_shared = w_shared
# constant
self.eps = 1e-8
self._lambda = 1e-03
self.ln_2pi = torch.cuda.FloatTensor(1).fill_(math.log(2*math.pi))
# params
# Note that \beta_u and \beta_a are per capsule type,
        # which are stated at https://openreview.net/forum?id=HJWLfGWRb&noteId=rJUY2VdbM
self.beta_u = nn.Parameter(torch.zeros(C))
self.beta_a = nn.Parameter(torch.zeros(C))
# Note that the total number of trainable parameters between
# two convolutional capsule layer types is 4*4*k*k
# and for the whole layer is 4*4*k*k*B*C,
        # which are stated at https://openreview.net/forum?id=HJWLfGWRb&noteId=r17t2UIgf
self.weights = nn.Parameter(torch.randn(1, K*K*B, C, P, P))
# op
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=2)
def m_step(self, a_in, r, v, eps, b, B, C, psize):
"""
        \mu^h_j = \dfrac{\sum_i r_{ij} V^h_{ij}}{\sum_i r_{ij}}
        (\sigma^h_j)^2 = \dfrac{\sum_i r_{ij} (V^h_{ij} - \mu^h_j)^2}{\sum_i r_{ij}}
        cost_h = (\beta_u + \log \sigma^h_j) * \sum_i r_{ij}
        a_j = logistic(\lambda * (\beta_a - \sum_h cost_h))
Input:
a_in: (b, C, 1)
r: (b, B, C, 1)
v: (b, B, C, P*P)
Local:
cost_h: (b, C, P*P)
r_sum: (b, C, 1)
Output:
a_out: (b, C, 1)
mu: (b, 1, C, P*P)
sigma_sq: (b, 1, C, P*P)
"""
r = r * a_in
r = r / (r.sum(dim=2, keepdim=True) + eps)
r_sum = r.sum(dim=1, keepdim=True)
coeff = r / (r_sum + eps)
coeff = coeff.view(b, B, C, 1)
mu = torch.sum(coeff * v, dim=1, keepdim=True)
sigma_sq = torch.sum(coeff * (v - mu)**2, dim=1, keepdim=True) + eps
r_sum = r_sum.view(b, C, 1)
sigma_sq = sigma_sq.view(b, C, psize)
cost_h = (self.beta_u.view(C, 1) + torch.log(sigma_sq.sqrt())) * r_sum
a_out = self.sigmoid(self._lambda*(self.beta_a - cost_h.sum(dim=2)))
sigma_sq = sigma_sq.view(b, 1, C, psize)
return a_out, mu, sigma_sq
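    # In mixture-of-Gaussians terms: each output capsule j is a diagonal
    # Gaussian whose mean and variance are the r-weighted statistics of its
    # input votes, and a_out squashes the negated description-length cost of
    # those votes -- the M-step of EM routing (Hinton et al., cited above).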
def e_step(self, mu, sigma_sq, a_out, v, eps, b, C):
"""
        ln_p_j = -\sum_h \dfrac{(V^h_{ij} - \mu^h_j)^2}{2 (\sigma^h_j)^2}
                 - \sum_h \ln(\sigma^h_j) - 0.5 \sum_h \ln(2\pi)
r = softmax(ln(a_j*p_j))
= softmax(ln(a_j) + ln(p_j))
Input:
mu: (b, 1, C, P*P)
sigma: (b, 1, C, P*P)
a_out: (b, C, 1)
v: (b, B, C, P*P)
Local:
ln_p_j_h: (b, B, C, P*P)
ln_ap: (b, B, C, 1)
Output:
r: (b, B, C, 1)
"""
ln_p_j_h = -1. * (v - mu)**2 / (2 * sigma_sq) \
- torch.log(sigma_sq.sqrt()) \
- 0.5*self.ln_2pi
ln_ap = ln_p_j_h.sum(dim=3) + torch.log(a_out.view(b, 1, C))
r = self.softmax(ln_ap)
return r
def caps_em_routing(self, v, a_in, C, eps):
"""
Input:
v: (b, B, C, P*P)
a_in: (b, C, 1)
Output:
mu: (b, 1, C, P*P)
a_out: (b, C, 1)
Note that some dimensions are merged
        for computational convenience, that is
`b == batch_size*oh*ow`,
`B == self.K*self.K*self.B`,
`psize == self.P*self.P`
"""
b, B, c, psize = v.shape
assert c == C
assert (b, B, 1) == a_in.shape
r = torch.cuda.FloatTensor(b, B, C).fill_(1./C)
for iter_ in range(self.iters):
a_out, mu, sigma_sq = self.m_step(a_in, r, v, eps, b, B, C, psize)
if iter_ < self.iters - 1:
r = self.e_step(mu, sigma_sq, a_out, v, eps, b, C)
return mu, a_out
    def add_patches(self, x, B, K, psize, stride):
"""
Shape:
Input: (b, H, W, B*(P*P+1))
Output: (b, H', W', K, K, B*(P*P+1))
"""
b, h, w, c = x.shape
assert h == w
assert c == B*(psize+1)
        oh = ow = int((h - K) / stride + 1)  # moein - changed from: oh = ow = int((h - K + 1) / stride)
idxs = [[(h_idx + k_idx) \
for k_idx in range(0, K)] \
for h_idx in range(0, h - K + 1, stride)]
x = x[:, idxs, :, :]
x = x[:, :, :, idxs, :]
x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
return x, oh, ow
def transform_view(self, x, w, C, P, w_shared=False):
"""
For conv_caps:
Input: (b*H*W, K*K*B, P*P)
Output: (b*H*W, K*K*B, C, P*P)
For class_caps:
Input: (b, H*W*B, P*P)
Output: (b, H*W*B, C, P*P)
"""
b, B, psize = x.shape
assert psize == P*P
x = x.view(b, B, 1, P, P)
if w_shared:
hw = int(B / w.size(1))
w = w.repeat(1, hw, 1, 1, 1)
w = w.repeat(b, 1, 1, 1, 1)
x = x.repeat(1, 1, C, 1, 1)
v = torch.matmul(x, w)
v = v.view(b, B, C, P*P)
return v
def add_coord(self, v, b, h, w, B, C, psize):
"""
Shape:
Input: (b, H*W*B, C, P*P)
Output: (b, H*W*B, C, P*P)
"""
assert h == w
v = v.view(b, h, w, B, C, psize)
coor = torch.arange(h, dtype=torch.float32) / h
coor_h = torch.cuda.FloatTensor(1, h, 1, 1, 1, self.psize).fill_(0.)
coor_w = torch.cuda.FloatTensor(1, 1, w, 1, 1, self.psize).fill_(0.)
coor_h[0, :, 0, 0, 0, 0] = coor
coor_w[0, 0, :, 0, 0, 1] = coor
v = v + coor_h + coor_w
v = v.view(b, h*w*B, C, psize)
return v
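    # Worked example (h == w == 2): coor == [0.0, 0.5], so a vote from grid
    # row i gets i/h added to pose component 0 and a vote from column j gets
    # j/w added to component 1 -- the paper's scaled coordinate addition.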
def forward(self, x):
b, h, w, c = x.shape
if not self.w_shared:
# add patches
            x, oh, ow = self.add_patches(x, self.B, self.K, self.psize, self.stride)
# transform view
p_in = x[:, :, :, :, :, :self.B*self.psize].contiguous()
a_in = x[:, :, :, :, :, self.B*self.psize:].contiguous()
p_in = p_in.view(b*oh*ow, self.K*self.K*self.B, self.psize)
a_in = a_in.view(b*oh*ow, self.K*self.K*self.B, 1)
v = self.transform_view(p_in, self.weights, self.C, self.P)
# em_routing
p_out, a_out = self.caps_em_routing(v, a_in, self.C, self.eps)
p_out = p_out.view(b, oh, ow, self.C*self.psize)
a_out = a_out.view(b, oh, ow, self.C)
out = torch.cat([p_out, a_out], dim=3)
else:
assert c == self.B*(self.psize+1)
assert 1 == self.K
assert 1 == self.stride
p_in = x[:, :, :, :self.B*self.psize].contiguous()
p_in = p_in.view(b, h*w*self.B, self.psize)
a_in = x[:, :, :, self.B*self.psize:].contiguous()
a_in = a_in.view(b, h*w*self.B, 1)
# transform view
v = self.transform_view(p_in, self.weights, self.C, self.P, self.w_shared)
# coor_add
if self.coor_add:
v = self.add_coord(v, b, h, w, self.B, self.C, self.psize)
# em_routing
_, out = self.caps_em_routing(v, a_in, self.C, self.eps)
return out
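# A minimal shape sketch for ConvCaps (CUDA is required because the class
# allocates torch.cuda.FloatTensor buffers; the 14x14 input echoes the
# CapsNet pipeline below). It is not called anywhere; run it manually:
def _conv_caps_shape_demo():
    layer = ConvCaps(B=32, C=32, K=3, P=4, stride=2, iters=3).cuda()
    x = torch.randn(2, 14, 14, 32 * (4 * 4 + 1)).cuda()
    y = layer(x)
    # oh = ow = (14 - 3) // 2 + 1 = 6
    assert y.shape == (2, 6, 6, 32 * (4 * 4 + 1))  # (2, 6, 6, 544)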
class CapsNet(nn.Module):
"""A network with one ReLU convolutional layer followed by
a primary convolutional capsule layer and two more convolutional capsule layers.
Suppose image shape is 28x28x1, the feature maps change as follows:
1. ReLU Conv1
(_, 1, 28, 28) -> 5x5 filters, 32 out channels, stride 2 with padding
x -> (_, 32, 14, 14)
2. PrimaryCaps
(_, 32, 14, 14) -> 1x1 filter, 32 out capsules, stride 1, no padding
x -> pose: (_, 14, 14, 32x4x4), activation: (_, 14, 14, 32)
3. ConvCaps1
(_, 14, 14, 32x(4x4+1)) -> 3x3 filters, 32 out capsules, stride 2, no padding
x -> pose: (_, 6, 6, 32x4x4), activation: (_, 6, 6, 32)
4. ConvCaps2
(_, 6, 6, 32x(4x4+1)) -> 3x3 filters, 32 out capsules, stride 1, no padding
x -> pose: (_, 4, 4, 32x4x4), activation: (_, 4, 4, 32)
5. ClassCaps
(_, 4, 4, 32x(4x4+1)) -> 1x1 conv, 10 out capsules
x -> pose: (_, 10x4x4), activation: (_, 10)
Note that ClassCaps only outputs activation for each class
Args:
A: output channels of normal conv
B: output channels of primary caps
C: output channels of 1st conv caps
D: output channels of 2nd conv caps
E: output channels of class caps (i.e. number of classes)
K: kernel of conv caps
P: size of square pose matrix
iters: number of EM iterations
...
"""
def __init__(self, A=32, B=32, C=32, D=32, E=10, K=3, P=4, iters=3):
super(CapsNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=A,
kernel_size=5, stride=2, padding=2)
self.bn1 = nn.BatchNorm2d(num_features=A, eps=0.001,
momentum=0.1, affine=True)
self.relu1 = nn.ReLU(inplace=False)
self.primary_caps = PrimaryCaps(A, B, 1, P, stride=1)
self.conv_caps1 = ConvCaps(B, C, K, P, stride=2, iters=iters)
self.conv_caps2 = ConvCaps(C, D, K, P, stride=1, iters=iters)
self.class_caps = ConvCaps(D, E, 1, P, stride=1, iters=iters,
coor_add=True, w_shared=True)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.primary_caps(x)
x = self.conv_caps1(x)
print("The output of conv_caps1 is: ", x.size())
x = self.conv_caps2(x)
print("The output of conv_caps2 is: ",x.size() )
x = self.class_caps(x)
return x
def capsules(**kwargs):
"""Constructs a CapsNet model.
"""
model = CapsNet(**kwargs)
return model
'''
TEST
Run this code with:
```
python -m capsules.py
```
'''
if __name__ == '__main__':
device = torch.device("cuda")
model = capsules(E=10)
model = model.to(device)
# model = model.cuda()
image = torch.Tensor(1,1,100,100)
image = image.cuda()
output = model(image)
print(model)
print(output.size())
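    # Shape trace for the 1x100x100 input (derived from the layer arithmetic
    # above, not from a recorded run): conv1 -> (1, 32, 50, 50);
    # primary_caps -> (1, 50, 50, 544); conv_caps1 -> (1, 24, 24, 544);
    # conv_caps2 -> (1, 22, 22, 544); class_caps -> (1, 10), so the final
    # print shows torch.Size([1, 10]).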
| [
"[email protected]"
] |