blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
โ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
898c144342144cc606a82a8664eb57e9cb92f806 | 6bc099dc14f2d93887bf8c40d2a169de7d15d720 | /dreamtools/core/layout.py | 7961847cb325f2aee44106505fe63982cf8f5491 | [] | no_license | Sage-Bionetworks/dreamtools | a6328849f9246b15c783c7346d66dfc6c22525c1 | fe249100c8165b584d41471fb947ad32a51121dd | refs/heads/master | 2021-01-22T17:14:35.369121 | 2015-08-19T18:09:36 | 2015-08-19T18:09:36 | 38,385,023 | 0 | 0 | null | 2015-07-01T17:30:05 | 2015-07-01T17:30:05 | null | UTF-8 | Python | false | false | 6,670 | py | # -*- python -*-
#
# This file is part of DreamTools software
#
# Copyright (c) 2014-2015 - EBI-EMBL
#
# File author(s): Thomas Cokelaer <[email protected]>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: http://github.org/dreamtools
#
##############################################################################
"""
"""
import os
import sys
import argparse
from easydev import Logging
README_templates = '''
Overview
===========
:Title:
:Nickname: %(nickname)s
:Summary:
:SubChallenges:
:Synapse page: https://www.synapse.org/#!Synapse:synXXXXXXX
.. contents::
Scoring
---------
::
from dreamtools import %(nickname)s
s = %(nickname)s()
filename = s.download_template()
s.score(filename)
'''
scoring_templates = '''
"""
:Title:
:Nickname: %(nickname)s
:Summary:
:SubChallenges:
:Synapse page: https://www.synapse.org/#!Synapse:synXXXXXXX
"""
import os
from dreamtools.core.challenge import Challenge
class %(nickname)s(Challenge):
"""A class dedicated to %(nickname)s challenge
::
from dreamtools import %(nickname)s
s = %(nickname)s()
filename = s.download_template()
s.score(filename)
Data and templates are downloaded from Synapse. You must have a login.
"""
def __init__(self):
""".. rubric:: constructor
"""
super(%(nickname)s, self).__init__('%(nickname)s')
self._path2data = os.path.split(os.path.abspath(__file__))[0]
self._init()
self.sub_challenges = []
def _init(self):
# should download files from synapse if required.
pass
def score(self, filename, subname=None, goldstandard=None):
raise NotImplementedError
def download_template(self, subname=None):
# should return full path to a template file
raise NotImplementedError
def download_goldstandard(self, subname=None):
# should return full path to a gold standard file
raise NotImplementedError
'''
class Layout(Logging):
"""Class to create automatic layout for a given challenge
:Usage:
::
dreamtools-layout --name D8C2
.. warning:: for developers only.
"""
def __init__(self, name, verbose=True):
super(Layout, self).__init__(level=verbose)
self.name = name
# check the name is correct
from dreamtools.core.challenge import Challenge
c = Challenge(self.name)
def create_layout(self):
self._create_directory(self.name)
self._create_file(self._pj('__init__.py'))
for directory in ['misc', 'leaderboard', 'generator', 'data',
'paper', 'templates', 'goldstandard']:
self._create_directory(self._pj(directory))
# now fill the contents of scoring.py if the file does not exists !
filename = 'scoring.py'
if os.path.exists(self._pj(filename)) is True:
self.warning("%s already exists. Skipped the creation of this directory" % filename)
else:
fh = open(self._pj(filename), 'w')
fh.write(scoring_templates % {'nickname': self.name})
filename = 'README.rst'
if os.path.exists(self._pj('README.rst')) is True:
self.warning("%s already exists. Skipped the creation of this directory" % filename)
else:
fh = open(self._pj(filename), 'w')
fh.write(README_templates % {'nickname': self.name})
def _pj(self, filename):
return os.sep.join([self.name, filename])
def _create_file(self, filename):
if os.path.exists(filename) is True:
self.warning("%s already exists. Skipped the creation of the file" % filename )
else:
self.info('Creating %s' % filename)
open(filename, 'a').close()
def _create_directory(self, directory):
if os.path.isdir(directory) is True:
self.warning("%s already exists. Skipped the creation of this directory" % directory)
else:
self.info('Creating %s' % directory)
os.mkdir(directory)
def layout(args=None):
"""This function is used by the standalone application called dreamscoring
::
dreamtools-layout --help
"""
import easydev
d = easydev.DevTools()
if args == None:
args = sys.argv[:]
user_options = Options(prog="dreamtools-layout")
if len(args) == 1:
user_options.parse_args(["prog", "--help"])
else:
options = user_options.parse_args(args[1:])
if options.challenge_name is None:
print_color('--challenge-name must be provided', red)
sys.exit()
lay = Layout(options.challenge_name)
lay.create_layout()
class Options(argparse.ArgumentParser):
description = "tests"
def __init__(self, version="1.0", prog=None):
usage = """\npython %s --challenge-name D8C1""" % prog
usage += """\n%s --challenge-name D8C1""" % prog
epilog="""Author(s):
- Thomas Cokelaer (cokelaer at gmail dot com).
Source code on: https://github.com/dreamtools/dreamtools
Issues or bug report ? Please fill an issue on http://github.com/dreamtools/dreamtools/issues """
description = """General Description:
dreamtools-layout creates the layout for a challenge. You must provide
a valid nickname (e.g., D8C1 for Dream8, Challenge 1). Then, the following
layout is created for you:
D8C1/__init__.py
D8C1/scoring.py
D8C1/README.rst
D8C1/generator/
D8C1/data/
D8C1/templates/
D8C1/goldstandard/
D8C1/leaderboard/
D8C1/misc/
D8C1/paper/
"""
super(Options, self).__init__(usage=usage, version=version, prog=prog,
epilog=epilog, description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.add_input_options()
def add_input_options(self):
"""The input options.
Default is None. Keep it that way because otherwise, the contents of
the ini file is overwritten in :class:`apps.Apps`.
"""
group = self.add_argument_group("General", '')
group.add_argument("--challenge-name", dest='challenge_name',
default=None, type=str,
help="nickname of the challenge (e.g., D8C1 stands for"
"dream8 challenge 1). Intermediate challenge such as first challenge of DREAM9.5 must be encoded as D9dot5C1")
if __name__ == "__main__":
layout(sys.argv)
| [
"[email protected]"
] | |
eb86decd02eb9a52625d4bd377ebd1b39da939a7 | 5b9ddd5e0528af6c2953dcdd957d7d6242a6bc97 | /test/test_single.py | fa9e05263ab9687fb2a54e936f488e085a0f5449 | [] | no_license | p-koo/wrangler | 098de4e06b05620a1a18c929c97f833432dd563d | ca427788f6822c8da5970c7a201c2e00d0aa4de6 | refs/heads/master | 2022-05-13T07:37:45.524877 | 2022-04-23T02:46:38 | 2022-04-23T02:46:38 | 98,224,245 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | py | import os, h5py
import numpy as np
from tensorflow import keras
import tensorflow as tf
def custom_model(input_shape, num_labels, params=[128, 196, 512], activation='relu'):
    """Build a 2-conv-layer CNN for multi-label sequence classification.

    Architecture: Conv1D -> BN -> activation -> MaxPool -> Dropout (x2),
    then a dense hidden layer and a sigmoid output (one independent
    probability per label).

    :param input_shape: (length, alphabet) shape of one input sequence
    :param num_labels: number of output units (independent binary labels)
    :param params: [conv1 filters, conv2 filters, dense units]
        NOTE(review): mutable default list — harmless here because it is
        only read, never mutated.
    :param activation: activation for the first conv block only; the
        remaining blocks are hard-coded to 'relu'
    :return: an uncompiled keras.Model
    """
    # Small L2 weight decay applied to all conv/dense kernels.
    l2_reg = tf.keras.regularizers.L2(1e-6)
    inputs = keras.layers.Input(shape=input_shape)
    # layer 1 — bias-free conv; BatchNorm supplies the shift term.
    nn = keras.layers.Conv1D(filters=params[0], kernel_size=19, use_bias=False, padding='same',
                             kernel_regularizer=l2_reg)(inputs)
    nn = keras.layers.BatchNormalization()(nn)
    nn = keras.layers.Activation(activation)(nn)
    nn = keras.layers.MaxPool1D(pool_size=20)(nn)
    nn = keras.layers.Dropout(0.1)(nn)
    # layer 2
    nn = keras.layers.Conv1D(filters=params[1], kernel_size=7, use_bias=False, padding='same', activation=None,
                             kernel_regularizer=l2_reg)(nn)
    nn = keras.layers.BatchNormalization()(nn)
    nn = keras.layers.Activation('relu')(nn)
    nn = keras.layers.MaxPool1D(pool_size=5)(nn)
    nn = keras.layers.Dropout(0.2)(nn)
    nn = keras.layers.Flatten()(nn)
    # layer 4 - Fully-connected
    nn = keras.layers.Dense(params[2], activation=None, use_bias=False,
                            kernel_regularizer=l2_reg)(nn)
    nn = keras.layers.BatchNormalization()(nn)
    nn = keras.layers.Activation('relu')(nn)
    nn = keras.layers.Dropout(0.5)(nn)
    # Output layer
    logits = keras.layers.Dense(num_labels, activation='linear', use_bias=True)(nn)
    outputs = keras.layers.Activation('sigmoid')(logits)
    # create keras model
    return keras.Model(inputs=inputs, outputs=outputs)
cell_line = 'REST_GM12878'
filepath = '/home/koolab/peter/data/test_singletask/' + cell_line + '.h5'

# Load the train/validation/test splits from the HDF5 dataset.  The file is
# opened exactly once inside the context manager; the original code also
# opened an extra handle beforehand that was never closed.
with h5py.File(filepath, 'r') as dataset:
    x_train = np.array(dataset['x_train']).astype(np.float32)
    y_train = np.array(dataset['y_train']).astype(np.float32)
    x_valid = np.array(dataset['x_valid']).astype(np.float32)
    # NOTE(review): labels are float32 for train but int32 for valid/test —
    # looks unintentional, though BinaryCrossentropy accepts both; confirm.
    y_valid = np.array(dataset['y_valid']).astype(np.int32)
    x_test = np.array(dataset['x_test']).astype(np.float32)
    y_test = np.array(dataset['y_test']).astype(np.int32)

N, L, A = x_train.shape  # samples, sequence length, alphabet size
num_labels = y_valid.shape[1]
print(x_train.shape)

model = custom_model(input_shape=(L, A), num_labels=num_labels,
                     params=[32, 64, 128], activation='exponential')

# set up optimizer and metrics
auroc = keras.metrics.AUC(curve='ROC', name='auroc')
aupr = keras.metrics.AUC(curve='PR', name='aupr')
optimizer = keras.optimizers.Adam(learning_rate=0.0005)
loss = keras.losses.BinaryCrossentropy(from_logits=False, label_smoothing=0.0)
model.compile(optimizer=optimizer,
              loss=loss,
              metrics=[auroc, aupr])

# Stop training when validation AUPR stops improving and roll back to the
# best weights seen.
es_callback = keras.callbacks.EarlyStopping(monitor='val_aupr',
                                            patience=10,
                                            verbose=1,
                                            mode='max',
                                            restore_best_weights=True)

# Shrink the learning rate when validation AUPR plateaus.
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_aupr',
                                              factor=0.2,
                                              patience=3,
                                              min_lr=1e-7,
                                              mode='max',
                                              verbose=1)

# train model
history = model.fit(x_train, y_train,
                    epochs=100,
                    batch_size=64,
                    shuffle=True,
                    validation_data=(x_valid, y_valid),
                    callbacks=[es_callback, reduce_lr])

model.evaluate(x_test, y_test)
| [
"[email protected]"
] | |
cb0337f6d2080e531df1781a8ea5f94d48bed765 | bd42dac15d6f997fc696677cc8568a7975255b18 | /setup.py | db60797fb86c63bb774c8b150a01aa19201d0e8e | [] | no_license | TheMisterrio/DepartmentHandler | db72440f9b523e9b46a220d50418a0478d7ef360 | 0a90bef2d72d7c2f94a48e083de901dcd5b3a3c1 | refs/heads/master | 2023-05-24T18:47:03.531576 | 2020-04-07T12:58:46 | 2020-04-07T12:58:46 | 224,926,292 | 0 | 0 | null | 2023-05-22T22:36:01 | 2019-11-29T21:48:51 | JavaScript | UTF-8 | Python | false | false | 524 | py | from setuptools import setup
with open("README.md", 'r') as f:
long_description = f.read()
setup(
name='Department Handler',
version='1.0',
author='Nikita Kapusta',
author_email='[email protected]',
long_description=long_description,
packages=['department-app'],
include_package_data=True,
zip_safe=False,
install_requires=['Flask', 'flask-sqlalchemy', 'mysql-connector',
'pylint', 'coverage', 'flask-migrate', 'flask-script', 'flask-restful',
'gunicorn', 'requests']
)
| [
"[email protected]"
] | |
5d551e3e57ce1d79d155aa72d8b4b02fc5739acb | 1d3d8f023bfb1442a651af62cd00bebde456d354 | /manager/migrations/0007_auto_20170512_0919.py | 5bf587e2c68eb7c6dcc42688b6c8eeffef1b8640 | [] | no_license | zhanghaoyan/sale | 173cf81827480ade23f400dfec294810618c2d10 | dfb85f3fa8a9a870534e8b4ef2ae1f56f557eb30 | refs/heads/master | 2020-12-30T14:12:49.405924 | 2017-05-22T15:52:49 | 2017-05-22T15:52:49 | 91,289,177 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-12 09:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manager', '0006_auto_20170511_0600'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='ctelphone',
field=models.CharField(max_length=32),
),
]
| [
"[email protected]"
] | |
a10e1d346e26015a338459f1fcf010525ea3f485 | e2e996662fde8ac0e1509995468b725d47d8ea10 | /_second_order_mutant_extend/create_second_order_mutants.py | 4b1e9205b97cabce86a6439c38fd4bb26d567529 | [] | no_license | HOM-CISE/HOM-MBFL | b5c27ba9268b2bf7cd4faccece2ff479147ebf81 | 38806a5971736ab98e1d6f4b22952cf4d9b328b2 | refs/heads/master | 2023-05-05T20:24:44.228132 | 2021-06-01T13:58:28 | 2021-06-01T13:58:28 | 285,776,568 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,033 | py |
import os
import os.path
import shutil
import argparse
import util
def parserCommad():
    """Build the command-line parser and return the parsed options.

    Recognised flags:
      --second_order_mutant_path  directory for generated second-order mutants
      --defect_source_path        path of the defect program to mutate
      --version                   print the tool version and exit
    """
    arg_parser = argparse.ArgumentParser()
    # (flag, default value, help text) for every stored option.
    option_specs = [
        ('--second_order_mutant_path',
         "./second_order_mutant_extend/result/",
         'Path of storing second-order mutants.'),
        ('--defect_source_path',
         "./test_data/defect_root/source/tot_info.c",
         'Defect program path.'),
    ]
    for flag, default_value, help_text in option_specs:
        arg_parser.add_argument(flag, action='store',
                                default=default_value,
                                dest=flag.lstrip('-'),
                                help=help_text)
    arg_parser.add_argument('--version', action='version',
                            version='%(prog)s 1.0')
    return arg_parser.parse_args()
def moveFileto(sourceDir, targetDir):
    """Copy the file at *sourceDir* to *targetDir*.

    Despite the name this is a copy, not a move: the source file is left
    in place.  *targetDir* may be either a directory or a full destination
    file path, exactly as accepted by :func:`shutil.copy`.

    :param sourceDir: path of the file to copy
    :param targetDir: destination directory or file path
    :return: None
    """
    shutil.copy(sourceDir, targetDir)
def Mutation(filename1, filename2, root1, root2):
    """Generate second-order mutants of a source file.

    Each line of *filename1* describes one second-order mutant as two
    first-order mutation records separated by ``' |||||'``.  A record has
    the form ``line: <L> index: <I> original: <OLD> mutated: <NEW>`` and
    means: in source line <L>, replace the token <OLD> that starts at
    character column <I> with <NEW>.  For the k-th record (1-based) the
    pristine source ``root1 + filename2`` is copied into *root2* as
    ``<stem>_<k><ext>`` and both mutations of that record are applied.

    :param filename1: path of the mutation-record file
    :param filename2: bare file name of the source program to mutate
    :param root1: directory (with trailing separator) holding the source
    :param root2: directory (with trailing separator) receiving mutants
    """
    stem, ext = os.path.splitext(filename2)

    def parse_record(field):
        # "<L> index: <I> original: <OLD> mutated: <NEW>"
        #   -> (line number, column, old token, new token)
        line_no, rest = field.split(" index: ", 1)
        mut_index, rest = rest.split(" original: ", 1)
        old, new = rest.split(" mutated: ", 1)
        return int(line_no), int(mut_index), old, new

    def apply_mutation(source_lines, line_no, mut_index, old, new):
        # Splice NEW over the OLD token at the recorded column of the line.
        target = source_lines[line_no - 1]
        source_lines[line_no - 1] = (target[:mut_index] + new
                                     + target[mut_index + len(old):])

    # Read all records up front; 'with' guarantees the handle is closed
    # (the original implementation leaked it).
    with open(filename1) as record_file:
        records = record_file.readlines()

    for index, record in enumerate(records, start=1):
        mutant_path = root2 + stem + "_" + str(index) + ext
        shutil.copy(root1 + filename2, mutant_path)

        first_half, second_half = record.split(" |||||")
        with open(mutant_path) as mutant_file:
            source_lines = mutant_file.readlines()
        apply_mutation(source_lines,
                       *parse_record(first_half.split("line: ", 1)[1]))
        apply_mutation(source_lines,
                       *parse_record(second_half.split("line: ", 1)[1].split("\n")[0]))
        # Rewrite the whole file in 'w' mode (which truncates) so a mutated
        # token shorter than the original cannot leave stale trailing bytes
        # behind, as the previous r+/seek(0) logic did.
        with open(mutant_path, "w") as mutant_file:
            mutant_file.writelines(source_lines)
if __name__ == "__main__":
    args = parserCommad()
    # Split the defect program path into its bare file name and parent
    # directory.  util.findIndex presumably returns the positions of every
    # "/" in the string (TODO confirm against util); [-1] is the last one.
    defect_source_name = args.defect_source_path[util.findIndex(args.defect_source_path, "/")[-1] + 1:]
    defect_source_path = args.defect_source_path[: util.findIndex(args.defect_source_path, "/")[-1] + 1]
    # Ensure the output directory for the generated mutant sources exists.
    if not os.path.exists(args.second_order_mutant_path + "Mutation_source"):
        os.mkdir(args.second_order_mutant_path + "Mutation_source")
    # NOTE(review): the record-file path is hard-coded instead of being
    # derived from --second_order_mutant_path; confirm this is intended.
    Mutation("./second_order_mutant_extend/result/logs/second_order_mutants_record.txt", defect_source_name,
             defect_source_path, args.second_order_mutant_path + "Mutation_source/")
| [
"[email protected]"
] | |
3455f02319fb7e979bd7e2d67312525c08d5e290 | 4f08920c06bdd724731428c1aaccc70d8bff772a | /MAC/urls.py | ac6edfe1ecba2fe08b820684751a98f6dbe8af8c | [] | no_license | Zainali649/OnlineStore | 142b66cbf2fe7aa9ddea7f4dbf6d90ccd556132a | 05db9cdca1078f1b8e3e8626d7cd9a5e898e4066 | refs/heads/master | 2023-08-07T17:46:22.534032 | 2021-09-24T14:35:57 | 2021-09-24T14:35:57 | 409,987,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from django.contrib import admin
from django.urls import path, include
from shop import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('blog/', include("blog.urls")),
path('shop/', include("shop.urls")),
path('', views.index)
]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT) | [
"[email protected]"
] | |
2408d2b454456b7db86da7304c7cd381074f96e0 | 331640994b1b6f66c1639278571ddbdc6c8c0751 | /test/python/body_io_file/wsgi.py | 713e341b050f5e9962c2a35e9ded68d79a232c7e | [
"Apache-2.0"
] | permissive | nginx/unit | eabcd067eaa60f4bdcf0cfaffe7d9932add2c66a | 9b22b6957bc87b3df002d0bc691fdae6a20abdac | refs/heads/master | 2023-09-04T02:02:13.581700 | 2023-08-30T16:07:24 | 2023-08-30T16:07:24 | 102,627,638 | 4,649 | 452 | Apache-2.0 | 2023-09-12T01:28:22 | 2017-09-06T15:45:30 | C | UTF-8 | Python | false | false | 131 | py | def application(env, start_response):
start_response('200', [('Content-Length', '5')])
f = open('file', 'rb')
return f
| [
"[email protected]"
] | |
0f6185d5c993be8a7af22fc9d0a378048d18e711 | 0f331270f31fc3c077bb91bed38e86900a8ad905 | /Algorithm_Programs/4.0 Bubble Sort.py | 377b32401e5cb048a14ccaf525e563db8c8e11d0 | [] | no_license | bharath02/BridgeLabz_Programs_59_week1 | 0a16ad1d08fb2326d7e8568ce64b340acabdfcc3 | 3a1cad9c0301b4cc42580505740c4b1eaed8ebaa | refs/heads/master | 2021-01-02T15:34:16.480225 | 2020-02-21T11:38:32 | 2020-02-21T11:38:32 | 239,683,973 | 0 | 0 | null | 2020-02-21T11:38:33 | 2020-02-11T05:29:31 | Python | UTF-8 | Python | false | false | 278 | py | #Bubble Sort
sort=["Nagaraj","Dilip", "Kranthi", "Prashanth","Jeevan","Bharath","SaiKumar","Sandeep","Naresh","Suvam"]
for i in range(0,len(sort)-1):
for j in range(0,len(sort)-i-1):
if(sort[j]>sort[j+1]):
sort[j],sort[j+1]=sort[j+1],sort[j]
print(sort)
| [
"[email protected]"
] | |
f8a9bc48ad588541fa9dc1f91cbf6147352ab04f | 10cb14a030aae225def3239af6200184fb18a3ce | /Python/Sprite/scoreboard.py | f901147bf24ed0dc78394c29fd0c010e6792e7e7 | [
"ICU",
"Apache-2.0"
] | permissive | linghtiin/test | 756ad4e08297da62b85b3992fa9e3952079becd8 | 4718dfbe33768fa6e623e27933199cbb21d440ae | refs/heads/master | 2021-01-22T19:36:20.708678 | 2019-10-21T12:24:45 | 2019-10-21T12:24:45 | 85,219,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 16:02:49 2019
@author: 10127
"""
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard():
    """Render and display scoring information for the game.

    Pre-renders the score, high score, level and remaining-ship images;
    callers re-invoke the ``prep_*`` methods whenever the underlying stats
    change and call :meth:`show_score` every frame.
    """

    def __init__(self, ai_settings, screen, stats):
        """Keep references to the game objects and pre-render everything."""
        self.screen = screen
        self.screen_rect = screen.get_rect()
        self.ai_settings = ai_settings
        self.stats = stats

        # Font settings shared by every piece of scoreboard text.
        self.text_color = (30, 30, 30)
        self.font = pygame.font.SysFont(None, 30)

        # Render each element once up front.  Order matters: prep_level
        # positions itself relative to the score rect.
        for prepare in (self.prep_score, self.prep_high_score,
                        self.prep_level, self.prep_ships):
            prepare()

    def prep_score(self):
        """Render the current score, anchored to the top-right corner."""
        points = int(round(self.stats.score, -1))  # round to nearest 10
        self.score_image = self.font.render("{:,}pt".format(points), True,
                                            self.text_color,
                                            self.ai_settings.bg_color)
        rect = self.score_image.get_rect()
        rect.right = self.screen_rect.right - 20
        rect.top = 20
        self.score_rect = rect

    def prep_high_score(self):
        """Render the high score, centred at the top of the screen."""
        best = int(round(self.stats.high_score, -1))
        self.high_score_image = self.font.render("{:,}pt".format(best), True,
                                                 self.text_color,
                                                 self.ai_settings.bg_color)
        rect = self.high_score_image.get_rect()
        rect.centerx = self.screen_rect.centerx
        rect.top = self.screen_rect.top
        self.high_score_rect = rect

    def prep_level(self):
        """Render the current level just below the score image."""
        self.level_image = self.font.render(str(self.stats.level), True,
                                            self.text_color,
                                            self.ai_settings.bg_color)
        rect = self.level_image.get_rect()
        rect.right = self.score_rect.right
        rect.top = self.score_rect.bottom + 10
        self.level_rect = rect

    def prep_ships(self):
        """Build a sprite group with one ship icon per remaining life."""
        self.ships = Group()
        for slot in range(self.stats.ship_left):
            ship = Ship(self.ai_settings, self.screen)
            ship.rect.x = 10 + slot * ship.rect.width
            ship.rect.y = 10
            self.ships.add(ship)

    def show_score(self):
        """Blit every pre-rendered scoreboard element onto the screen."""
        for image, rect in ((self.score_image, self.score_rect),
                            (self.high_score_image, self.high_score_rect),
                            (self.level_image, self.level_rect)):
            self.screen.blit(image, rect)
        self.ships.draw(self.screen)
"[email protected]"
] | |
a75ff295dcf088593d014b821d1e36830db744bb | 6775e06b64ab60777c846d3c2c337340a6a20307 | /3 Pin/tswitch.py | d796d920f5cfd695672ddfed8745bd17cc5d9787 | [] | no_license | tangylyre/RasPi-Projects | 8a35493b9c53dae5b1bc341cd9a95fc7f3c3d33f | 40e57caa3992db6d362bc668428636395c52db5e | refs/heads/master | 2021-07-10T03:00:11.437935 | 2021-03-29T20:43:20 | 2021-03-29T20:43:20 | 238,429,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
while True:
GPIO.output(18, True)
time.sleep(30)
GPIO.output(18, False)
time.sleep(30) | [
"[email protected]"
] | |
13cce84e532ebb215dce5473596719415166b05f | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/joblib/test/test_memory.py | 4986880efb1b3370658406ea5e8e23b4e3c55d23 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d7af44f0a98b2ed51376a0663f13b30780cd441ca29ea9095cc0d3c73fd98308
size 43755
| [
"[email protected]"
] | |
a48c8debf8d2db367795a06b36a01b308e9d8ff6 | 888aa356ba60e97754b780badb9aba3b293e3438 | /ErroresyExcepciones.py | b6831b60e65d63e8fabccf416c11449bec61c985 | [] | no_license | DanielTellezAlvarado/Python-Ejercicos-Practica | 45c74760ada253fa3052f0987cde815458606c62 | 1b923165138c8f698a19e4993b8e96ab10170b66 | refs/heads/master | 2022-11-27T13:26:14.223547 | 2020-08-08T04:35:03 | 2020-08-08T04:35:03 | 285,158,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py |
# This function behaves like input() but keeps re-prompting until the user
# enters a valid number.  (Comments translated from Spanish.)
def verificador_de_entrada_numerica(r):
    # Show the caller's prompt once, then read until a number is entered.
    print(r)
    while True:
        try:
            a = float(input(""))
            # NOTE(review): this comparison is False only for NaN, so an
            # input of "nan" silently loops without re-printing the error —
            # presumably a deliberate NaN filter; confirm.
            if ((a / 1) == a):
                return a;
        except (TypeError, ValueError):
            # Non-numeric input: tell the user and ask again.
            print("\nNo ingresaste un numero por favor intenta de nuevo\n")
# Short alias so the calls below are less tedious to write; 'entrada' does
# exactly what the validator above does.  (Comments translated from Spanish.)
entrada = verificador_de_entrada_numerica
# Sample program demonstrating the validator: read and add two numbers.
print("Vamos a sumar dos numeros\n")
a = entrada("Ingresa el primer numero\n")
b = entrada("Ingresa el segundo numero\n")
print("La suma es: ", a + b)
"[email protected]"
] | |
36e505c6b4b867c56f5f81788b9ac62ea10742ed | 7ac31a36aa002c852fca854f8da2610038f4901c | /tests/integration_tests/repositories/test_product_repository.py | 1d11595c0eb7afa1fd5ec49be251b0e6ebf8516f | [] | no_license | Himon-SYNCRAFT/cleanPony | f10ab47adaa9d88fa522cccb81e2ece2d5b7f8b1 | 6a0c778a35e27fef1dd82376a2d93001fda66b68 | refs/heads/master | 2020-04-17T18:34:48.763489 | 2019-03-07T14:52:51 | 2019-03-07T14:52:51 | 166,832,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | from cleanPony.db.models import db, Product, Title
from cleanPony.core.entities import Product as ProductEntity
from cleanPony.db.repositories import CrudRepository
from pony.orm import commit, db_session
from cleanPony.core.filter import Filter, FilterType
product_id = 1
product_name = 'product_name'
product_repository = CrudRepository(Product, ProductEntity)
def setup_function():
db.create_tables()
add_data()
def teardown_function():
db.drop_all_tables(with_all_data=True)
@db_session
def add_data():
product = Product(id=product_id, name=product_name)
Title(id=1, name='title', product=product)
commit()
def test_get_product():
product = product_repository.get(entity_id=product_id)
assert product.id == product_id
assert type(product) is ProductEntity
def test_find_product():
filters = {Filter(filter='name', value=product_name)}
products = product_repository.find(filters=filters)
assert products[0].name == product_name
filter_value = 'name'
filters = {Filter(filter='name', value=filter_value, operator=FilterType.LIKE)}
products = product_repository.find(filters=filters)
assert filter_value in products[0].name
| [
"[email protected]"
] | |
3b1cb0aedc178e698f4673f9caac6b88fdb5cdbf | 49fe5157ac5cece1fbd537514ded6b8c0f7a03cb | /dacy/tests/tutorials/test_dacy_augmentation.py | 927db94427357aab10580d9b7a36987aec1e4404 | [
"Apache-2.0"
] | permissive | scottire/DaCy | 5303f8c47f4c84a8e319c982c2495d03983b7555 | b5a1a34422efd00ccf9e678b1bb9a8e1a150f3c3 | refs/heads/main | 2023-06-28T15:31:32.127470 | 2021-08-03T11:32:41 | 2021-08-03T11:32:41 | 392,293,740 | 0 | 0 | Apache-2.0 | 2021-08-03T11:29:26 | 2021-08-03T11:29:25 | null | UTF-8 | Python | false | false | 4,061 | py | import dacy
from spacy.training import Example
from typing import List, Callable, Iterator
def test_tutorial():
def doc_to_example(doc):
return Example(doc, doc)
nlp = dacy.load("da_dacy_small_tft-0.0.0")
doc = nlp(
"Peter Schmeichel mener ogsรฅ, at det danske landshold anno 2021 tilhรธrer verdenstoppen og kan vinde den kommende kamp mod England."
)
example = doc_to_example(doc)
from spacy.training.augment import create_lower_casing_augmenter
from dacy.augmenters import (
create_keyboard_augmenter,
create_pers_augmenter,
create_spacing_augmenter,
)
from dacy.datasets import danish_names
lower_aug = create_lower_casing_augmenter(level=1)
keyboard_05 = create_keyboard_augmenter(
doc_level=1, char_level=0.05, keyboard="QWERTY_DA"
)
keyboard_15 = create_keyboard_augmenter(
doc_level=1, char_level=0.15, keyboard="QWERTY_DA"
)
space_aug = create_spacing_augmenter(doc_level=1, spacing_level=0.4)
for aug in [lower_aug, keyboard_05, keyboard_15, space_aug]:
aug_example = next(aug(nlp, example)) # augment the example
doc = aug_example.y # extract the reference doc
print(doc)
for aug in [lower_aug, keyboard_05, keyboard_15, space_aug]:
aug_example = next(aug(nlp, example)) # augment the example
doc = aug_example.y # extract the reference doc
print(doc)
print(danish_names().keys())
print(danish_names()["first_name"][0:5])
print(danish_names()["last_name"][0:5])
def augment_texts(texts: List[str], augmenter: Callable) -> Iterator[Example]:
"""Takes a list of strings and yields augmented examples"""
docs = nlp.pipe(texts)
for doc in docs:
ex = Example(doc, doc)
aug = augmenter(nlp, ex)
yield next(aug).y
texts = [
"Hans Christian Andersen var en dansk digter og forfatter",
"1, 2, 3, Schmeichel er en mur",
"Peter Schmeichel mener ogsรฅ, at det danske landshold anno 2021 tilhรธrer verdenstoppen og kan vinde den kommende kamp mod England.",
]
# Create a dictionary to use for name replacement
dk_name_dict = danish_names()
# force_pattern augments PER entities to fit the format and length of `patterns`. Patterns allows you to specificy arbitrary
# combinations of "fn" (first names), "ln" (last names), "abb" (abbreviated to first character) and "abbpunct" (abbreviated
# to first character + ".") separeated by ",". If keep_name=True, the augmenter will not change names, but if force_pattern_size
# is True it will make them fit the length and potentially abbreviate names.
pers_aug = create_pers_augmenter(
dk_name_dict, force_pattern_size=True, keep_name=False, patterns=["fn,ln"]
)
augmented_docs = augment_texts(texts, pers_aug)
for d in augmented_docs:
print(d)
# Here's an example with keep_name=True and force_pattern_size=False which simply abbreviates first names
abb_aug = create_pers_augmenter(
dk_name_dict, force_pattern_size=False, keep_name=True, patterns=["abbpunct"]
)
augmented_docs = augment_texts(texts, abb_aug)
for d in augmented_docs:
print(d)
# patterns can also take a list of patterns to replace from (which can be weighted using the
# patterns_prob argument. The pattern to use is sampled for each entity.
# This setting is especially useful for finetuning models.
multiple_pats = create_pers_augmenter(
dk_name_dict,
force_pattern_size=True,
keep_name=False,
patterns=["fn,ln", "abbpunct,ln", "fn,ln,ln,ln"],
)
augmented_docs = augment_texts(texts, multiple_pats)
for d in augmented_docs:
print(d)
docs = nlp.pipe(texts)
augmented_docs = augment_texts(texts, multiple_pats)
# Check that the added/removed PER entities are still tagged as entities
for doc, aug_doc in zip(docs, augmented_docs):
print(doc.ents, "\t\t", aug_doc.ents) | [
"[email protected]"
] | |
fa076fef300c8be5f0d7d8732a3ca007d6fa3ca9 | a13fca7c40d8842ef9a7be25a8e2a29f10923a56 | /venv/Scripts/pip3.7-script.py | ad2986e8b052c05de723d65a6e1ee59f90719384 | [] | no_license | salmaaashraf/Vehcile_Routing_Problem_Using_AntColony | c6661da36ff20d5a58180c75d157a7423b674433 | 14cc091007eee52c8d0c33b3df36a12195df400d | refs/heads/main | 2023-03-23T02:43:52.398428 | 2021-03-18T16:32:25 | 2021-03-18T16:32:25 | 342,939,189 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #!"C:\Users\metr\PycharmProjects\Vehicle routing problem\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
698252227f388daecacf70012ab174ebe595d3be | c90594921d50e2e5df224c8d12663a25b0c476dc | /assn5/ABalauskas_assn5.py | 8667e13b5d014c9c8437d11d8c698fcd0b3997b8 | [] | no_license | zavon25/Intro-to-scripting | e3b386135b8e6de9fc0f6b3637daca3a9643bc15 | adfe5a7868bc2b4ca2528eeea1d348af6dd66f51 | refs/heads/master | 2021-01-10T21:20:46.117647 | 2015-04-05T02:00:51 | 2015-04-05T02:00:51 | 33,426,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,956 | py | #!/usr/bin/env python
"""
ITS 3210 Introduction to Scripting Languages
Governor's State University
Adam Balauskas
Assignment 5
Things are starting to get a lot more complicated. You may feel overwhelmed! This will be
much worse if you are waiting to try to do all of the exercises and homework in one day.
It is imperative that you spend multiple days per week on course material or you will not
retain it. Since every week is building on the previous week, you can't afford to have to
relearn the material every week. It will quickly become unmanageable and you'll be tempted
to cut corners which will become evident in the exams and final project. Stick with it!
You can do it - but it will take TIME and DEDICATION!
This week, you've been introduced to Boolean logic, list objects, for and while loops.
You've also learned a little about the concept of recursion through the use of nested
statements. Working with a bunch of simplistic scripts can get boring after awhile, so
this week I though it would be fun to write something a little more practical: your very
first zip file dictionary attack password cracker.
With that said, a reminder from the syllabus on ethical behavior is in order: Ethical
behavior is an absolute expectation in the execution of knowledge and tools gained through
this class. In some cases students may be exposed to activities and/or knowledge that can
be used illegally. Such activities will be grounds for failure of the course and potential
administrative or legal action taken against the student.
Complete this code so that it successfully cracks the zip file entitled
crackmeifyoucan.zip. Use the file common_passwords.txt as your attack dictionary. Once
successful, put the password in the comments and explain what the file is so that I know
you cracked it!
"""
# these are modules we will use to handle command line parameters and zip files
import sys
import zipfile
"""
# The first command line parameter is the script name itself, which is sys.argv[0]
# (remember that Python starts counting at 0). To determine if we have more arguments than
# just the script name itself, we can check the length of argv using the len() function
# which you have previously used in LPTHW exercises. The neat thing is that argv is
# actually a list, so you can iterate over it like a list (with a for loop) and use index
# operators on it. We want the second command line parameter, sys.argv[1], which will be
# the name of the zip file we want to crack. This script gives a friendly error message
# if the user doesn't invoke the script correctly.
#
# Don't worry about understanding the zipfile module code just yet, especially if you're
# confused already. If you can handle an extra challenge, read the docs:
# https://docs.python.org/2/library/zipfile.html
"""
# Checks the length of the command line and prevents importing extra information
if len(sys.argv) > 1:
zip_file = sys.argv[1]
zip = zipfile.ZipFile(zip_file, 'r')
else:
print "You must start the script with the file name of the zip file you want to " \
"crack as a command line parameter, e.g. 'python assn5.py crackmeifyoucan.zip'"
sys.exit(1) # this exits the program and tells the operating system it had an error
# we need to open the file with our 'dictionary' of common passwords in this case
password_file = open('common_passwords.txt', 'rb')
# this is how we attempt to open the zip file with the password contained on the current
# line of the file. Turn this into a function so that it can be called from inside the
# loop that iterates through each line (password) in the file. Don't worry if you don't
# understand this line of code, it is specific to the zipfile library. We'll learn about
# using different libraries more as the course progresses. Also, don't worry about the
# try and except blocks just yet. We'll learn more about exception handling in week 8.
# For now, just place this entire block inside a function so that you don't have to repeat
# it every time you try a password.
# this following function takes the line from the file and uses the string to attempt to crack the zip file.
#the password is "wolfpack" and creates a copy a clip from the webcomic www.PHDCOMICS.com
def trypassword(next_password):
try:
current_password = next_password.strip() # this removes extra whitespace: IMPORTANT!
print 'Trying password %s' % current_password
zip.setpassword(current_password)
zip.extractall()
print 'The password is %s' % current_password
sys.exit(0) # the program will exit successfully once the password is found
except Exception:
print '\tWRONG PASSWORD'
# This section of code steps through each line of the file and tests it against the zip file using the fuction trynextpassword.
for line in password_file.readlines():
next_password = line
trypassword(next_password)
# don't forget to close files after opening them!
zip.close()
password_file.close() | [
"[email protected]"
] | |
d6d136ff9639fec40049af12260793b8c5cb02b5 | 3e3c70bd9a045259a8f2398c54b491461e7b871c | /hex2im.py | 521e5b9adeef750b83ed8634d02f6d6e0b928b99 | [] | no_license | abhishyantkhare/hexpaint | e9e5bd71ba032fd061ff09aa307970cbd56acdec | a2b01d89d62c89dac391c723ebc44ad03717d4e4 | refs/heads/master | 2020-05-27T12:33:27.751516 | 2019-05-25T23:17:25 | 2019-05-25T23:17:25 | 188,619,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | py | import numpy as np
from subprocess import call
import sys
from PIL import Image
#TODO: Make these user input
IM_MAX_H = 1080
IM_MAX_W = 1920
IM_RES_1080_PROD = IM_MAX_H * IM_MAX_W
def file2hex(fname):
#Hexdump the file, convert to rgb, save to an np array
#split filename
print("Processing file...")
txt_fname = fname.split('.')[0] + '.txt'
#Use subprocess.call to call hexdump and pipe to text file
#TODO: seems jank, find a better way to do this?
f = open(txt_fname, "w")
call(['hexdump', fname], stdout=f)
rgb_arr = []
with open(txt_fname, 'rb') as hdump:
i = 0
for line in hdump:
#Remove the offset
if i % 5000 == 0:
print("Processed {} lines".format(i))
line = line.decode().split(' ')[1:]
for hex_val in line:
if hex_val and hex_val != '\n':
rgb_arr.append(int(hex_val, 16))
i = i + 1
#Remove created text file
call(['rm', txt_fname])
return np.asarray(rgb_arr).astype(np.uint8)
def shape_rgbarr(rgb_arr):
#Shape the hex array into the proper shape
#TODO: implement downsampling, right now it just truncates hexdumps that are too big
print("Reshaping array...")
curr_len = rgb_arr.shape[0] // 3
end_lim = curr_len * 3
rgb_arr = rgb_arr[:end_lim]
rgb_arr = rgb_arr.reshape(curr_len, 3)
div_factor = get_div_factor(curr_len)
h_lim = IM_MAX_H // div_factor
w_lim = IM_MAX_W // div_factor
prod_lim = h_lim*w_lim
rgb_arr = rgb_arr[:prod_lim]
rgb_arr = rgb_arr.reshape(h_lim, w_lim, 3)
print(rgb_arr.shape)
return rgb_arr
def get_div_factor(size_prod):
if size_prod > IM_RES_1080_PROD:
return 1
curr_lim = 2
curr_prod = IM_RES_1080_PROD // (curr_lim**2)
while size_prod < curr_prod:
curr_lim = curr_lim + 1
curr_prod = IM_RES_1080_PROD // (curr_lim**2)
return curr_lim
def imfromrgb_arr(rgb_arr, fname):
#Create an image from an rgb array
print("Writing image...")
img = Image.fromarray(rgb_arr)
fname = fname.split('.')[0] + '.png'
img.save(fname)
print("Done!")
# Check if user passed in a file
if len(sys.argv) < 2:
print("Please pass in a file to visualize!")
else:
fname = sys.argv[-1]
rgb_arr = file2hex(fname)
shaped_rgb_arr = shape_rgbarr(rgb_arr)
imfromrgb_arr(shaped_rgb_arr, fname) | [
"Abhishyant Khare"
] | Abhishyant Khare |
6291bc6c15db2bfec8ba0a05cf6beccc636a97ca | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7561_run1M6.py | 18f24feb1b08a183966111f19479d9b029bd46d3 | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7561', 'run1M6']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2751/E2751_e783755/s800605_5610_1M4_s6', '/ifs/scratch/pimri/soccog/test_working/7561/run1M6')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7561/run1M6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7561/run1M6')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7561/run1M6'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7561/run1M6', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7561/run1M6', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7561/run1M6', '7561_run1M6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7561/run1M6', '7561_run1M6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"[email protected]"
] | |
7bcff63ef29e993b7f8ebc900815a24b41cedf76 | 906cc3ded19736a3a962b4a074321eba93d6d282 | /plugin/AbsorptionCoefficient/InP.py | ba0516d35adcfdaa2ecf4d51460cc5ea252f9ba5 | [
"MIT"
] | permissive | Sunethan/APD-analyser | 7612955335fb44c96e86ac70237baa9aeed29998 | 30a190c763017017ce16e171b5bf641fda62a4b0 | refs/heads/master | 2021-06-23T02:28:14.482556 | 2020-12-16T12:49:06 | 2020-12-16T12:49:06 | 173,737,815 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | import utils
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.pylab as pylab
import DataAnalysis as Data
import numpy as np
import inspect
params = {'legend.fontsize': 'x-large',
'figure.figsize': (7.5, 7),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
plt.rcParams.update({'font.size': 13})
file_location = '/Users/Ethan/PycharmProjects/Python/Avalanche Photodiode/plugin/AbsorptionCoefficient/data/'
def Adachi(energy, color='b'):
location = file_location + 'InP_Adachi.csv'
raw = Data.CSV(data=location, xlabel='energy', ylabel='absorption')
if energy is not None:
if energy < raw.X[0] or energy > raw.X[-1]:
raise BaseException("Energy should be in [%.2f, %.2f]" % (raw.X[0], raw.X[-1]))
return utils.find(raw.X, raw.Y, energy, 'log')
else:
plt.plot(raw.X, raw.Y, linestyle='-.', color=color, marker='s', label='S. Adachi (1989)')
def Palik(wavelength, color='b'):
"""
Input is wavelength (um), rather than energy(eV) or wavelength(nm).
:param wavelength: (um)
:param color: default is blue
:return: absorption coefficient
"""
location = file_location + 'InP_E.D.Palik.csv'
raw = Data.CSV(data=location, xlabel='wavelength', ylabel='absorption')
if wavelength is not None:
if wavelength < np.amin(raw.X) or wavelength > np.amax(raw.X):
raise BaseException("Wavelength should be in [%.2f, %.2f]" % (np.amin(raw.X), np.amax(raw.X)))
return utils.find(raw.X, raw.Y, wavelength, 'log')
else:
plt.plot(raw.X, raw.Y, linestyle='-.', color=color, marker='s', label='E.D. Palik (1985)') | [
"[email protected]"
] | |
57a874e0d17bd957f0f83e88d9ffaeec510202ae | 0aabfa9e73c0b714f3de7dd5e45acc7569b5250a | /PyLesson_05/Lesson_05_ModulusOperator.py | 6ea59a3d7da6492377d58e0a70d0fa50210cf860 | [] | no_license | nikejd1706/Speshilov_Nikita | 5b0ec55b73ef453ba85f437e31adaaa38adce46f | b0d9282eb922fe79fa7bbb29e6313f0393fe9dc2 | refs/heads/master | 2021-01-21T18:50:28.189820 | 2017-04-28T17:22:31 | 2017-04-28T17:22:31 | 67,239,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | one = int(input("Please enter a number: "))
two = int(input("Please enter a 2nd number: "))
even = (one + two) % 2 == 0
if even:
print(one + two, "is even!")
if not even:
print(one + two, "is odd!")
| [
"[email protected]"
] | |
353f316af62e26efd742a40f8eb83f4514001de4 | 3f3b02f914069de656eff79bb49216524d2a7422 | /authnzerver/actions/user.py | 28acc9b54bdede017979107cdc0d083e4e0f9e35 | [
"MIT"
] | permissive | sshivaji/authnzerver | ae0a3449461d1ca1018beaa61366205c2a6cd660 | ce2ece00e120f7764ac57a917c268a60de6ecfaf | refs/heads/master | 2021-02-14T19:52:13.449526 | 2020-03-24T08:40:33 | 2020-03-24T08:40:33 | 249,654,578 | 0 | 0 | null | 2020-03-24T08:39:11 | 2020-03-24T08:39:11 | null | UTF-8 | Python | false | false | 25,853 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# actions_user.py - Waqas Bhatti ([email protected]) - Aug 2018
# License: MIT - see the LICENSE file for the full text.
'''This contains functions to drive user account related auth actions.
'''
#############
## LOGGING ##
#############
import logging
# get a logger
LOGGER = logging.getLogger(__name__)
#############
## IMPORTS ##
#############
try:
from datetime import datetime, timezone, timedelta
utc = timezone.utc
except Exception:
from datetime import datetime, timedelta, tzinfo
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
import multiprocessing as mp
import socket
import uuid
from tornado.escape import squeeze
from sqlalchemy import select
from fuzzywuzzy.fuzz import UQRatio
from .. import authdb
from .session import auth_session_exists
from argon2 import PasswordHasher
from .. import validators
######################
## PASSWORD CONTEXT ##
######################
pass_hasher = PasswordHasher()
#######################
## PASSWORD HANDLING ##
#######################
def validate_input_password(
full_name,
email,
password,
min_length=12,
max_match_threshold=20
):
'''This validates user input passwords.
1. must be at least min_length characters (we'll truncate the password at
1024 characters since we don't want to store entire novels)
2. must not match within max_match_threshold of their email or full_name
3. must not match within max_match_threshold of the site's FQDN
4. must not have a single case-folded character take up more than 20% of the
length of the password
5. must not be completely numeric
6. must not be in the top 10k passwords list
'''
messages = []
# we'll ignore any repeated white space and fail immediately if the password
# is all white space
if len(squeeze(password.strip())) < min_length:
LOGGER.warning('password for new account: %s is too short' % email)
messages.append('Your password is too short. '
'It must have at least %s characters.' % min_length)
passlen_ok = False
else:
passlen_ok = True
# check if the password is straight-up dumb
if password.casefold() in validators.TOP_10K_PASSWORDS:
LOGGER.warning('password for new account: %s is '
'in top 10k passwords list' % email)
messages.append('Your password is on the list of the '
'most common passwords and is vulnerable to guessing.')
tenk_ok = False
else:
tenk_ok = True
# FIXME: also add fuzzy matching to top 10k passwords list to avoid stuff
# like 'passwordpasswordpassword'
# check the fuzzy match against the FQDN and email address
fqdn = socket.getfqdn()
fqdn_match = UQRatio(password.casefold(), fqdn.casefold())
email_match = UQRatio(password.casefold(), email.casefold())
name_match = UQRatio(password.casefold(), full_name.casefold())
fqdn_ok = fqdn_match < max_match_threshold
email_ok = email_match < max_match_threshold
name_ok = name_match < max_match_threshold
if not fqdn_ok or not email_ok or not name_ok:
LOGGER.warning('password for new account: %s matches FQDN '
'(similarity: %s) or their email address '
'(similarity: %s)' % (email, fqdn_match, email_match))
messages.append('Your password is too similar to either '
'the domain name of this server or your '
'own name or email address.')
# next, check if the password is complex enough
histogram = {}
for char in password:
if char.lower() not in histogram:
histogram[char.lower()] = 1
else:
histogram[char.lower()] = histogram[char.lower()] + 1
hist_ok = True
for h in histogram:
if (histogram[h]/len(password)) > 0.2:
hist_ok = False
LOGGER.warning('one character is more than '
'0.2 x length of the password')
messages.append(
'Your password is not complex enough. '
'One or more characters appear appear too frequently.'
)
break
# check if the password is all numeric
if password.isdigit():
numeric_ok = False
messages.append('Your password cannot be all numbers.')
else:
numeric_ok = True
return (
(passlen_ok and email_ok and name_ok and
fqdn_ok and hist_ok and numeric_ok and tenk_ok),
messages
)
def change_user_password(payload,
raiseonfail=False,
override_authdb_path=None,
min_pass_length=12,
max_similarity=30):
'''This changes the user's password.
payload requires the following keys:
- user_id
- full_name
- email
- current_password
- new_password
'''
for key in ('user_id',
'full_name',
'email',
'current_password',
'new_password'):
if key not in payload:
return {
'success':False,
'user_id':None,
'email':None,
'messages':['Invalid password change request. '
'Some args are missing.'],
}
# this checks if the database connection is live
currproc = mp.current_process()
engine = getattr(currproc, 'authdb_engine', None)
if override_authdb_path:
currproc.auth_db_path = override_authdb_path
if not engine:
currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
authdb.get_auth_db(
currproc.auth_db_path,
echo=raiseonfail
)
)
users = currproc.authdb_meta.tables['users']
# get the current password
sel = select([
users.c.password,
]).select_from(users).where(
(users.c.user_id == payload['user_id'])
)
result = currproc.authdb_conn.execute(sel)
rows = result.fetchone()
result.close()
current_password = payload['current_password'][:1024]
new_password = payload['new_password'][:1024]
try:
pass_check = pass_hasher.verify(rows['password'],
current_password)
except Exception:
pass_check = False
if not pass_check:
return {
'success':False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':['Your current password did '
'not match the stored password.']
}
# check if the new hashed password is the same as the old hashed password,
# meaning that the new password is just the old one
try:
same_check = pass_hasher.verify(rows['password'], new_password)
except Exception:
same_check = False
if same_check:
return {
'success':False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':['Your new password cannot '
'be the same as your old password.']
}
# hash the user's password
hashed_password = pass_hasher.hash(new_password)
# validate the input password to see if it's OK
# do this here to make sure the password hash completes at least once
# verify the new password is OK
passok, messages = validate_input_password(
payload['full_name'],
payload['email'],
new_password,
min_length=min_pass_length,
max_match_threshold=max_similarity
)
if passok:
# update the table for this user
upd = users.update(
).where(
users.c.user_id == payload['user_id']
).where(
users.c.is_active.is_(True)
).where(
users.c.email == payload['email']
).values({
'password': hashed_password
})
result = currproc.authdb_conn.execute(upd)
sel = select([
users.c.password,
]).select_from(users).where(
(users.c.user_id == payload['user_id'])
)
result = currproc.authdb_conn.execute(sel)
rows = result.fetchone()
result.close()
if rows and rows['password'] == hashed_password:
messages.append('Password changed successfully.')
return {
'success':True,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':messages
}
else:
messages.append('Password could not be changed.')
return {
'success':False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':messages
}
else:
messages.append("The new password you entered is insecure. "
"It must be at least 12 characters long and "
"be sufficiently complex.")
return {
'success':False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages': messages
}
###################
## USER HANDLING ##
###################
def create_new_user(payload,
min_pass_length=12,
max_similarity=30,
raiseonfail=False,
override_authdb_path=None):
'''This makes a new user.
payload keys: full_name, email, password
Returns the user_id and email if successful.
The emailverify_sent_datetime is set to the current time. The initial
account's is_active is set to False and user_role is set to 'locked'.
The email verification token sent by the frontend expires in 2 hours. If the
user doesn't get to it by then, they'll have to wait at least 24 hours until
another one can be sent.
If the email address already exists in the database, then either the user
has forgotten that they have an account or someone else is being
annoying. In this case, if is_active is True, we'll tell the user that we've
sent an email but won't do anything. If is_active is False and
emailverify_sent_datetime is at least 24 hours in the past, we'll send a new
email verification email and update the emailverify_sent_datetime. In this
case, we'll just tell the user that we've sent the email but won't tell them
if their account exists.
Only after the user verifies their email, is_active will be set to True and
user_role will be set to 'authenticated'.
'''
for key in ('full_name',
'email',
'password'):
if key not in payload:
return {
'success':False,
'user_email':None,
'user_id':None,
'send_verification':False,
'messages':["Invalid user creation request."]
}
# validate the email provided
email_confusables_ok = (
validators.validate_confusables_email(payload['email'])
)
email_regex_ok = validators.validate_email_address(payload['email'])
email_ok = email_regex_ok and email_confusables_ok
if not email_ok:
return {
'success':False,
'user_email':None,
'user_id':None,
'send_verification':False,
'messages':["The email address provided doesn't "
"seem to be a valid email address and cannot be used "
"to sign up for an account on this server."]
}
email = validators.normalize_value(payload['email'])
full_name = validators.normalize_value(payload['full_name'])
password = payload['password']
# this checks if the database connection is live
currproc = mp.current_process()
engine = getattr(currproc, 'authdb_engine', None)
if override_authdb_path:
currproc.auth_db_path = override_authdb_path
if not engine:
currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
authdb.get_auth_db(
currproc.auth_db_path,
echo=raiseonfail
)
)
users = currproc.authdb_meta.tables['users']
input_password = password[:1024]
# hash the user's password
hashed_password = pass_hasher.hash(input_password)
# validate the input password to see if it's OK
# do this here to make sure the password hash completes at least once
passok, messages = validate_input_password(
full_name,
email,
input_password,
min_length=min_pass_length,
max_match_threshold=max_similarity
)
if not passok:
return {
'success':False,
'user_email':email,
'user_id':None,
'send_verification':False,
'messages':messages
}
# insert stuff into the user's table, set is_active = False, user_role =
# 'locked', the emailverify_sent_datetime to datetime.utcnow()
try:
# create a system_id for this user
system_id = str(uuid.uuid4())
new_user_dict = {
'full_name':full_name,
'system_id':system_id,
'password':hashed_password,
'email':email,
'email_verified':False,
'is_active':False,
'emailverify_sent_datetime':datetime.utcnow(),
'created_on':datetime.utcnow(),
'user_role':'locked',
'last_updated':datetime.utcnow(),
}
ins = users.insert(new_user_dict)
result = currproc.authdb_conn.execute(ins)
result.close()
user_added = True
LOGGER.info('new user created: %s' % payload['email'])
# this will catch stuff like people trying to sign up again with their email
# address
except Exception:
LOGGER.warning('could not create a new user with '
'email: %s probably because they exist already'
% payload['email'])
user_added = False
# get back the user ID
sel = select([
users.c.email,
users.c.user_id,
users.c.is_active,
users.c.emailverify_sent_datetime,
]).select_from(users).where(
users.c.email == email
)
result = currproc.authdb_conn.execute(sel)
rows = result.fetchone()
result.close()
# if the user was added successfully, tell the frontend all is good and to
# send a verification email
if user_added and rows:
LOGGER.info('new user ID: %s for email: %s, is_active = %s'
% (rows['user_id'], rows['email'], rows['is_active']))
messages.append(
'User account created. Please verify your email address to log in.'
)
return {
'success':True,
'user_email':rows['email'],
'user_id':rows['user_id'],
'send_verification':True,
'messages':messages
}
# if the user wasn't added successfully, then they exist in the DB already
elif (not user_added) and rows:
LOGGER.warning(
'attempt to create new user with existing email: %s'
% email
)
# check the timedelta between now and the emailverify_sent_datetime
verification_timedelta = (datetime.utcnow() -
rows['emailverify_sent_datetime'])
# this sets whether we should resend the verification email
resend_verification = (
not(rows['is_active']) and
(verification_timedelta > timedelta(hours=24))
)
LOGGER.warning(
'existing user_id = %s, '
'is active = %s, '
'email verification originally sent at = %sZ, '
'will resend verification = %s' %
(rows['user_id'],
rows['is_active'],
rows['emailverify_sent_datetime'].isoformat(),
resend_verification)
)
messages.append(
'User account created. Please verify your email address to log in.'
)
return {
'success':False,
'user_email':rows['email'],
'user_id':rows['user_id'],
'send_verification':resend_verification,
'messages':messages
}
# otherwise, the user wasn't added successfully and they don't already exist
# in the database so something else went wrong.
else:
messages.append(
'User account created. Please verify your email address to log in.'
)
return {
'success':False,
'user_email':None,
'user_id':None,
'send_verification':False,
'messages':messages
}
def delete_user(payload,
raiseonfail=False,
override_authdb_path=None):
'''
This deletes the user.
This can only be called by the user themselves or the superuser.
This will immediately invalidate all sessions corresponding to this user.
Superuser accounts cannot be deleted.
payload must contain:
- email
- user_id
- password
'''
for key in ('email',
'user_id',
'password'):
if key not in payload:
return {
'success': False,
'user_id':None,
'email':None,
'messages':["Invalid user deletion request."],
}
# this checks if the database connection is live
currproc = mp.current_process()
engine = getattr(currproc, 'authdb_engine', None)
if override_authdb_path:
currproc.auth_db_path = override_authdb_path
if not engine:
currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
authdb.get_auth_db(
currproc.auth_db_path,
echo=raiseonfail
)
)
users = currproc.authdb_meta.tables['users']
sessions = currproc.authdb_meta.tables['sessions']
# check if the incoming email address actually belongs to the user making
# the request
sel = select([
users.c.user_id,
users.c.email,
users.c.password,
users.c.user_role
]).select_from(
users
).where(
users.c.user_id == payload['user_id']
)
result = currproc.authdb_conn.execute(sel)
row = result.fetchone()
if (not row) or (row['email'] != payload['email']):
return {
'success': False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':["We could not verify your email address or password."]
}
# check if the user's password is valid and matches the one on record
try:
pass_ok = pass_hasher.verify(row['password'],
payload['password'][:1024])
except Exception as e:
LOGGER.error(
"Password mismatch for user: %s, exception type: %s" %
(payload['user_id'], e)
)
pass_ok = False
if not pass_ok:
return {
'success': False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':["We could not verify your email address or password."]
}
if row['user_role'] == 'superuser':
LOGGER.error(
"Can't delete superusers."
)
return {
'success': False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':["Can't delete superusers."]
}
# delete the user
delete = users.delete().where(
users.c.user_id == payload['user_id']
).where(
users.c.email == payload['email']
).where(
users.c.user_role != 'superuser'
)
result = currproc.authdb_conn.execute(delete)
result.close()
# don't forget to delete the sessions as well
delete = sessions.delete().where(
sessions.c.user_id == payload['user_id']
)
result = currproc.authdb_conn.execute(delete)
result.close()
sel = select([
users.c.user_id,
users.c.email,
sessions.c.session_token
]).select_from(
users.join(sessions)
).where(
users.c.user_id == payload['user_id']
)
result = currproc.authdb_conn.execute(sel)
rows = result.fetchall()
if rows and len(rows) > 0:
return {
'success': False,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':["Could not delete user from DB."]
}
else:
return {
'success': True,
'user_id':payload['user_id'],
'email':payload['email'],
'messages':["User successfully deleted from DB."]
}
def verify_password_reset(payload,
raiseonfail=False,
override_authdb_path=None,
min_pass_length=12,
max_similarity=30):
'''
This verifies a password reset request.
payload must contain:
- email_address
- new_password
- session_token
'''
for key in ('email_address',
'new_password',
'session_token'):
if key not in payload:
return {
'success':False,
'messages':["Invalid password reset request. "
"Some required parameters are missing."]
}
# this checks if the database connection is live
currproc = mp.current_process()
engine = getattr(currproc, 'authdb_engine', None)
if override_authdb_path:
currproc.auth_db_path = override_authdb_path
if not engine:
currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
authdb.get_auth_db(
currproc.auth_db_path,
echo=raiseonfail
)
)
users = currproc.authdb_meta.tables['users']
# check the session
session_info = auth_session_exists(
{'session_token':payload['session_token']},
raiseonfail=raiseonfail,
override_authdb_path=override_authdb_path
)
if not session_info['success']:
return {
'success':False,
'messages':([
"Invalid session token for password reset request."
])
}
sel = select([
users.c.user_id,
users.c.full_name,
users.c.email,
users.c.password,
]).select_from(
users
).where(
users.c.email == payload['email_address']
)
result = currproc.authdb_conn.execute(sel)
user_info = result.fetchone()
result.close()
if not user_info or len(user_info) == 0:
return {
'success':False,
'messages':([
"Invalid user for password reset request."
])
}
# let's hash the new password against the current password
new_password = payload['new_password'][:1024]
try:
pass_same = pass_hasher.verify(
user_info['password'],
new_password,
)
except Exception:
pass_same = False
# don't fail here, but note that the user is re-using the password they
# forgot. FIXME: should we actually fail here?
if pass_same:
LOGGER.warning('user %s is re-using their '
'password that they ostensibly forgot' %
user_info['email_address'])
# hash the user's password
hashed_password = pass_hasher.hash(new_password)
# validate the input password to see if it's OK
# do this here to make sure the password hash completes at least once
passok, messages = validate_input_password(
user_info['full_name'],
payload['email_address'],
new_password,
min_length=min_pass_length,
max_match_threshold=max_similarity
)
if not passok:
return {
'success':False,
'messages':([
"Invalid password for password reset request."
])
}
# if the password passes validation, hash it and store it
else:
# update the table for this user
upd = users.update(
).where(
users.c.user_id == user_info['user_id']
).where(
users.c.is_active.is_(True)
).where(
users.c.email == payload['email_address']
).values({
'password': hashed_password
})
result = currproc.authdb_conn.execute(upd)
sel = select([
users.c.password,
]).select_from(users).where(
(users.c.email == payload['email_address'])
)
result = currproc.authdb_conn.execute(sel)
rows = result.fetchone()
result.close()
if rows and rows['password'] == hashed_password:
messages.append('Password changed successfully.')
return {
'success':True,
'messages':messages
}
else:
messages.append('Password could not be changed.')
return {
'success':False,
'messages':messages
}
| [
"[email protected]"
] | |
eb4640d817a6397d6d98f8b579e926a8660c41b6 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/BLOG/Python/pcc_2e-master/chapter_13/creating_first_alien/alien_invasion.py | fbaabad22dc6b3fa631254c1d41c8ae0ea689055 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 3,190 | py | import sys
import pygame
from settings import Settings
from ship import Ship
from bullet import Bullet
from alien import Alien
class AlienInvasion:
"""Overall class to manage game assets and behavior."""
def __init__(self):
"""Initialize the game, and create game resources."""
pygame.init()
self.settings = Settings()
self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
self.settings.screen_width = self.screen.get_rect().width
self.settings.screen_height = self.screen.get_rect().height
pygame.display.set_caption("Alien Invasion")
self.ship = Ship(self)
self.bullets = pygame.sprite.Group()
self.aliens = pygame.sprite.Group()
self._create_fleet()
def run_game(self):
"""Start the main loop for the game."""
while True:
self._check_events()
self.ship.update()
self._update_bullets()
self._update_screen()
def _check_events(self):
"""Respond to keypresses and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
self._check_keydown_events(event)
elif event.type == pygame.KEYUP:
self._check_keyup_events(event)
def _check_keydown_events(self, event):
"""Respond to keypresses."""
if event.key == pygame.K_RIGHT:
self.ship.moving_right = True
elif event.key == pygame.K_LEFT:
self.ship.moving_left = True
elif event.key == pygame.K_q:
sys.exit()
elif event.key == pygame.K_SPACE:
self._fire_bullet()
def _check_keyup_events(self, event):
"""Respond to key releases."""
if event.key == pygame.K_RIGHT:
self.ship.moving_right = False
elif event.key == pygame.K_LEFT:
self.ship.moving_left = False
def _fire_bullet(self):
"""Create a new bullet and add it to the bullets group."""
if len(self.bullets) < self.settings.bullets_allowed:
new_bullet = Bullet(self)
self.bullets.add(new_bullet)
def _update_bullets(self):
"""Update position of bullets and get rid of old bullets."""
# Update bullet positions.
self.bullets.update()
# Get rid of bullets that have disappeared.
for bullet in self.bullets.copy():
if bullet.rect.bottom <= 0:
self.bullets.remove(bullet)
def _create_fleet(self):
"""Create the fleet of aliens."""
# Make an alien.
alien = Alien(self)
self.aliens.add(alien)
def _update_screen(self):
"""Update images on the screen, and flip to the new screen."""
self.screen.fill(self.settings.bg_color)
self.ship.blitme()
for bullet in self.bullets.sprites():
bullet.draw_bullet()
self.aliens.draw(self.screen)
pygame.display.flip()
if __name__ == "__main__":
# Make a game instance, and run the game.
ai = AlienInvasion()
ai.run_game()
| [
"[email protected]"
] | |
3fccf8f78ed68775e041233c5bb83366d17f3b17 | d88ab5b8629fe205f03d404e644378f3228431d8 | /grubcat/fanju/backup.py | 13041ecfd69737f6831c69295423b86e97a41ff5 | [] | no_license | zjxhz/grubcat-backend | a8537b517e8bbf63d583c37b511db78d185b9e8e | fad2cbb03acd72dd649557b3c170a3208e4a43a7 | refs/heads/master | 2020-04-01T18:00:44.136883 | 2016-08-02T04:34:47 | 2016-08-02T04:34:47 | 64,723,949 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 28,214 | py | #coding=utf-8
# Create your views here.
######################### forms related ####################################################
class GroupForm(ModelForm):
class Meta:
model = Group
widgets = {
'desc': Textarea({'rows': 5})
}
exclude = ('owner', 'members')
class GroupLogoForm(ModelForm):
class Meta:
model = Group
fields = ('logo',)
class GroupCommentForm(ModelForm):
class Meta:
model = GroupComment
############################# model related ################################################
from datetime import datetime, date, timedelta
class RestaurantInfo(models.Model):
restaurant = models.OneToOneField(Restaurant, related_name='info')
average_cost = models.FloatField()
average_rating = models.FloatField()
good_rating_percentage = models.FloatField()
divider = models.IntegerField()
class RatingPic(models.Model):
restaurant = models.ForeignKey(Restaurant)
user = models.ForeignKey(User)
image = models.CharField(max_length=1024)
class Meta:
db_table = u'rating_pic'
class BestRatingDish(models.Model):
restaurant = models.ForeignKey(Restaurant, related_name="best_rating_dishes")
dish = models.ForeignKey(Dish)
times = models.IntegerField()
class Rating(models.Model):
user = models.ForeignKey(User, related_name='user_ratings')
restaurant = models.ForeignKey(Restaurant, related_name="ratings")
comments = models.CharField(max_length=4096)
time = models.DateTimeField()
rating = models.FloatField()
average_cost = models.FloatField()
dishes = models.ManyToManyField(Dish, db_table="rating_dishes")
auto_share = models.BooleanField()
class MealInvitation(models.Model):
from_person = models.ForeignKey(User, related_name="invitation_from_user")
to_person = models.ForeignKey(User, related_name='invitation_to_user')
meal = models.ForeignKey(Meal)
timestamp = models.DateTimeField(default=datetime.now())
status = models.IntegerField(default=0) # PENDING, ACCEPTED, REJECTED
def is_related(self, user_profile):
return self.from_person == user_profile or self.to_person == user_profile
class GroupCategory(models.Model):
name = models.CharField(u'ๅๅญๅ็ฑปๅ', max_length=30, unique=True)
cover = models.ImageField(u'ๅ็ฑปๅพ็', upload_to='category_cover', blank=True, null=True)
@property
def cover_url_default_if_none(self):
if self.cover:
return self.cover.url
else:
return staticfiles_storage.url('img/default/category-cover.png')
def __unicode__(self):
return self.name
class Meta:
db_table = u'group_category'
verbose_name = u'ๅๅญๅ็ฑป'
verbose_name_plural = u'ๅๅญๅ็ฑป'
class GroupPrivacy(Privacy):
pass
GROUP_PRIVACY_CHOICE = (
(GroupPrivacy.PUBLIC, u'ๅ
ฌๅผ๏ผๆๆไบบ้ฝๅฏไปฅๅ ๅ
ฅ'),
(GroupPrivacy.PRIVATE, u'็งๅฏ๏ผไป
่ขซ้่ฏท็ไบบๅฏไปฅๅ ๅ
ฅ')
)
class Group(models.Model):
"""ๅๅญ"""
name = models.CharField(u'ๅ็งฐ', max_length=15, unique=True)
desc = models.CharField(u'ๆ่ฟฐ', max_length=100)
category = models.ForeignKey(GroupCategory, verbose_name=u'ๅ็ฑป', null=True, blank=True)
privacy = models.SmallIntegerField(u'ๅ
ฌๅผ', choices=GROUP_PRIVACY_CHOICE, default=GroupPrivacy.PUBLIC)
owner = models.ForeignKey(User, verbose_name=u'ๅๅปบ่
')
logo = models.ImageField(upload_to='group_logos', blank=True, null=True)
members = models.ManyToManyField(User, verbose_name=u'ๆๅ', related_name='interest_groups')
@property
def recent_meals(self):
return Meal.objects.filter(group=self).filter(
Q(start_date__gt=date.today()) | Q(start_date=date.today(),
start_time__gt=datetime.now().time())).order_by("start_date",
"start_time")
@property
def passed_meals(self):
return Meal.objects.filter(group=self).filter(
Q(start_date__lt=date.today()) | Q(start_date=date.today(),
start_time__lte=datetime.now().time())).order_by("start_date",
"start_time")
@property
def logo_url_default_if_none(self):
if self.logo:
return self.logo.url
else:
return staticfiles_storage.url('img/default/group-logo.jpg')
@models.permalink
def get_absolute_url(self):
return 'group_detail', (self.id, )
def __unicode__(self):
return self.name
class Meta:
db_table = u'group'
verbose_name = u'ๅๅญ'
verbose_name_plural = u'ๅๅญ'
class GroupComment(Comment):
group = models.ForeignKey(Group, verbose_name=u'ๅๅญ', related_name='comments')
parent = models.ForeignKey('self', related_name='replies', verbose_name=u'็ถ่ฏ่ฎบ', null=True, blank=True)
class Meta:
verbose_name = u'ๅๅญ่ฏ่ฎบ'
verbose_name_plural = u'ๅๅญ่ฏ่ฎบ'
def __unicode__(self):
return u'ๅๅญ(%s) ่ฏ่ฎบ%s' % (self.group, self.id)
################################################## views related ##########################################
from datetime import datetime
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from fanju.models import Restaurant, RestaurantInfo, Rating, Dish, Order,\
BestRatingDish, RestaurantTag, Region, Relationship, UserMessage, Meal, MealInvitation
from fanju.forms import *
import sys
import json
from django.conf import settings
### group related views ###
class GroupListView(ListView):
# TODO order by member num
queryset = Group.objects.filter(privacy=GroupPrivacy.PUBLIC).select_related('category').annotate(
num_members=Count('members')).order_by('-num_members')
template_name = "group/group_list.html"
context_object_name = "group_list"
def get_context_data(self, **kwargs):
context = super(GroupListView, self).get_context_data(**kwargs)
context['categories'] = GroupCategory.objects.all()
return context
class GroupCreateView(CreateView):
form_class = GroupForm
template_name = 'group/add_group.html'
def form_valid(self, form):
group = form.save(False)
group.owner = self.request.user
super(GroupCreateView, self).form_valid(form)
group.members.add(self.request.user)
# TODO need save many to many?
content = r'<a class="auto-close" href="%s"></a>' % reverse_lazy('group_detail', kwargs={'pk': group.id})
return HttpResponse(content=content)
class GroupUpdateView(UpdateView):
form_class = GroupForm
model = Group
template_name = "group/edit_group.html"
class GroupLogoUpdateView(UpdateView):
form_class = GroupLogoForm
model = Group
template_name = "group/edit_group_logo.html"
def form_valid(self, form):
group = form.save(False)
super(GroupLogoUpdateView, self).form_valid(form)
content = r'<a class="auto-close" href="%s"></a>' % reverse_lazy('group_detail', kwargs={'pk': group.id})
return HttpResponse(content=content)
GROUP_COMMENT_PAGINATE_BY = 5
class GroupDetailView(DetailView):
model = Group
context_object_name = "group"
template_name = "group/group_detail.html"
def get_queryset(self):
return Group.objects.prefetch_related('comments__from_person', 'comments__replies__from_person')
def get_context_data(self, **kwargs):
parent_comments = GroupComment.objects.filter(parent__isnull=True, group=self.get_object()).select_related(
'group',
'from_person').prefetch_related('replies__from_person').order_by('-id')
context = super(GroupDetailView, self).get_context_data(**kwargs)
context.update({
"parent_comments": parent_comments[:GROUP_COMMENT_PAGINATE_BY],
'has_next': parent_comments.count() > GROUP_COMMENT_PAGINATE_BY
})
return context
class GroupCommentListView(ListView):
template_name = "group/comment_list.html"
context_object_name = "parent_comments"
model = GroupComment
paginate_by = GROUP_COMMENT_PAGINATE_BY
def get_queryset(self):
parent_comments = GroupComment.objects.filter(parent__isnull=True,
group=self.kwargs['group_id']).select_related(
'from_person').prefetch_related('replies__from_person').order_by('-id')
return parent_comments
def get_context_data(self, **kwargs):
context = super(GroupCommentListView, self).get_context_data(**kwargs)
context.update({
"group_id": self.kwargs['group_id']
})
return context
class GroupMemberListView(ListView):
template_name = "group/member_list.html"
context_object_name = "user_list"
paginate_by = 10
def get_queryset(self):
return Group.objects.get(pk=self.kwargs['group_id']).members.all()
def get_context_data(self, **kwargs):
context = super(GroupMemberListView, self).get_context_data(**kwargs)
context.update({
"group_id": self.kwargs['group_id']
})
return context
def join_group(request, pk):
if request.method == 'POST':
group = Group.objects.get(pk=pk)
if group.privacy == GroupPrivacy.PUBLIC:
#TODO refactor
if request.user not in group.members.all():
group.members.add(request.user)
return create_sucess_json_response(u'ๅทฒ็ปๆๅๅ ๅ
ฅ่ฏฅๅๅญ๏ผ', {'redirect_url': reverse('group_list')})
else:
return create_failure_json_response(u'ๅฏนไธ่ตทๆจๅทฒ็ปๅ ๅ
ฅ่ฏฅๅๅญ๏ผๆ ้ๅๆฌกๅ ๅ
ฅ๏ผ')
else:
# need to handle invitation
return create_no_right_response(u'ๅฏนไธ่ตท๏ผๅชๆๅๅฐ้่ฏท็็จๆทๆๅฏไปฅๅ ๅ
ฅ่ฏฅ็งๅฏๅๅญ')
elif request.method == 'GET':
return HttpResponse(u'ไธๆฏๆ่ฏฅๆไฝ')
def leave_group(request, pk):
if request.method == 'POST':
group = Group.objects.get(pk=pk)
if request.user in group.members.all():
group.members.remove(request.user)
return create_sucess_json_response(u'ๅทฒ็ปๆๅ็ฆปๅผ่ฏฅๅๅญ๏ผ')
else:
return create_failure_json_response(u'ๅฏนไธ่ตทๆจ่ฟๆชๅ ๅ
ฅ่ฏฅๅๅญ๏ผ')
elif request.method == 'GET':
return HttpResponse(u'ไธๆฏๆ่ฏฅๆไฝ')
def create_group_comment(request):
if request.method == 'POST':
form = GroupCommentForm(request.POST)
#TODO some checks
if form.is_valid():
comment = form.save()
t = render_to_response('group/single_comment.html', {'comment': comment},
context_instance=RequestContext(request))
return create_sucess_json_response(u'ๅทฒ็ปๆๅๅๅปบ่ฏ่ฎบ๏ผ', {'comment_html': t.content})
else:
return create_failure_json_response(u'ๅฏนไธ่ตทๆจ่ฟๆชๅ ๅ
ฅ่ฏฅๅๅญ๏ผ')
elif request.method == 'GET':
return HttpResponse(u'ไธๆฏๆ่ฏฅๆไฝ')
def del_group_comment(request, pk):
if request.method == 'POST':
user_id = request.user.id
comment = GroupComment.objects.filter(pk=pk)
#TODO some checks
if len(comment) == 1:
comment[0].delete()
return create_sucess_json_response(u'ๅทฒ็ปๆๅๅ ้ค่ฏ่ฎบ๏ผ')
elif request.method == 'GET':
return HttpResponse(u'ไธๆฏๆ่ฏฅๆไฝ')
def writeJson(qs, response, relations=None):
json_serializer = serializers.get_serializer("json")()
if relations:
return json_serializer.serialize(qs, ensure_ascii=False, relations=relations, stream=response)
return json_serializer.serialize(qs, ensure_ascii=False, stream=response)
def getJsonResponse(qs, relations=None):
response = HttpResponse(content_type='application/json')
writeJson(qs, response, relations)
return response
def createGeneralResponse(status, message, extra_dict=None):
response = {'status': status, 'info': message}
if extra_dict:
response.update(extra_dict)
return HttpResponse(json.dumps(response))
# get distance in meter, code from google maps
def getDistance( lng1, lat1, lng2, lat2):
EARTH_RADIUS = 6378.137
from math import asin, sin, cos, radians, pow, sqrt
radLat1 = radians(lat1)
radLat2 = radians(lat2)
a = radLat1 - radLat2
b = radians(lng1) - radians(lng2)
s = 2 * asin(sqrt(pow(sin(a / 2), 2) + cos(radLat1) * cos(radLat2) * pow(sin(b / 2), 2)))
s = s * EARTH_RADIUS
return s * 1000
# convert the query set of models to a list of dict
def modelToDict(query_set, relations=None):
serializer = serializers.get_serializer("json")()
if relations:
return json.loads(serializer.serialize(query_set, relations=relations))
return json.loads(serializer.serialize(query_set))
def restaurantList(request):
key = request.GET.get('key')
if key:
return getJsonResponse(Restaurant.objects.filter(name__contains=key))
return getJsonResponse(Restaurant.objects.all())
def get_restaurant(request, restaurant_id):
response = HttpResponse()
r = Restaurant.objects.get(id=restaurant_id)
jsonR = modelToDict([r])[0]
try:
ri = RestaurantInfo.objects.get(restaurant__id=restaurant_id)
jsonR['fields']['rating'] = ri.average_rating
jsonR['fields']['average_cost'] = ri.average_cost
jsonR['fields']['good_rating_percentage'] = ri.good_rating_percentage
jsonR['fields']['comments'] = modelToDict(Rating.objects.filter(restaurant__id=restaurant_id),
{'user': {'fields': ('username',)}})
jsonR['fields']['recommended_dishes'] = modelToDict(r.get_recommended_dishes(),
{'user': {'fields': ('username',)}, 'dish': {'fields': ('name',)}})
except ObjectDoesNotExist:
jsonR['fields']['rating'] = -1
jsonR['fields']['average_cost'] = -1
jsonR['fields']['good_rating_percentage'] = -1
jsonR['fields']['comments'] = []
jsonR['fields']['recommended_dishes'] = []
response.write(json.dumps(jsonR, ensure_ascii=False))
return response
def get_recommended_dishes(request, restaurant_id):
response = HttpResponse()
dishes = Restaurant.objects.get(id=restaurant_id).get_recommended_dishes()
writeJson(dishes, response, ('dish',)) # order by dish descending
return response
# return a list of values with the order how keys are sorted for a given dict
def sortedDictValues(some_dict):
keys = some_dict.keys()
keys.sort()
return [some_dict[key] for key in keys]
def get_restaurant_list_by_geo(request):
try:
response = HttpResponse()
lng = float(request.GET.get('longitude'))
lat = float(request.GET.get('latitude'))
rangeInMeter = float(request.GET.get('range'))
# TODO page...
distance_restaurant_dict = {}
rating_restaurant_dict = {}
cost_restaurant_dict = {}
order_by = request.GET.get('order_by')
restaurants = []
for r in Restaurant.objects.all():
if r.longitude and r.latitude:
distance = getDistance(lng, lat, r.longitude, r.latitude)
if distance < rangeInMeter:
if order_by == 'distance':
distance_restaurant_dict[distance] = r
elif order_by == 'cost':
cost_restaurant_dict[r.average_cost] = r
elif order_by == 'rating':
print "%s rating: %s" % (r, r.rating)
rating_restaurant_dict[r.rating] = r
else:
restaurants.append(r)
if order_by == 'distance':
restaurants = sortedDictValues(distance_restaurant_dict)
elif order_by == 'cost':
restaurants = sortedDictValues(cost_restaurant_dict)
elif order_by == 'rating':
print "before reverse_lazy: %s" % sortedDictValues(rating_restaurant_dict)
restaurants = sortedDictValues(rating_restaurant_dict)
restaurants.reverse_lazy()
# print "Restaruants in range %s meters: %s" % (rangeInMeter, distance_restaurant_dict)
writeJson(restaurants, response)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
return response
def login_required_response(request):
response = {"status": "NOK", "info": "You were not logged in"}
return HttpResponse(json.dumps(response))
def order_last_modified(request, order_id):
return Order.objects.get(id=order_id).confirmed_time
def get_orders(request):
if not request.user.is_authenticated():
return login_required_response(request)
response = HttpResponse()
serializer = serializers.get_serializer("json")()
orders = Order.objects.filter(customer__id=request.user.id).order_by("-created_time")
serializer.serialize(orders, relations={'restaurant': {'fields': ('name',)}}, stream=response, ensure_ascii=False)
return response
def favorite_restaurant(request, id):
if not request.user.is_authenticated():
return login_required_response(request)
response = {}
profile = request.user
if request.method == 'POST':
profile.favorite_restaurants.add(Restaurant.objects.get(id=id))
profile.save()
response['status'] = 'OK'
response['info'] = 'Data saved'
# is GET needed? How about use /restaurant
elif request.method == 'GET':
response['status'] = 'NOK'
response['info'] = 'GET is not supported'
return HttpResponse(json.dumps(response, ensure_ascii=False))
def favorite_restaurants(request):
if not request.user.is_authenticated():
return login_required_response(request)
response = HttpResponse()
profile = request.user
serializer = serializers.get_serializer("json")()
#serializer.serialize([profile], relations=('favorite_restaurants',), stream=response)
serializer.serialize(profile.favorite_restaurants.all(), relations=('favorite_restaurants',), ensure_ascii=False,
stream=response)
return response
# View or add an user comment for a restaurant
def restaurant_rating(request, restaurant_id):
rid = int(restaurant_id)
r = Restaurant.objects.get(id=restaurant_id)
response = HttpResponse()
if request.method == 'GET':
id_name_fields = {'fields': ('username',)}
writeJson(r.get_rating(), response, relations={'user': id_name_fields, })
return response
elif request.method == 'POST':
if not request.user.is_authenticated():
return login_required_response(request)
data = json.loads(request.raw_post_data)
comments = data['comments']
rating = float(data['rating'])
averageCost = float(data['average_cost'])
recommendedDishes = data['recommended_dishes']
try:
ri = RestaurantInfo.objects.get(restaurant__id=rid)
divider = ri.divider
ri.average_cost = (ri.average_cost * divider + averageCost) / (divider + 1)
ri.average_rating = (ri.average_rating * divider + rating) / (divider + 1)
good_rate = 0
if rating >= 3:
good_rate = 1
ri.good_rating_percentage = (ri.good_rating_percentage * divider + good_rate) / (divider + 1)
ri.divider = divider + 1
except ObjectDoesNotExist:
ri = RestaurantInfo()
ri.restaurant_id = rid
ri.average_cost = averageCost
ri.average_rating = rating
if rating >= 3:
ri.good_rating_percentage = 1
else:
ri.good_rating_percentage = 0
ri.divider = 1
for dish_id in recommendedDishes:
try:
rd = BestRatingDish.objects.get(dish__id=dish_id)
rd.times = rd.times + 1
except ObjectDoesNotExist:
rd = BestRatingDish()
rd.times = 1
rd.restaurant_id = rid
rd.dish_id = dish_id
rd.save()
ri.save()
rc = Rating()
rc.rating = rating
rc.user_id = request.user.id
rc.restaurant_id = rid
rc.comments = comments
rc.time = datetime.now()
rc.average_cost = averageCost
rc.save()
for dish_id in recommendedDishes:
rc.dishes.add(Dish.objects.get(id=dish_id))
rc.save()
return createGeneralResponse('OK', 'Comments committed')
else:
raise
def get_restaurant_tags(request):
return getJsonResponse(RestaurantTag.objects.all())
def get_restaurants_with_tag(request, tag_id):
tag = RestaurantTag.objects.get(id=tag_id)
return getJsonResponse(Restaurant.objects.filter(tags=tag))
#return getJsonResponse(RestaurantTag.objects.get(id=tag_id).restaurant_set.all())
def get_regions(request):
return getJsonResponse(Region.objects.all())
def get_restaurants_in_region(request, region_id):
return getJsonResponse(Region.objects.get(id=region_id).restaurant_set.all())
def get_following(request, user_id):
user = User.objects.get(id=user_id)
if request.method == 'GET':
return getJsonResponse(user.following.all(), {'user': {'fields': ('username',)}})
elif request.method == 'POST':
following_user = User.objects.get(id=request.POST.get('user_id'))
relationship = Relationship(from_person=user, to_person=following_user)
relationship.save()
return createGeneralResponse('OK', 'You are now following %s' % following_user)
else:
raise
def remove_following(request, user_id):
user = User.objects.get(id=user_id)
if request.method == 'POST':
following_user = User.objects.get(id=request.POST.get('user_id'))
relationship = Relationship.objects.get(from_person=user, to_person=following_user)
relationship.delete()
return createGeneralResponse('OK', 'You are not following %s anymore' % following_user)
def followers(request, user_id):
user = User.objects.get(id=user_id)
return getJsonResponse(user.followers.all(), {'user': {'fields': ('username',)}})
def get_recommended_following(request, user_id):
user = User.objects.get(id=user_id)
return getJsonResponse(user.recommended_following.all(), {'user':
{'fields': ('username',)}
})
def messages(request, user_id):
user = User.objects.get(id=user_id)
if request.method == 'GET':
message_type = request.GET.get('type', '0')
print "message_type: %s" % message_type
return getJsonResponse(UserMessage.objects.filter(to_person=user, type=message_type))
elif request.method == 'POST':
from_person = User.objects.get(id=request.POST.get('from_user_id'))
text = request.POST.get('message')
message_type = request.POST.get('type', '0')
message = UserMessage(from_person=from_person,
to_person=user,
message=text,
timestamp=datetime.now(),
type=message_type)
message.save()
return createGeneralResponse('OK', 'Message sent to %s' % user)
else:
raise
def meal_participants(request, meal_id):
if not request.user.is_authenticated():
return login_required_response(request)
user = request.user
if request.method == 'POST':
meal = Meal.objects.get(id=meal_id)
if meal.participants.count() >= meal.max_persons:
return createGeneralResponse('NOK', "No available seat.")
if user == meal.host:
return createGeneralResponse('NOK', "You're the host.")
if meal.is_participant(user):
return createGeneralResponse('NOK', "You already joined.")
meal.participants.add(user)
meal.actual_persons += 1
meal.save()
return createGeneralResponse('OK', "You've just joined the meal")
else:
raise
def view_or_send_meal_invitations(request, user_id):
if not request.user.is_authenticated():
return login_required_response(request)
user = request.user
if request.method == 'POST':
to_user = User.objects.get(id=request.POST.get('to_user_id'))
meal = Meal.objects.get(id=request.POST.get('meal_id'))
#if meal.host != user:
# return createGeneralResponse('NOK',"You're not the host - do we check this?")
if to_user == meal.host or meal.is_participant(to_user):
return createGeneralResponse('NOK', "%s already joined." % to_user)
if MealInvitation.objects.filter(from_person=user, to_person=to_user, meal=meal):
return createGeneralResponse('NOK', "Invitation sent to %s earlier, no new invitation sent." % to_user)
i = MealInvitation(from_person=user, to_person=to_user, meal=meal)
i.save()
return createGeneralResponse('OK', "Invitation sent to %s" % to_user)
elif request.method == 'GET':
# from_person=user
return getJsonResponse(user.invitation)
else:
raise
def accept_or_reject_meal_invitations(request, user_id, invitation_id):
if not request.user.is_authenticated():
return login_required_response(request)
user = request.user
i = MealInvitation.objects.get(id=invitation_id)
if request.method == 'POST':
if i.to_person == user:
if i.status == 0: # PENDING
accept = request.POST.get("accept").lower()
if accept == "yes":
i.status = 1
i.save()
return meal_participants(request, i.meal.id) #createGeneralResponse('OK',"Invitation accepted.")
else:
i.status = 2
i.save()
return createGeneralResponse('OK', "Invitation rejected.")
else:
return createGeneralResponse('NOK',
"Can not accept/reject an invitation that was already accepted or rejected")
else:
return createGeneralResponse('NOK', "Unauthorized: you are not the receiver of this invitation.")
elif request.method == 'GET':
if not i.is_related(user):
return createGeneralResponse('NOK',
"Unauthorized: you are not either the sender or receiver of the invitation")
return getJsonResponse([i])
else:
raise
# not used for now
#class RegisterView(CreateView):
# form_class = UserCreationForm
# template_name = 'registration/register.html'
#
# def get_context_data(self, **kwargs):
# context = super(RegisterView, self).get_context_data(**kwargs)
# context['next'] = self.get_success_url()
# return context
#
# def form_valid(self, form):
# response = super(RegisterView, self).form_valid(form)
# user = authenticate(username=form.cleaned_data["username"], password=form.cleaned_data["password1"])
# login(self.request, user)
# return response
#
# def get_success_url(self):
# success_url = self.request.REQUEST.get('next', '')
# netloc = urlparse.urlparse(success_url)[1]
# # Use default setting if redirect_to is empty
# if not success_url:
# success_url = reverse_lazy("index")
# # Heavier security check -- don't allow redirection to a different host.
# elif netloc and netloc != self.request.get_host():
# success_url = reverse_lazy("index")
# return success_url
########################### admin ###############################
class GroupAdmin(admin.ModelAdmin):
list_display = ('id','name','category')
list_filter = ('category',)
list_editable = ('category',)
class GroupCommentAdmin(admin.ModelAdmin):
list_display = ('id','group','comment')
list_filter = ('group',)
class ImageTestAdmin(ImageCroppingMixin,admin.ModelAdmin):
pass | [
"zjxhz@localhost"
] | zjxhz@localhost |
efd6bb802d3c2770a5e0673eba43b30b90ea6c0a | 474d55e53efa7855197287a2d4104e880eaed469 | /l1/travello/urls.py | 12082c6902c4bbf9860bd04ee2b104d7dba97372 | [] | no_license | ashayk9/django_projects | b6ef5d6c8f1e7c60c45dee94b8f41d88bd8bdb7f | 07388eeeecf30317cf92d6b96e4a1b9ea482f8e6 | refs/heads/master | 2022-07-09T15:03:22.813777 | 2020-05-11T08:56:53 | 2020-05-11T08:56:53 | 262,990,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('<int:primary_key>',views.individual_dest,name='place_details'),
]
| [
"[email protected]"
] | |
38ad6338c63036032cf23cb524f1d0b283572c2b | f86d5538c1b4b017a42dcdd1311b146711f0322e | /misc/gen_docx.py | b93aee7d757c3de76c7351dd03b5576aede715df | [] | no_license | godspeedcurry/CTF | 36342918db3105bdb19e5c4666ee2e433ad0160b | e9a8404d7e167fbe9921e77d9960de3c5e7ed370 | refs/heads/master | 2021-10-11T01:19:45.657361 | 2021-10-02T08:11:43 | 2021-10-02T08:11:43 | 155,665,426 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,704 | py | #!/usr/bin/env python
#coding: utf-8
import docx
import base64,re,json,os
from docx.shared import Cm
from bs4 import BeautifulSoup
json_data = ''
with open('test.json','rb') as f:
json_data = f.read()
json_data = json.loads(json_data)['RECORDS']
# print(s)
# r = input()
#ๅๅปบ Document ๅฏน่ฑก๏ผ็ธๅฝไบๆๅผไธไธช word ๆๆกฃ
d = docx.Document('good.docx')
s = [cell.text for cell in d.tables[0]._cells]
print(s)
texts = {
'[ๆ นๆฎใCNNVDๆผๆดๅฝๅ่ง่ใๅกซๅ]' : 'test',
'[ๅฎไฝๅ็งฐ]' : 'test',
'[ๆ นๆฎใCNNVDๆผๆดๅ็ฑปๆๅใๅกซๅ]' : 'test',
'[ๆ นๆฎใCNNVDๆผๆดๅ็บง่ง่ใๅกซๅ]' : 'test',
'[ๅฎไฝๆ่ฟฐ]\n[ๆผๆดๆ่ฟฐ]\n[ๅฝฑๅๆ่ฟฐ]\n[ๅฎไฝ่ฎฟ้ฎๅฐๅ]' : 'test',
'[ๆบๆๅ็งฐ]' : 'test',
'[ๆไบคไบบๅๅงๅ]' : 'test',
'[็ตๅญ้ฎ็ฎฑ]|[่็ณป็ต่ฏ]' : 'test',
'ๅนด ๆ ๆฅ' : 'test',
'[ๆผๆดๅฎไฝ]' : 'test',
'[่งฆๅๆกไปถ]' : 'test',
'[ๆผๆด้ช่ฏ่ฟ็จๅบ่ฏฅๅ
ๆฌๅฎๆด็ๅค็ฐๆญฅ้ชคๅ็ปๆ]' : 'test'
}
name_dict = None
def solve(mylist,flag):
if not flag:
res = []
for my in mylist:
url = re.findall('\[(.*?)\]\((.*?)\)',my)
print(url)
if len(url) == 1 and len(url[0]) == 2 and url[0][0] == url[0][1]:
res.append("URL:" + url[0][0])
continue
img = re.findall('/static/upload/(\d+)\.png',my)
if len(img) == 1:
res.append("IMG:" + img[0])
continue
res.append(my)
print(res)
return res
else:
res = []
for my in mylist:
img = re.findall('/static/upload/(\d+)\.png',my)
if len(img) == 1:
res.append("IMG:" + img[0])
continue
soup = BeautifulSoup(my,'html.parser')
res.append(soup.get_text())
return res
for data in json_data:
texts['[ๆ นๆฎใCNNVDๆผๆดๅฝๅ่ง่ใๅกซๅ]'] = data['title']
texts['[ๅฎไฝๅ็งฐ]'] = data['title']
texts['[ๆ นๆฎใCNNVDๆผๆดๅ็ฑปๆๅใๅกซๅ]'] = data['related_vul_type']
texts['[ๆ นๆฎใCNNVDๆผๆดๅ็บง่ง่ใๅกซๅ]'] = '้ซๅฑ'
texts['[ๅฎไฝๆ่ฟฐ]\n[ๆผๆดๆ่ฟฐ]\n[ๅฝฑๅๆ่ฟฐ]\n[ๅฎไฝ่ฎฟ้ฎๅฐๅ]'] = base64.b64decode(data['vul_poc']).decode('utf-8')
texts['[ๆบๆๅ็งฐ]'] = 'ๆญๅท้ปๅฎ็งๆๆ้ๅ
ฌๅธ'
texts['[ๆไบคไบบๅๅงๅ]'] = name_dict[data['author'].split('@')[0]]['zh_cn']
texts['[็ตๅญ้ฎ็ฎฑ]|[่็ณป็ต่ฏ]'] = data['author'] + ';' + name_dict[data['author'].split('@')[0]]['phone']
texts['ๅนด ๆ ๆฅ'] = '2020ๅนด8ๆ27ๆฅ'
long_text = base64.b64decode(data['vul_solution']).decode('utf-8').replace("**","")
url = None
markdown_url = re.findall('\[(.*?)\]',long_text)
html_url = re.findall('href=\"(.*?)\"',long_text)
if len(markdown_url):
url = markdown_url[0]
if len(html_url):
url = html_url[0]
texts['[ๆผๆดๅฎไฝ]'] = url if url else ''
texts['[่งฆๅๆกไปถ]'] = 'ๆ '
prove = solve([x for x in long_text.split('\r\n') if len(x)],'</' in long_text or '/>' in long_text)
texts['[ๆผๆด้ช่ฏ่ฟ็จๅบ่ฏฅๅ
ๆฌๅฎๆด็ๅค็ฐๆญฅ้ชคๅ็ปๆ]'] = ''
tables = d.tables[0]
columns = len(tables.columns)
rows = len(tables.rows)
for text in texts.keys():
idx = s.index(text)
col = idx & 1
row = (idx - col)//2
tables.cell(row,col).text = texts[text]
for p in prove:
if p.startswith("URL"):
tables.cell(14,1).paragraphs[0].add_run(p.replace("URL:",""))
elif p.startswith("IMG"):
img = tables.cell(14,1).paragraphs[0].add_run().add_picture("upload/" + p.replace("IMG:","") + ".png")
print(img.height,img.width)
scale = Cm(12.5) / img.width
img.height = int(img.height * scale)
img.width = Cm(12.5)
else:
tables.cell(14,1).paragraphs[0].add_run(p + "\n")
d.save('result/{}.docx'.format(data['title']))
| [
"[email protected]"
] | |
5353a60eec46537d695dbf035b5f246a7ad03d7c | 26a623b2ad6df6ec08051412338e6463e9b3fb49 | /_old_axolotl-1.0/code/modifications/gradient.py | 75337d92e0ff20aa2339f07bd48accd4dad8d03b | [] | no_license | GuoyingDong/Axolotl | ae548719df1ffea9e07a110cfd802b5a884acde2 | 0a19f182ea9263f9e23d29a85994adac4d410f0f | refs/heads/master | 2022-03-29T03:46:34.886417 | 2019-12-17T09:25:58 | 2019-12-17T09:25:58 | 261,350,353 | 1 | 0 | null | 2020-05-05T03:10:37 | 2020-05-05T03:10:37 | null | UTF-8 | Python | false | false | 1,148 | py | """
Calculates a gradient direction vector for every cell.
Inputs:
vals: distance field values
nx: number of cells in x direction
ny: number of cells in y direction
nz: number of cells in z direction
Output:
a: a 3d vector in gradient direction for every cell
"""
__author__ = ['Mathias Bernhard']
__copyright__ = 'Copyright 2018 / Digital Building Technologies DBT / ETH Zurich'
__license__ = 'MIT License'
__email__ = '<[email protected]>'
import Rhino.Geometry as rg
a = []
nyz = ny*nz
def get_index(x,y,z):
return x*nyz + y*nz + z
for x in range(nx):
for y in range(ny):
for z in range(nz):
if x==0 or x==nx-1 or y==0 or y==ny-1 or z==0 or z==nz-1:
a.append(rg.Vector3d(0,0,0))
else:
ux = vals[get_index(x-1,y,z)]
vx = vals[get_index(x+1,y,z)]
uy = vals[get_index(x,y-1,z)]
vy = vals[get_index(x,y+1,z)]
uz = vals[get_index(x,y,z-1)]
vz = vals[get_index(x,y,z+1)]
a.append(rg.Vector3d(vx-ux,vy-uy,vz-uz))
| [
"[email protected]"
] | |
cbdff796501d1e15ff5baecf3a1c46ca37665c54 | ce0614afad4b6d29845d6c4b47a45379e1550694 | /model/FM_Models/NFM.py | 86fc2c013849aa4846b13106407f25d7624ec3c3 | [] | no_license | wangfangye/FieldWiseFM2 | 56fdbb8af2af7a3785d97c06c3e6c2ea497f9ede | 973ccb5a9f618e777676c628192008cba80aa6c1 | refs/heads/master | 2023-04-29T12:07:01.508989 | 2021-05-20T05:57:45 | 2021-05-20T05:57:45 | 369,068,450 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@Author:wangfy
@project:PNNConvModel
@Time:2021/4/27 9:25 ไธๅ
''' | [
"[email protected]"
] | |
dfc38dc241e56e66500aaec28b030f0ea69033f0 | 46c8e288ccf1aca0bc2fca728725b3d11716a3ea | /useless-trivia-program.py | 611da77154d11cc5dc0176635ce07924b05c4776 | [
"CC0-1.0"
] | permissive | Karandeep07/beginner-projects | dc3cea54788077c328c9a30cbd98214341805ce1 | fbbba395b4b58a6b63bac564a1bd8fd618426b33 | refs/heads/main | 2023-06-01T11:08:50.477726 | 2021-06-18T10:05:58 | 2021-06-18T10:05:58 | 378,100,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | # The program takes three pieces of personal information from the user: name, age, and weight.
# From these mundane items, the program is able to produce some amusing but trivial facts about
# the person, such as how much the person would weigh on the moon.
# Though this may seem like a simple program ( and it is ), youโll find that the program is more
# interesting when you run it because youโve had input.
# Youโll care more about the results because theyโre personally tailored to you.
# Useless Trivia#
#
# Gets personal information from the user and then
# prints true but useless information about him or her
name = input("Hello, what is your name? \n")
age = float(input("How old are you? \n"))
weight = float(input("How much do you weigh in kilograms?\n"))
print(
f"If Johnson Sakaja were to email you, he would address you as {name.title()} \nbut if he was mad, he would address you as {name.upper()}\n")
name += " "
print(
f"If a small child was trying to get your attention, he would say: \n \"{name.title()*5}\" ")
seconds = age * 365 * 24 * 60 * 60
print(f"\nYouโre over {seconds} seconds old.\n")
moon_weight = weight / 6
print(
f"\nDid you know that on the moon you would weigh only {moon_weight} kilograms?\n")
sun_weight = weight * 27.1
print(f"\nOn the sun, you'd weigh {sun_weight} (but, ah... not for long).\n")
| [
"[email protected]"
] | |
1029da52c6ae618f3934abf6a4ae4cbb6e8ed814 | 25b870ab40952a6b5555498f116d8e1c146f9b41 | /game/weleem_utils.py | ec29e55795d59368b870ab53939bdb713ab5dfd2 | [] | no_license | tomdieli/way_lame_game | 2d37a4a22fce339986dd667408dc737ab8be4826 | eb52b0e440f4a197eb72652b6c336d904b90101a | refs/heads/main | 2023-08-06T01:40:02.420875 | 2023-07-28T19:42:01 | 2023-07-28T19:42:01 | 243,342,018 | 0 | 0 | null | 2023-04-21T20:52:22 | 2020-02-26T18:59:46 | Python | UTF-8 | Python | false | false | 5,022 | py | import random
from django.contrib.auth.models import User
from game.models import Item, Figure, Item, Game
def create_game(user_name):
    """Create a new Game owned by the user with the given username; return its id."""
    new_game = Game.objects.create(owner=User.objects.get(username=user_name))
    return new_game.id
def delete_game(game_id):
    """Delete the Game with the given primary key and return the deletion result."""
    return Game.objects.get(id=game_id).delete()
def roll_dice(num_dice=1):
    """Roll `num_dice` six-sided dice and return the individual results as a list."""
    return [random.randrange(1, 7) for _ in range(num_dice)]
def roll_init():
    """Roll initiative: a standard 3d6 roll."""
    return roll_dice(3)
def pre_attack_info(weapon, disadvantage=False):
    """Assemble the dice parameters needed to resolve an attack with *weapon*.

    A dodging defender (``disadvantage=True``) forces a 4-die attack roll
    with looser auto-miss/fumble thresholds; otherwise the standard 3-die
    roll is used. Damage dice/modifier are copied straight from the weapon.
    """
    if disadvantage:
        dice, miss_at, fumble = 4, 20, 2
    else:
        dice, miss_at, fumble = 3, 16, 1
    return {
        'num_dice': dice,
        'auto_miss': miss_at,
        'fumble_mod': fumble,
        'damage_dice': weapon['damage_dice'],
        'damage_mod': weapon['damage_mod'],
    }
def get_hit_takes(inventory):
    """Total the damage absorbed by equipped armour/shield items.

    Args:
        inventory: list of equipped item dicts, each with 'name' and
            'hit_takes' keys.

    Returns:
        tuple: (total hits absorbed, comma-trailed description of the
        absorbing items). ``(0, '')`` when nothing in the inventory
        absorbs hits — previously this case fell through and returned
        ``None``, breaking the caller's two-value unpack.
    """
    armourshields = [i for i in inventory if i['hit_takes'] != 0]
    hit_takes = 0
    desc = ''
    for armour in armourshields:
        # Accumulate protection: armour and shield both subtract from damage.
        # (Previously each item overwrote the total, so only the last counted.)
        hit_takes += armour['hit_takes']
        desc += f'{armour["name"]}, '
    return hit_takes, desc
def attack_results(binfo):
    """Resolve a single attack roll and return the outcome.

    *binfo* is the dict built by ``pre_attack_info()`` with 'attacker_dx'
    added. Returns a dict with the attack roll(s), damage roll(s), total
    damage and a 'status' string that will be published to each player.
    """
    rolls = roll_dice(binfo['num_dice'])
    attack_roll = sum(rolls)
    attack_dict = {
        'status': None,
        'rolls': rolls,
        'roll': attack_roll,
        'damage_rolls': [],
        'damage': 0,
    }
    # A total of 5 or less always hits; otherwise the roll must be within the
    # attacker's adjusted DX and not exceed the auto-miss threshold.
    if ((binfo['attacker_dx'] >= attack_roll) and (attack_roll <= binfo['auto_miss'])) or\
            (attack_roll <= 5):
        attack_dict['status'] = 'Hit'
        damage_rolls = roll_dice(binfo['damage_dice'])
        damage_roll = sum(damage_rolls)
        attack_damage = damage_roll + binfo['damage_mod']
        if attack_roll == 3:
            attack_damage *= 3
            attack_dict['status'] += '-TripleDamage'
        if attack_roll == 4:
            attack_damage *= 2
            attack_dict['status'] += '-DoubleDamage'
        attack_dict['damage_rolls'] = damage_rolls
        attack_dict['damage'] = attack_damage
    else:
        attack_dict['status'] = 'Miss'
        # Test the worse fumble first: the broken-weapon threshold is strictly
        # higher than the dropped-weapon one, so checking it second (as the old
        # elif ordering did) made '-BrokenWeapon' unreachable.
        if attack_roll >= binfo['auto_miss'] + (binfo['fumble_mod'] * 2):
            attack_dict['status'] += '-BrokenWeapon'
        elif attack_roll >= binfo['auto_miss'] + binfo['fumble_mod']:
            attack_dict['status'] += '-DroppedWeapon'
    return attack_dict
# this one called first
def attack(attacker, attackee, weapon):
    """Resolve one attack of *attacker* against *attackee* with *weapon*.

    Mutates both figure dicts in place (hits, penalties, dodging flag) and
    returns the attack_results() dict extended with a human-readable
    'message' plus the updated 'attacker'/'attackee' entries.
    """
    # Dice parameters, armour absorption and the attacker's adjusted DX.
    battle_info = pre_attack_info(weapon, attackee['dodging'])
    battle_info['hit_takes'], battle_info['armor'] = get_hit_takes(attackee['equipped_items'])
    battle_info['attacker_dx'] = attacker['adj_dx']
    attacker_name = attacker['figure_name']
    attackee_name = attackee['figure_name']
    message = "player %s attacks player %s.\n" % (attacker_name, attackee_name)
    result = attack_results(battle_info)
    if attackee['dodging']:
        # Dodging only protects against this one attack, then resets.
        message += "player %s is dodging. it's a 4-die roll. " % attackee_name
        attackee['dodging'] = False
    message += "Dice rolls: %s -> %s, Result: %s. " % (result['rolls'], result['roll'], result['status'])
    if result['status'].startswith('Hit'):
        message += "Damage dice rolls: %s -> %s, Result: %s " % (result['damage_rolls'], result['damage'], result['status'])
        damage = result['damage']
        # NOTE(review): the second f-string below is a bare expression — it is
        # never appended to `message`, so the "hits from the damage total"
        # text is silently dropped. Looks like a missing `message +=`; confirm.
        message += f"{battle_info['armor']} removes {battle_info['hit_takes']}"
        f"hits from the damage total of {damage}. "
        # Armour absorbs part of the damage; never go negative.
        damage -= battle_info['hit_takes']
        if damage < 0:
            damage = 0
        message += "Total damage to Player %s: %s. " % (attackee_name, damage)
        attackee['hits'] -= damage
        # Wound severity ladder: death, prone, DX penalties (checked worst-first).
        if attackee['hits'] <= 0:
            attackee['hits'] = 0
            attackee['penalties'].append('DEAD')
            message += "Player %s is DEAD!!!. " % attackee_name
        elif damage >= 8:
            message += "Player %s is PRONE due to heavy damage. " % attackee_name
            attackee['penalties'].append('prone')
        elif damage >= 5:
            message += "Player %s has -2 ADJ_DX next turn due to heavy damage. " % attackee_name
            attackee['penalties'].append('dx_adj')
        elif attackee['hits'] <= 3:
            message += "Player %s has -3 ADJ_DX next turn due to low hits. " % attackee_name
            attackee['penalties'].append('st_dx_adj')
    elif 'Weapon' in result['status']:
        # Fumble: attacker loses their weapon (only if one is equipped).
        if weapon:
            message += "Player %s dropped his weapon. " % attacker_name
            attacker['penalties'].append('dropped_weapon')
    result['message'] = message
    result['attacker'] = attacker
    result['attackee'] = attackee
    return result
| [
"[email protected]"
] | |
c3e119ecb69e228273a9a8667fe8cc3699b94037 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fractions_20200802115552.py | 87d6a99c8b64edac1dce64a5dd9222a8a12fa2a8 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | def fractions(numerator,denominator):
if denominator == 0 :
return str(numerator)
number = numerator / denominator
if numerator % denominator == 0:
return str(numerator // denominator)
newStr = str(number)
print(newStr)
largeStr = newStr.split(".")
if len(largeStr[1]) > 1:
return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
return newStr
def frac(numerator,denominator):
res = ""
if numerator == 0:
return "0"
if denominator == 0:
return "undefined"
if (numerator < 0 and denominator > 0) or (numerator > 0 and denominator <0):
res += "-"
if numerator % denominator == 0:
return str(numerator / denominator)
else:
# this means its has a remainder
res += str(numerator // denominator)
res += "."
newDict = {}
rem = numerator % denominator
print(rem)
while rem != 0:
print('dict',newDict)
if rem in newDict:
position = res.find(".") + 1
res = res[:position] + "(" + res
break
newDict[rem] = len(res)
rem *=10
res_part = rem // denominator
res += str(res_part)
rem = rem % denominator
print('res',res)
# print('dict',newDict)
print(frac(4,333)) | [
"[email protected]"
] | |
09cbdb0b6400828c5a00bd4bcaad0c2b8c6868a7 | c0af7cf947fd73b3e5cb7c62bf84cfa6cb1fa3c0 | /library/forms.py | 61bd9d457d9a79afcf16490aa91fda4d2bf1d618 | [] | no_license | ibrohimyakubov/university | b1c4e208052308c085dad1a08245f7d9e3b7e0e2 | 5bb325493351ed34465fc5e98ab2d59b8c1fdd4e | refs/heads/main | 2023-05-14T07:29:47.358117 | 2021-06-09T11:37:43 | 2021-06-09T11:37:43 | 374,569,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django import forms
class BookForm(forms.Form):
title = forms.CharField(max_length=200)
author = forms.CharField(max_length=150)
count = forms.IntegerField(min_value=1) | [
"ะฐะดัะตั"
] | ะฐะดัะตั |
9f18434fd751a8965e170247d3153a98b140ba30 | f49ba21338297606def06bb2ca14b4ee5ac40fda | /LPTHW/ex33.py | 416d3c520d26b7961439bf587a60c0a395b03036 | [] | no_license | kittykatcode/Learn-Python-the-Hard-Way | b848ee786596da42d885ed229a9073ccc923f7d6 | 8c916407ecf100896775d2f8999773db36fe9e20 | refs/heads/main | 2023-06-24T19:10:44.056623 | 2021-07-27T04:03:55 | 2021-07-27T04:03:55 | 346,969,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | i = 0
numbers = []
while i < 45:
print(f" at the top i is : {i}")
numbers.append(i)
i =i+1
print("number now:", numbers)
print("at the bottom i is :" , i)
print("the numbers")
for n in numbers:
print(n)
| [
"[email protected]"
] | |
8f1bd2aca8398f99e3b555f610b8f2bcc4f1de8b | 54e749c9c970652fb0e1a83e5b3bbf3a01dcc8cc | /shop/migrations/0009_auto_20200726_2138.py | dfdc64eaf34f348f073df6ceb4b43e12fe8073a8 | [] | no_license | champmaniac/Online-Bakery-System | f0a08137e0feb283b52007dbc4f233d6bef7c7c0 | 3915221921c2f6e96a001fb5fef069f3e9d9536d | refs/heads/main | 2023-01-27T13:08:32.861908 | 2020-12-12T15:17:31 | 2020-12-12T15:17:31 | 320,858,713 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # Generated by Django 3.0.8 on 2020-07-26 16:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0008_orders'),
]
operations = [
migrations.RenameModel(
old_name='Orders',
new_name='Order',
),
]
| [
"[email protected]"
] | |
74465e541448406d5635e7d976518bb96b872f29 | 4857e7da34d15c679e07b1a4af63f9e761a48523 | /Documents/python code/webapp/wordweb.py | 0997890e87db077d3cfd70da3578e8a7f4678cbd | [] | no_license | Dar005/webapp | 7b0f95d9789b3580366e7c263903eb30f81ffc40 | 8fe1450ce0ed785389361314b6017ac730194cde | refs/heads/master | 2021-05-03T16:41:09.267865 | 2019-02-14T13:38:10 | 2019-02-14T13:38:10 | 120,440,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py | # Student Name: Darran Gahan
# Student Number: C00098391
from flask import Flask, render_template, request, session, redirect
import word_utils
import data_utils
import time
from wordgame import check_words
app = Flask(__name__)
app.secret_key = "adfs;45;']sgakltuuos;ga]][g"
@app.route('/')
def index():
return render_template('index.html',
the_title='WordGame.')
@app.route('/running')
def running():
# gets a random source word and displays it
word_utils.pre_process_words()
sourceword = word_utils.get_source_word()
print(sourceword)
session['sourceword'] = sourceword
session['start_time'] = time.perf_counter()
return render_template('running.html',
the_title='WordGame',
sourceword=sourceword)
@app.route('/processwords', methods=['GET', 'POST'])
def word_process():
# Gets and returns the user_input
if request.method == 'POST':
session['end_time'] = time.perf_counter()
user_input = request.form['user_input']
session['user_input'] = user_input
winner, words = check_words(session.get('sourceword'), user_input)
session['winner'] = winner
if winner:
# if user wins
session['time_taken'] = session['end_time'] - session['start_time']
final_time = round(session['time_taken'], 2)
session['final_time'] = final_time
return render_template('winner.html',
time_taken=session['time_taken'],
final_time=final_time,
the_title='Winner')
return render_template('loser.html',
# if user loses
the_tilte='WordGame',
sourceword=session.get('sourceword'),
user_input=user_input,
words=words, )
@app.route('/addwinner', methods=['GET', 'POST'])
def add_winner():
if request.method == 'POST':
if session.get('winner'):
data_utils.add_to_scores(request.form.get('user_name'),
session.get('final_time'),
session.get('sourceword'))
session['winner'] = False
session['time_taken'] = False
leaderboard = data_utils.get_sorted_leaderboard()[:10]
return render_template('leaderboard.html',
the_title='WordGame',
leaderboard=leaderboard,)
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
app.secret_key = "adfs;45;']sgakltuuos;ga]][g" | [
"[email protected]"
] | |
92be9acf5c9a1e1f6d6e3ce4dfc56f8e076137d6 | d473970d5a2fd505b8073f9c67a914c5c4f55ad9 | /src/tessif/examples/application/verification_scenarios/sink/linear/timeseries_fixed.py | 14d3ed25a5895f116750e384c1035ac5740b7686 | [
"MIT"
] | permissive | tZ3ma/tessif-phd | 65c29eedf414d56d96e9253e1d551144446230d8 | b5e7c885c10ae3f856a51c9f4d72e924cfe10327 | refs/heads/main | 2023-04-15T09:18:47.586945 | 2023-03-26T12:53:50 | 2023-03-26T12:53:50 | 618,724,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py | import tessif.frused.namedtuples as nts
import numpy as np
import pandas as pd
# timeseries_fixed
# one source, two sinks
# sinks differ in parametrization of flow_rate and flow_costs
# sink1 has a capped flow rate given in a timeseries with an equal or lower value than the sources outflow
# sink2 has infinite flow rate (default parametrization) but higher flow costs than source1
# expected behaviour: sink1 will be fed from source with max flow rate, remaining outflow will feed sink2
# (-> the flow rate argument will cap the flow from sink1)
mapping = {
'sources': {
'sourceA': {
'name': 'source1',
'outputs': ('electricity',),
# force a flow rate of exactly 50
'flow_rates': {'electricity': nts.MinMax(min=50, max=50)},
'timeseries': {'electricity': nts.MinMax(min=10*[50], max=10*[50])},
},
},
'transformers': {},
'sinks': {
'sinkA': {
'name': 'sink1',
'inputs': ('electricity',),
# flow_costs = 0
'flow_costs': {'electricity': 0},
'flow_rates': {'electricity': nts.MinMax(min=0, max=50)},
# flow_rate limited
'timeseries': {
'electricity': nts.MinMax(
min=[50, 40, 30, 20, 10, 0, 0, 0, 0, 0, ],
max=[50, 40, 30, 20, 10, 0, 0, 0, 0, 0, ],
),
},
},
'sinkB': {
'name': 'sink2',
'inputs': ('electricity',),
# flow_rate unlimited (default)
'flow_rates': {'electricity': nts.MinMax(min=0, max=50)},
'timeseries': {
'electricity': nts.MinMax(
min=[0, 10, 20, 30, 40, 50, 50, 50, 50, 50, ],
max=[0, 10, 20, 30, 40, 50, 50, 50, 50, 50, ],
),
},
# flow_costs > 0
'flow_costs': {'electricity': 1},
},
},
'storages': {},
'busses': {
'busA': {
'name': 'centralbus',
'inputs': ('source1.electricity',),
'outputs': ('sink1.electricity', 'sink2.electricity',),
},
},
'timeframe': {
'primary': pd.date_range('01/01/2022', periods=10, freq='H'),
},
'global_constraints': {
'primary': {'name': 'default',
'emissions': float('+inf'),
'material': float('+inf')},
},
} # minimum working example sink - linear - timeseries_fixed
| [
"[email protected]"
] | |
f65157aedaf5c4e8be5e64b59442bb775ce690ec | d995415a37f721f8ab6b4b7898dd1c1b0979ac94 | /plot_accuracy.py | e66a914ead63ecf261ac23946e5b67cbec5e9380 | [] | no_license | dufengtong/EWC-python | 44c693d866082f85d1ea65b735be3741fe82f96c | 146fab8263cd84e2d667ead410e255b0ed61041e | refs/heads/master | 2020-04-08T19:00:06.495754 | 2019-05-11T12:39:30 | 2019-05-11T12:39:30 | 159,627,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import matplotlib.pyplot as plt
ewc_accuracy_list = [[0.9583], [0.9366, 0.9586], [0.9367, 0.9488, 0.954]]
no_ewc_accuracy_list = [[0.9558], [0.9116, 0.965], [0.9128, 0.9247, 0.9673]]
ax1 = plt.subplot(311)
y1 = [x[0] for x in ewc_accuracy_list ]
y2 = [x[0] for x in no_ewc_accuracy_list ]
ax1.set_ylim([0.8,1])
ax1.set_xticks([1,2,3])
ax1.set_xticklabels(['trainA','trainB','trainC'])
ax1.set_ylabel('TaskA')
ax1.set_xlim([1,3])
ax1.annotate('EWC', xy=(2.85,0.95), color='red')
ax1.annotate('SGD', xy=(2.85,0.88), color='blue')
ax1.plot([1,2,3], y1, 'ro-')
ax1.plot([1,2,3], y2, 'bo-')
ax2 = plt.subplot(312)
y1 = [x[1] for x in ewc_accuracy_list[1:] ]
y2 = [x[1] for x in no_ewc_accuracy_list[1:] ]
ax2.set_ylim([0.8,1])
ax2.set_xlim([1,3])
ax2.set_xticks([1,2,3])
ax2.set_xticklabels(['trainA','trainB','trainC'])
ax2.set_ylabel('TaskB')
ax2.plot([2,3], y1, 'ro-')
ax2.plot([2,3], y2, 'bo-')
ax3 = plt.subplot(313)
y1 = [ewc_accuracy_list[2][-1] ]
y2 = [no_ewc_accuracy_list[2][-1] ]
ax3.set_ylim([0.8,1])
ax3.set_xlim([1,3])
ax3.set_xticks([1,2,3])
ax3.set_xticklabels(['trainA','trainB','trainC'])
ax3.set_ylabel('TaskC')
ax3.plot([3], y1, 'ro-')
ax3.plot([3], y2, 'bo-')
plt.show() | [
"[email protected]"
] | |
2c16df93593b5fc2bd2067cfdd77f7e689333f40 | 2dc305915b1413c56eb49aa833871d249dacc512 | /application/method/method0002.py | 53048606c0b14c0428489be2c130db6bf31e3dbf | [] | no_license | mash716/Python | 03bf3200a9e6f16f47b6d67663a82dc66aa11acb | 3ff1a3ceea80e49ddb170e4c0578837cc318f311 | refs/heads/master | 2021-07-15T21:54:41.606433 | 2019-12-30T11:23:26 | 2019-12-30T11:23:26 | 216,381,156 | 1 | 0 | null | 2020-10-13T17:11:03 | 2019-10-20T15:11:40 | C++ | UTF-8 | Python | false | false | 471 | py | def test_func(num_1,num_2,oprn):
if oprn == 1:
print('่ถณใ็ฎ้ๅง')
print(num_1 + num_2)
elif oprn == 2:
print('ๅผใ็ฎ้ๅง')
print(num_1 - num_2)
elif oprn == 3:
print('ๆใ็ฎ้ๅง')
print(num_1 * num_2)
elif oprn == 4:
print('ๅฒใ็ฎ้ๅง')
print(num_1 / num_2)
else:
print('ไธๆใชใชใใฌใผใทใงใณๆๅฎใงใ')
test_func(100,10,3) | [
"[email protected]"
] | |
1ad07d343d48141bbdb19c9363927ca7810ba2b9 | f70e55037d6098ff30af5fe57050c5f1f09ae2f9 | /app/tests/contributor/test_cloud_methods_contributor.py | d7f6d5846f3804a689fe6cd28464103b12f0f126 | [] | no_license | apoorv-on-git/pixel-editor-v2 | 13191d7f106c5cb69468e77acbf0c0f1becba958 | 52ef95ee96166e542737304da3cdcb1ba6dec3fb | refs/heads/master | 2022-12-28T07:19:10.613298 | 2020-10-02T11:42:45 | 2020-10-02T11:42:45 | 265,566,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from app.db_controllers.contributor_controller import *
def test_get_user_document_valid():
contributor_data = get_user_document_data("contributor")
if contributor_data:
assert True
else:
assert False
def test_get_user_document_invalid():
contributor_data = get_user_document_data("contributor_invalid")
if contributor_data:
assert False
else:
assert True | [
"[email protected]"
] | |
c01300475a1c9af6040d3c7b69b95d22fe55530d | 88aec535575a7b9bfa9479e92a484c606d5e4a58 | /TK/4_listNscroll.py | 86acf2c71cc378611928bf7552321464f7f817fb | [] | no_license | mako34/PythonPrimer | 8c9ff74116bbcf888098886794dd5a9898b6de7f | 806662a1c5a9f184b7148b9e1f5d2228e0fab29c | refs/heads/master | 2020-03-30T14:17:25.711059 | 2013-11-26T20:44:11 | 2013-11-26T20:44:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #tamanio! no se vuelve mas pequenio de un ancho!
from tkinter import *
from tkinter import ttk
root = Tk()
l = Listbox(root, height=5)
l.grid(column=0, row=0, sticky=(N,W,E,S))
s = ttk.Scrollbar(root, orient=VERTICAL, command=l.yview)
s.grid(column=1, row=0, sticky=(N,S))
l['yscrollcommand'] = s.set
#mira el grip! para cambiar tamanio!
ttk.Sizegrip().grid(column=1, row=1, sticky=(S,E))
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
for i in range(1,101):
l.insert('end', 'Line %d of 100' % i)
root.mainloop() | [
"[email protected]"
] | |
e6113068b75de07dbe10e8c32f02074555d9ba99 | 49c416c488c4315a7c5b15239071e85ec4189cfc | /functions/old/random_board_generator_pythonchess.py | fbfb91c55873e3c4e71ce0e2d77b65bc0e74c208 | [
"MIT"
] | permissive | tvturnhout/chessential | 35e1dd1add008dc2bd874813af8dd90cbf80ba35 | 0261f785ecdf80f6d112cc8eb650af98395f0bb6 | refs/heads/master | 2021-09-13T00:39:45.180454 | 2018-04-23T09:38:28 | 2018-04-23T09:38:28 | 112,793,048 | 2 | 2 | MIT | 2017-12-02T12:05:02 | 2017-12-01T22:29:16 | null | UTF-8 | Python | false | false | 2,662 | py | import chess
import random
from vectorize import board2vector
from call_engine import best_board
import numpy as np
import h5py
import datetime
import time
boards = []
Nextboards = []
fname = './../data/' + datetime.datetime.now().strftime('%Y%m%dT%H%M') + 'boards.h5'
h5f = h5py.File(fname, 'w')
dsI = h5f.create_dataset("input_boards", (792,0), maxshape=(792,None), dtype='f', chunks=(792,1000))
dsO = h5f.create_dataset("output_boards", (792,0), maxshape=(792,None), dtype='f', chunks=(792,1000))
h5f.close()
start = time.time()
N = 0
while N < 1000000000:
board = chess.Board()
i = 0
while not i > 50 \
and not board.is_game_over() \
and not board.is_insufficient_material() \
and not board.is_stalemate() \
and not board.is_seventyfive_moves():
legal_moves = board.legal_moves
potential_moves = []
for item in legal_moves:
potential_moves.append(item)
i = i + 1
move = random.choice(potential_moves)
board.push(move)
#use the transformation function before adding it to board
boards.append(board2vector(board))
Nextboards.append(board2vector(best_board(board,search_depth=6)))
#print(Board2Vector(board))
#print(move)
# if board.is_game_over():
# print('is_game_over')
# print(board)
# if board.is_stalemate():
# print('is_stalemate')
# print(board)
# if board.is_seventyfive_moves():
# print('is_seventyfive_moves')
# print(board)
# if board.is_insufficient_material():
# print('is_insufficient_material')
# print(board)
#print(len(boards))
h5f = h5py.File(fname, 'a')
dsetIn = h5f["input_boards"]
dsetOut = h5f["output_boards"]
curlength = dsetIn.shape[1]
dsetIn.resize(curlength+len(boards), axis=1)
dsetIn[:,curlength:]=np.transpose(np.array(boards))
curlength = dsetOut.shape[1]
dsetOut.resize(curlength+len(Nextboards), axis=1)
dsetOut[:,curlength:]=np.transpose(np.array(Nextboards))
N = dsetIn.shape[1]
h5f.close()
boards = []
Nextboards = []
print("Added " + str(N) + " boards to database")
end = time.time()
print('Elapsed time {} sec'.format(end-start))
#h5f = h5py.File('boards.h5', 'w')
#h5f.create_dataset('input_boards', data=boards)
#h5f.create_dataset('output_boards', data=Nextboards)
#h5f.close()
'''
np.savetxt('test.txt', boards , delimiter=",", newline="\n", fmt ="%d")
outF = open("myOutFile.txt", "w")
for line in boards:
outF.write(str(line))
outF.write("\n")
outF.close()
'''
| [
"[email protected]"
] | |
6871b23a2b9713ca5355da1ae22f3b4ffe9dcf53 | f0c07e4a42aba9b30309f810fb3bc0d8c761b168 | /src/_pytest-stubs/config.pyi | d4be517af469cc73bea40985358a0149e212ee11 | [
"Apache-2.0"
] | permissive | lschmelzeisen/nasty-typeshed | 0d498b3c8f2cc6ad2d973c667b2f82dfc54eab18 | 1f37fa48e9f941ddbf35e4e6c904f7fc91078d90 | refs/heads/master | 2023-03-22T06:28:41.889328 | 2021-03-15T18:46:01 | 2021-03-15T18:46:15 | 264,028,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | pyi | #
# Copyright 2019-2021 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
class PytestPluginManager:
def hasplugin(self, name: str) -> bool: ...
class Config:
option: argparse.Namespace
pluginmanager: PytestPluginManager
def addinivalue_line(self, name: str, line: str) -> None: ...
| [
"[email protected]"
] | |
fb5ffa85703b324ad175cb58ce20de82367cafb3 | c4d93f7a1930336363d1fd6c5bc6e8594d5c2e22 | /setup.py | 59386344406b6c3b052b90fb60572fe5cbe5cd33 | [
"MIT"
] | permissive | ztime/prio-merge | b4965a0ccd480c04b6cb71b3e6edb63a0a930d49 | 806737e8783980bababa19f6be68a63e41989a42 | refs/heads/master | 2023-01-22T21:49:13.403836 | 2020-11-12T23:25:21 | 2020-11-12T23:25:21 | 312,105,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="prio_merge",
version="0.0.2",
author="Jonas Wedin",
author_email="[email protected]",
description="Merge nested dicts according to a few specific rules",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ztime/prio-merge",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| [
"[email protected]"
] | |
707a4dbe72dc800db14a2359c62fd9d46640dd3c | 50bb3b7cd6302b4145a36ec7d9247132936c5834 | /venv/bin/pip3.9 | 246c4dea602d327e73ad20e989451cd50c69d5cd | [] | no_license | jkzy24/joycelyn-blog | 7b25be817d9bd33bab20ace000d0fa5038399bf4 | f26a380db950f6995cd48d3e46f609c7d5545c8f | refs/heads/main | 2023-05-17T21:25:31.913542 | 2021-06-06T03:18:38 | 2021-06-06T03:18:38 | 366,576,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | 9 | #!/Users/joycelynkwang/Downloads/blog-with-users-end/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
007c976b403ef74d9f63207e2c05024f7ec1fe88 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pimnBHXJNtQffq4Cf_10.py | a7e19e001cd26a1e0742d2c10d235d14dcad91b3 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py |
def mapping(letters):
    """Map each letter to its uppercase form, e.g. ['a', 'b'] -> {'a': 'A', 'b': 'B'}."""
    return {letter: letter.upper() for letter in letters}
| [
"[email protected]"
] | |
a508228386b12d40df88c3865e7a7d234ebdb325 | 216814fb5f4aceeced92a0cfda849087a317b06a | /excelcy/storage.py | 8f5030cebe35515889e0a7c7ea6926fe3731ec18 | [
"MIT"
] | permissive | mindis/excelcy | 2a5faff49da6aec38b6d52bf994453c7861d2fdb | f123fd3474c17930fda5aaa8f6f24cc3e51599d1 | refs/heads/master | 2020-03-26T00:05:32.717327 | 2018-07-29T11:24:41 | 2018-07-29T11:24:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,299 | py | import copy
import datetime
import os
import tempfile
import typing
import attr
from excelcy import utils
from excelcy.registry import Registry, field
from excelcy.utils import odict
@attr.s
class BaseItemRegistry(Registry):
    """
    Base class for all item alike data.

    Shared bookkeeping fields for the concrete item types (Phase, Source,
    Prepare, Train, Gold).
    """
    # Position/identifier of the item inside its parent list registry;
    # auto-assigned by BaseItemListRegistry.add_item when missing.
    idx = field(default=None)  # type: int
    # Flag kept with the item; presumably disabled items are skipped when
    # processing — confirm against the ExcelCy pipeline.
    enabled = field(default=True)  # type: bool
    # Free-form human notes attached to the item.
    notes = field(default=None)  # type: str
@attr.s
class BaseItemListRegistry(Registry):
    """
    Base class for all list item alike data.

    Keeps items in an ordered dict keyed by the string form of their idx.
    """
    items = field(default=attr.Factory(odict))  # type: typing.Dict[str, Registry]

    def add_item(self, item):
        """Store *item*, auto-assigning a sequential idx when absent, and return it."""
        # Subclasses stringify idx before calling this, so a missing idx can
        # arrive either as None or as the literal string 'None' (== str(None));
        # both are replaced with the next 1-based position.
        item.idx = len(self.items) + 1 if not item.idx or item.idx == str(None) else item.idx
        self.items[str(item.idx)] = item
        return item
@attr.s()
class Config(Registry):
    """
    Storage for config in ExcelCy.

    Values are populated from the 'config' section of the YML/XLSX
    storage file.
    """
    # Base spaCy model identifier to start from (assumed — confirm usage).
    nlp_base = field(default=None)  # type: str
    # Name/path of the resulting trained model (assumed — confirm usage).
    nlp_name = field(default=None)  # type: str
    # Language code of the source text.
    source_language = field(default='en')  # type: str
    # Toggle for the 'prepare' phase (assumed — confirm usage).
    prepare_enabled = field(default=True)  # type: bool
    # Number of training iterations.
    train_iteration = field(default=None)  # type: int
    # Dropout rate used during training.
    train_drop = field(default=None)  # type: float
@attr.s()
class Phase(BaseItemRegistry):
fn = field(default=None) # type: str
args = field(default=attr.Factory(odict)) # type: dict
@attr.s()
class Phases(BaseItemListRegistry):
    """Ordered registry of Phase items."""
    items = field(default=attr.Factory(odict))  # type: typing.Dict[str, Phase]

    def add(self, fn: str, args: dict = None, idx: str = None):
        """Register a phase by function name with optional args; return the new Phase."""
        # idx is always stringified here, so a missing idx becomes the string
        # 'None' and add_item() replaces it with the next sequential number.
        item = Phase(fn=fn, args=args or odict(), idx=str(idx))
        self.add_item(item=item)
        return item
@attr.s()
class Source(BaseItemRegistry):
kind = field(default=None) # type: str
value = field(default=None) # type: str
@attr.s()
class Sources(BaseItemListRegistry):
def add(self, kind: str, value: str, idx: str = None):
item = Source(kind=kind, value=value, idx=str(idx))
self.add_item(item=item)
return item
@attr.s()
class Prepare(BaseItemRegistry):
kind = field(default=None) # type: str
value = field(default=None)
entity = field(default=None) # type: str
@attr.s()
class Prepares(BaseItemListRegistry):
items = field(default=attr.Factory(odict)) # type: typing.Dict[str, Prepare]
def add(self, kind: str, value, entity: str, idx: str = None):
item = Prepare(kind=kind, value=value, entity=entity, idx=str(idx))
self.add_item(item=item)
return item
@attr.s()
class Gold(BaseItemRegistry):
subtext = field(default=None) # type: str
offset = field(default=None) # type: str
entity = field(default=None) # type: str
@attr.s()
class Train(BaseItemRegistry):
    """One training sentence together with its Gold entity annotations."""
    # The full training sentence text.
    text = field(default=None)  # type: str
    # Gold annotations keyed by their dotted idx ('<train_idx>.<gold_idx>').
    items = field(default=attr.Factory(odict))  # type: typing.Dict[str, Gold]

    def add(self, subtext: str, entity: str, offset: str = None, idx: str = None):
        """Attach a Gold annotation (entity label for a substring) and return it."""
        item = Gold(subtext=subtext, offset=offset, entity=entity, idx=str(idx))
        self.add_item(item=item)
        return item

    def add_item(self, item: Gold):
        """Store *item*; unlike the base class, the auto idx is namespaced as '<self.idx>.<n>'."""
        item.idx = '%s.%s' % (self.idx, len(self.items) + 1) if not item.idx or item.idx == str(None) else item.idx
        self.items[str(item.idx)] = item
        return item
@attr.s()
class Trains(BaseItemListRegistry):
items = field(default=attr.Factory(odict)) # type: typing.Dict[str, Train]
def add(self, text: str, idx: str = None):
item = Train(text=text, idx=str(idx))
self.add_item(item=item)
return item
@attr.s()
class Storage(Registry):
phase = field(default=attr.Factory(Phases)) # type: Phases
source = field(default=attr.Factory(Sources)) # type: Sources
prepare = field(default=attr.Factory(Prepares)) # type: Prepares
train = field(default=attr.Factory(Trains)) # type: Trains
config = field(default=attr.Factory(Config)) # type: Config
    def resolve_value(self, value: str):
        """Expand placeholder tokens in *value* and return the result.

        Supported tokens: [tmp], [base_path], [nlp_path], [storage_path],
        [date] (YYYYMMDD) and [time] (HHMMSS). Non-string values are
        returned unchanged.
        """
        if type(value) == str:
            now = datetime.datetime.now()
            tmp_path = os.environ.get('EXCELCY_TEMP_PATH', tempfile.gettempdir())
            value = value.replace('[tmp]', tmp_path)
            # Path placeholders are only substituted once the corresponding
            # attribute has been set (they start as None in __attrs_post_init__).
            if self.base_path:
                value = value.replace('[base_path]', self.base_path)
            if self.nlp_path:
                value = value.replace('[nlp_path]', self.nlp_path)
            if self.storage_path:
                value = value.replace('[storage_path]', self.storage_path)
            value = value.replace('[date]', now.strftime("%Y%m%d"))
            value = value.replace('[time]', now.strftime("%H%M%S"))
        return value
    def __attrs_post_init__(self):
        """Initialise the non-attrs path attributes consumed by resolve_value()."""
        super(Storage, self).__attrs_post_init__()
        # Start unset; until assigned, the matching [..._path] placeholders
        # are left unresolved by resolve_value().
        self.base_path = None
        self.nlp_path = None
        self.storage_path = None
def _load_yml(self, file_path: str):
"""
Data loader for YML
:param file_path: YML file path
"""
data = utils.yaml_load(file_path=file_path)
self.parse(data=data)
def _load_xlsx(self, file_path: str):
"""
Data loader for XLSX, this needs to be converted back to YML structure format
:param file_path: XLSX file path
"""
wb = utils.excel_load(file_path=file_path)
data = odict()
# TODO: add validator, if wrong data input
# TODO: refactor to less hardcoded?
# parse phase
data['phase'] = odict()
data['phase']['items'] = odict()
for phase in wb.get('phase', []):
idx = phase.get('idx', len(data['phase']['items']))
args = odict()
raws = phase.get('args', '').split(',')
for raw in raws:
kv = raw.split('=')
if len(kv) == 2:
key, value = kv
args[key.strip()] = value.strip()
phase['args'] = args
data['phase']['items'][str(idx)] = phase
# parse source
data['source'] = odict()
data['source']['items'] = odict()
for source in wb.get('source', []):
idx = source.get('idx', len(data['source']['items']))
data['source']['items'][str(idx)] = source
# parse prepare
data['prepare'] = odict()
data['prepare']['items'] = odict()
for prepare in wb.get('prepare', []):
idx = prepare.get('idx', len(data['prepare']['items']))
data['prepare']['items'][str(idx)] = prepare
# parse train
data['train'] = odict()
data['train']['items'] = odict()
# lets ensure there is idx
train_idx, gold_idx = 0, 0
for train in wb.get('train', []):
if train.get('text') is not None:
if gold_idx > 0:
train_idx = train_idx + 1
gold_idx = 0
if train.get('idx') is None:
train['idx'] = str(train_idx)
else:
if train.get('idx') is None:
train['idx'] = '%s.%s' % (train_idx, gold_idx)
gold_idx = gold_idx + 1
for train in wb.get('train', []):
idx = str(train.get('idx'))
train_idx, gold_idx = idx, None
if '.' in idx:
train_idx, gold_idx = idx.split('.')
# add train list
if train.get('text') is not None:
t = odict()
t['items'] = odict()
for k in ['idx', 'text']:
t[k] = train.get(k)
data['train']['items'][train_idx] = t
else:
t = data['train']['items'][train_idx]
g = odict()
for k in ['idx', 'subtext', 'offset', 'entity']:
g[k] = train.get(k)
t['items'][idx] = g
# parse config
data['config'] = odict()
for config in wb.get('config', odict()):
name, value = config.get('name'), config.get('value')
data['config'][name] = value
self.parse(data=data)
def load(self, file_path: str):
"""
Load storage data from file path
:param file_path: File path
"""
# check whether it is remote URL
if '://' in file_path:
from urllib import request, parse
import tempfile
# download the file and put into temp dir
file_url = file_path
parsed = parse.urlparse(file_url)
file_name, file_ext = os.path.splitext(os.path.basename(parsed.path))
self.base_path = tempfile.gettempdir()
file_path = os.path.join(self.base_path, file_name + file_ext)
request.urlretrieve(file_url, file_path)
else:
# analyse the file name and ext
file_name, file_ext = os.path.splitext(file_path)
self.base_path = os.path.dirname(file_path)
self.storage_path = file_path
processor = getattr(self, '_load_%s' % file_ext[1:], None)
if processor:
processor(file_path=file_path)
def _save_yml(self, file_path: str, kind: list):
data = self.as_dict()
names = list(data.keys())
for name in names:
if name not in kind:
del data[name]
utils.yaml_save(file_path=file_path, data=data)
def _save_xlsx(self, file_path: str, kind: list):
def convert(header: list, registry: Registry) -> list:
return [getattr(registry, key, None) for key in header]
sheets = odict()
# build phase sheet
if 'phase' in kind:
headers = ['idx', 'enabled', 'fn', 'args', 'notes']
sheets['phase'] = [headers]
for _, phase in self.phase.items.items():
val = convert(sheets['phase'][0], phase)
val[headers.index('args')] = ', '.join(
['%s=%s' % (k, v) for k, v in val[headers.index('args')].items()])
sheets['phase'].append(val)
# build source sheet
if 'source' in kind:
headers = ['idx', 'enabled', 'kind', 'value', 'notes']
sheets['source'] = [headers]
for _, source in self.source.items.items():
sheets['source'].append(convert(sheets['source'][0], source))
# build prepare sheet
if 'prepare' in kind:
headers = ['idx', 'enabled', 'kind', 'value', 'entity', 'notes']
sheets['prepare'] = [headers]
for _, prepare in self.prepare.items.items():
sheets['prepare'].append(convert(sheets['prepare'][0], prepare))
# build train sheet
if 'train' in kind:
headers = ['idx', 'enabled', 'text', 'subtext', 'entity', 'notes']
sheets['train'] = [headers]
for _, train in self.train.items.items():
sheets['train'].append(convert(sheets['train'][0], train))
for _, gold in train.items.items():
sheets['train'].append(convert(sheets['train'][0], gold))
# build config sheet
if 'config' in kind:
headers = ['name', 'value']
sheets['config'] = [headers]
for config_name, config_value in self.config.as_dict().items():
sheets['config'].append([config_name, config_value])
# save
utils.excel_save(sheets=sheets, file_path=file_path)
def save(self, file_path: str, kind: list = None):
kind = kind or ['phase', 'source', 'prepare', 'train', 'config']
file_name, file_ext = os.path.splitext(file_path)
processor = getattr(self, '_save_%s' % file_ext[1:], None)
if processor:
processor(file_path=file_path, kind=kind)
def parse(self, data: odict):
"""
Overwrite current state of storage with given data
:param data: Data in ordereddict
"""
# copy the data
data = copy.deepcopy(data)
# parse phase
self.phase = Phases()
for idx, item in data.get('phase', {}).get('items', {}).items():
args = item.get('args', odict())
for key, val in args.items():
args[key] = self.resolve_value(value=val)
phase = Phase.make(items=item)
self.phase.add_item(item=phase)
# parse source
self.source = Sources()
for idx, item in data.get('source', {}).get('items', {}).items():
source = Source.make(items=item)
self.source.add_item(item=source)
# parse prepare
self.prepare = Prepares()
for idx, item in data.get('prepare', {}).get('items', {}).items():
prepare = Prepare.make(items=item)
self.prepare.add_item(item=prepare)
# parse train
self.train = Trains()
for idx, train_item in data.get('train', {}).get('items', {}).items():
train = Train.make(items=train_item)
self.train.add_item(item=train)
for idx2, gold_item in train_item.get('items', {}).items():
gold = Gold.make(items=gold_item)
train.add_item(item=gold)
# parse config
self.config = Config.make(items=data.get('config', {}))
| [
"[email protected]"
] | |
eb6d1a5901cd71495e844c11067b580ae5ee0f1b | 5e0d40b2dd13808037c3f8f8153ea9a7985f240a | /sandbox.py | f2a9c40789cb38e033c2594cecfd89d4d1cce592 | [
"MIT"
] | permissive | apgoldst/wok_search | 8ac54a71706e44bcfb4f6b1af129d3aacd27764e | f35fcf31f7219c60a6138480459782107453d8ec | refs/heads/master | 2021-01-15T12:22:44.437243 | 2016-01-18T22:12:12 | 2016-01-18T22:12:12 | 48,895,385 | 0 | 0 | null | 2016-01-02T01:52:27 | 2016-01-02T01:52:27 | null | UTF-8 | Python | false | false | 219 | py | import save_tables
# Inputs: CSV of DOE grant numbers and the Web of Science full-record
# XML namespace used when parsing the downloaded records.
csv_file = "DOE grant numbers.csv"
ns = "{http://scientific.thomsonreuters.com/schema/wok5.4/public/FullRecord}"
# Render both the publication table and the grant table for this CSV.
save_tables.print_pub_table(csv_file, ns)
save_tables.print_grant_table(csv_file, ns)
"[email protected]"
] | |
26972d4b9827830248b731c521b381ea08df0242 | d16807d7b86b2104795562e632b0e12c7e4db529 | /Company2/tests.py | a2d1c15fd487992924746a3e869ed81443bb4a9b | [
"MIT"
] | permissive | graysonw/interview_assignments | 6d60a62467d8d169a843df5d74049e5526ab3f50 | 00672b4b1802bf9d1535126589068db797757871 | refs/heads/master | 2022-12-11T19:34:40.781833 | 2019-11-20T19:08:05 | 2019-11-20T19:08:05 | 222,823,995 | 2 | 0 | MIT | 2022-12-08T06:54:46 | 2019-11-20T01:27:09 | Python | UTF-8 | Python | false | false | 3,174 | py | import unittest
import datetime
class TestMatching(unittest.TestCase):
    # First unit test. There are of course a lot more we could/should do, but
    # this is enough to get started/show the general idea.
    def test_matching(self):
        # imported here so test discovery works even if transform is absent
        from transform import match_data
        # three records from the "friendly" source
        friendly_data = [
            {
                "firstname": "Roger",
                "id": 111941,
                "last_active_date": "2017-01-04",
                "lastname": "Aaberg",
                "practice_location": "belk",
                "specialty": "Orthopedics",
                "user_type_classification": "Contributor"
            },
            {
                "firstname": "Joseph",
                "id": 15921,
                "last_active_date": "2017-01-08",
                "lastname": "Aadland",
                "practice_location": "concord",
                "specialty": "Orthopedics",
                "user_type_classification": "Contributor"
            },
            {
                "firstname": "Kimberly",
                "id": 20597,
                "last_active_date": "2017-01-04",
                "lastname": "Aaron",
                "practice_location": "avon",
                "specialty": "Cardiology",
                "user_type_classification": "Leader"
            }]
        # three records from the company2 source; only Kimberly Aaron
        # (name + specialty + location) should match the friendly data
        company2_data = [{"company2_user_id": 666015,
                          "practice_id": 28,
                          "firstname": "Kimberly",
                          "lastname": "Aaron",
                          "classification": "contributor",
                          "specialty": "Cardiology",
                          "platform_registered_on": "website",
                          "company2_last_active_date": datetime.date(2016, 12, 27),
                          "practice_name": "generic_clinic_27",
                          "location": "avon"},
                         {"company2_user_id": 251043,
                          "practice_id": 35,
                          "firstname": "Steven",
                          "lastname": "Michael",
                          "classification": "contributor",
                          "specialty": "Family Medicine",
                          "platform_registered_on": "website",
                          "company2_last_active_date": datetime.date(2016, 10, 15),
                          "practice_name": "generic_clinic_35",
                          "location": "bellingham"},
                         {"company2_user_id": 45015,
                          "practice_id": 63,
                          "firstname": "Emma",
                          "lastname": "Aaron",
                          "classification": "popular",
                          "specialty": "Dermatology",
                          "platform_registered_on": "mobile",
                          "company2_last_active_date": datetime.date(2017, 1, 5),
                          "practice_name": "generic_clinic_63",
                          "location": "chickasaw"}]
        num_matches, result = match_data(friendly_data, company2_data, [])
        # exactly one record overlaps between the two feeds
        self.assertEqual(num_matches, 1)
if __name__ == '__main__':
    # allow running this module directly: `python tests.py`
    unittest.main()
| [
"[email protected]"
] | |
57150a643799fb93317005ad8c86b44f45ab9653 | cea6957a1ff60f8b6bc7a1e7e2556233fdf478ab | /tests/fixtures.py | 64ce69a9eb5051d3af46909ac065c591fa9b08f7 | [
"BSD-3-Clause"
] | permissive | sigmavirus24/github3.py | 246b32431ee5e409703e1465d8df1ae7ff226c07 | 43aa2d617a7d91743d7ab93c87bc9a52e300cb72 | refs/heads/main | 2023-08-22T05:32:30.222500 | 2023-06-20T00:51:21 | 2023-06-20T00:51:21 | 3,710,711 | 1,077 | 534 | BSD-3-Clause | 2023-07-03T23:59:24 | 2012-03-13T19:58:53 | Python | UTF-8 | Python | false | false | 446 | py | import os
import sys
from io import BytesIO
def path(name, mode="r"):
    """Open the fixture file *name* from the tests/json/ directory."""
    fixture = f"tests/json/{name}"
    return open(fixture, mode)
def content(path_name):
    """Return two byte streams for fixture *path_name*:
    the raw stripped content and the same content wrapped in a JSON array."""
    raw = path(path_name).read().strip()
    wrapped = f"[{raw}]"
    return BytesIO(raw.encode()), BytesIO(wrapped.encode())
# Eagerly load every fixture file so tests can grab pre-built streams
# by filename: default[name] -> raw stream, iterable[name] -> list-wrapped.
default = {}
iterable = {}
for file in os.listdir("tests/json/"):
    default[file], iterable[file] = content(file)
| [
"[email protected]"
] | |
8ef286c089ffcb4c669c699f48f6dc79bdc368c2 | f84a8313289dc5bc321cdf5da2294cd3cd3127be | /chipy/__init__.py | 700a132a3d976664b4e795e031097229edc87c3b | [] | no_license | JelleLa/ChiPy | 1a89ce66228921d701b5ccac64c9a376aa23c598 | c0f6a85c3575a070d583fecf3b40aee34900811d | refs/heads/main | 2023-09-05T06:22:30.354904 | 2021-11-19T11:33:50 | 2021-11-19T11:33:50 | 387,149,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | #! usr/bin/python3
# Package version tag; "TESTING" marks an unstable development build.
ver = "TESTING"

# Greet the user on import and point them at the built-in help().
print(f"ChiPy version {ver} imported succesfully.\nUse Python's help() (e.g. help(chipy.Station.proc)) for additional information of ChiPy\ncomponents.\n ")

# Warn loudly when an unstable build is in use.
if ver == "TESTING":
    print(f"Note that you are using a \"TESTING\" version of ChiPy! Things are likely unstable and some functionality might be broken.\n")
from .core.environment import environment
from .functions.visualisation.flowtimediagram import flowtime_diagram
from .functions.visualisation.stationstats import station_stats
from .processes.station import Station
from .processes.generator import Generator
from .processes.repairman import Repairman
from .core.resource import BasicResource, PriorQueue, PriorResource
from .functions.simulate import simulate
from .functions.prints.stats import stats
| [
"[email protected]"
] | |
3b1c5d70989490ee104a3d607e265b421cd0e15b | 19a7b31e9f9881b60fbfbb63234a50dae904db89 | /shell.cgi | 51614becca9be84d966f99cf6b47c2f42a5cfd8e | [] | no_license | sidd2000/LinuxCGI | 08370491b7477ae324f57a74c7ba5997320a865c | c25c494ab124d300c3bcb3b0bab444ce8a46f31d | refs/heads/master | 2020-04-13T20:47:05.153012 | 2019-02-14T06:21:21 | 2019-02-14T06:21:21 | 163,439,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | cgi | #!/usr/bin/python
import commands
import cgi
import cgitb #traceback errors on browser
cgitb.enable()
print "Content-Type: text/html"
print ""
mypage_data=cgi.FieldStorage()
print "<pre>"
print commands.getoutput('echo $0')
print "</pre>"
print "\n \n"
print '<a href="/linuxGUI/home.html">'
print 'Back to Home Page'
print '</a>'
| [
"[email protected]"
] | |
668beba79b4395bdba8281e3c590b327e649431f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_367/ch129_2020_04_01_18_04_56_230900.py | 41f1f925941761d677d461149418cfa456dbd746 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | import math
def verifica_quadrado_perfeito(x):
    """Return True when *x* is a perfect square, False otherwise.

    The original body overwrote the argument with 1, looped over an
    undefined name ``fim`` (NameError) and actually tested parity; this
    implements the check the function name promises.
    """
    if x < 0:
        return False
    raiz = math.isqrt(x)
    return raiz * raiz == x
"[email protected]"
] | |
ec5fd7ac7bc0426283387d6962f962f9c8d44a4d | 8ca19f1a31070738b376c0370c4bebf6b7efcb43 | /office365/directory/audit/signins/location.py | 91c41c2644397a547255326320f89e056a394ddc | [
"MIT"
] | permissive | vgrem/Office365-REST-Python-Client | 2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3 | cbd245d1af8d69e013c469cfc2a9851f51c91417 | refs/heads/master | 2023-09-02T14:20:40.109462 | 2023-08-31T19:14:05 | 2023-08-31T19:14:05 | 51,305,798 | 1,006 | 326 | MIT | 2023-08-28T05:38:02 | 2016-02-08T15:24:51 | Python | UTF-8 | Python | false | false | 1,212 | py | from office365.onedrive.driveitems.geo_coordinates import GeoCoordinates
from office365.runtime.client_value import ClientValue
class SignInLocation(ClientValue):
    """Provides the city, state and country/region from where the sign-in happened."""

    def __init__(self, city=None, country_or_region=None, geo_coordinates=None, state=None):
        """
        :param str city: Provides the city where the sign-in originated. This is calculated using
            latitude/longitude information from the sign-in activity.
        :param str country_or_region: Provides the country code info (2 letter code) where the sign-in originated.
            This is calculated using latitude/longitude information from the sign-in activity.
        :param GeoCoordinates geo_coordinates: Provides the latitude, longitude and altitude where the sign-in
            originated.
        :param str state: Provides the State where the sign-in originated. This is calculated using
            latitude/longitude information from the sign-in activity.
        """
        self.city = city
        self.countryOrRegion = country_or_region
        # BUG FIX: the default used to be a single GeoCoordinates() instance
        # shared by every SignInLocation created without explicit coordinates
        # (classic mutable-default pitfall); build a fresh one per instance.
        self.geoCoordinates = geo_coordinates if geo_coordinates is not None else GeoCoordinates()
        self.state = state
| [
"[email protected]"
] | |
f5c7d83a8b42de65e3dd54d5aa3303ae1cfeff5c | d00a38677d50fc452faaf44a2eaa936e9adbabeb | /0x04-python-more_data_structures/10-best_score.py | 6d970f7059c7737565e68a6685fe9249cd5a3335 | [] | no_license | faykris/holbertonschool-higher_level_programming | c42dc166886315193fcf2e053f4e02d9fc5c4eb7 | b5b4fa105affb63b80d4a60be1a9efe4a2bef4f7 | refs/heads/main | 2023-08-05T19:16:12.718183 | 2021-09-23T02:06:57 | 2021-09-23T02:06:57 | 361,807,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | #!/usr/bin/python3
def best_score(a_dictionary):
    """Return the key holding the biggest integer value.

    Returns None when *a_dictionary* is None or empty. Ties resolve to
    the first key encountered in iteration order.

    BUG FIX: the original started the running maximum at 0, so when every
    value was negative (or zero) it returned the empty string "" instead
    of a real key; max() with a key function handles all values.
    """
    if not a_dictionary:
        return None
    return max(a_dictionary, key=a_dictionary.get)
| [
"[email protected]"
] | |
9b6bf4dff18a517df9b4bbe935253383856eaf19 | adc4e055b243bddd73e18a89cbd5b64543ed7d62 | /{{cookiecutter.workspace_name}}/{{cookiecutter.project_name}}/pipelines_async.py | 2d5cbd099cf25285fc3407766fe23ec3fd038894 | [] | no_license | xiaowangwindow/cookiecutter-scrapy-template | 7910e857674cc3d426d08976cf6223ab81871836 | 0cea474730e9be82a91b85ab3d4f9bb0c658e134 | refs/heads/master | 2021-01-02T22:29:34.769786 | 2017-08-18T06:35:16 | 2017-08-18T06:35:16 | 99,329,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py |
import pymongo
from scrapy.crawler import Crawler
from scrapy.exceptions import DropItem
from txmongo.connection import ConnectionPool
from twisted.internet import defer, reactor, ssl
from txmongo.filter import sort
from {{cookiecutter.project_name}}.items import RequestErrorItem
class ScrapyCityPipeline(object):
    """Scrapy item pipeline that upserts RequestErrorItem failures into
    MongoDB (via txmongo/Twisted) and passes every other item through."""

    @classmethod
    def from_crawler(cls, crawler: Crawler):
        # standard Scrapy factory hook
        return cls(crawler)

    def __init__(self, crawler: Crawler):
        self.crawler = crawler
        self.settings = crawler.settings

    @defer.inlineCallbacks
    def open_spider(self, spider):
        # connect once per spider run and ensure the lookup index exists
        self.client = yield ConnectionPool(self.settings['MONGODB_URI'])
        self.db = self.client[self.settings['MONGODB_DB']]
        self.error_coll = self.db[self.settings['MONGODB_COLL_ERROR']]
        yield self.error_coll.create_index(sort([('request_url', 1)]))

    @defer.inlineCallbacks
    def close_spider(self, spider):
        yield self.client.disconnect()

    @defer.inlineCallbacks
    def process_item(self, item, spider):
        if isinstance(item, RequestErrorItem):
            # upsert keyed by request_url so repeated failures overwrite
            yield self.error_coll.update_one(
                {'request_url': item['request_url']},
                {'$set': dict(item)},
                upsert=True
            )
            # error items are recorded, not forwarded down the pipeline
            raise DropItem
        else:
            return item
if __name__ == '__main__':
    # nothing to do when executed directly; module is used as a pipeline
    pass
"[email protected]"
] | |
8cc8fd94200c23e7132dc0f86c8621b83d8b5e94 | 2c26b51cfa45c96b62d04e64968adf98be30a0a0 | /solarforecastarbiter/reference_forecasts/utils.py | fa4292a5dab2737b1799810d4f8dd68e685113da | [
"MIT"
] | permissive | lboeman/solarforecastarbiter-core | dba0060feb7845a2a25e08d616d961b8070caf02 | 23ac3c7a699f720fb642c3cc7d1acb83b486c2a6 | refs/heads/master | 2023-02-02T05:56:37.318011 | 2021-03-26T23:50:05 | 2021-03-26T23:50:05 | 167,227,791 | 0 | 0 | MIT | 2019-01-23T17:51:20 | 2019-01-23T17:51:19 | null | UTF-8 | Python | false | false | 11,698 | py | import numpy as np
import pandas as pd
import pytz
from solarforecastarbiter.io import utils as io_utils
def get_issue_times(forecast, start_from):
    """Return a list of the issue times for a given Forecast starting
    from the date of `start_from` until the first issue time of the next day.

    The output timestamps are localized to the timezone of `start_from`.

    Parameters
    ----------
    forecast : datamodel.Forecast
        Forecast object that contains the time information
    start_from : pandas.Timestamp
        Return issue times from this same day in the same timezone

    Returns
    -------
    list
        pandas.Timestamp objects with the issue times for the particular day
        including the first issue time for the next day.
    """
    start_time = forecast.issue_time_of_day
    if start_time.tzinfo is None:
        # naive issue times are defined in UTC (stdlib tz, no pytz needed)
        start_time = start_time.replace(tzinfo=datetime.timezone.utc)
    # work for forecasts over 1d run length
    dayadj = pd.Timedelta(forecast.run_length).ceil('1d')
    # make broad range of times that should cover start_from and next time
    # even after timezone conversion
    earliest_start = pd.Timestamp.combine(
        (start_from - dayadj).date(), start_time)
    possible_times = []
    for i in range(3):
        start = earliest_start + i * dayadj
        end = (start + dayadj).floor('1d')
        possible_times.extend(list(
            pd.date_range(start=start, end=end, freq=forecast.run_length)))
    possible_times = pd.DatetimeIndex(possible_times).tz_convert(
        start_from.tz).drop_duplicates()
    # then slice the broad range based on start_from day.
    # searchsorted(side='left') returns the first position >= the key,
    # matching the former get_loc(..., method='bfill'), which was
    # deprecated in pandas 1.4 and removed in pandas 2.0.
    startloc = possible_times.searchsorted(
        start_from.floor('1d'), side='left')
    endloc = possible_times.searchsorted(
        (start_from + pd.Timedelta('1d')).floor('1d'), side='left') + 1
    return list(possible_times[startloc:endloc])
def get_next_issue_time(forecast, run_time):
    """Return the first issue time of *forecast* at or after *run_time*."""
    candidates = get_issue_times(forecast, run_time)
    position = np.searchsorted(candidates, run_time)
    return candidates[position]
def get_init_time(run_time, fetch_metadata):
    """Determine the most recent init time for which all forecast data is
    available."""
    delay = pd.Timedelta(fetch_metadata['delay_to_first_forecast'])
    max_run = pd.Timedelta(fetch_metadata['avg_max_run_length'])
    return (run_time - delay - max_run).floor(freq=fetch_metadata['update_freq'])
def get_forecast_start_end(forecast, issue_time,
                           adjust_for_interval_label=False):
    """
    Get absolute forecast start/end from *forecast* parameters and an
    absolute *issue_time*.

    Parameters
    ----------
    forecast : datamodel.Forecast
    issue_time : pd.Timestamp
    adjust_for_interval_label : boolean
        If True, nudges start or end by a nanosecond according to
        forecast.interval_label.

    Returns
    -------
    forecast_start : pd.Timestamp
    forecast_end : pd.Timestamp

    Raises
    ------
    ValueError if issue_time is not a valid issue time for this forecast
    """
    valid_issue_times = get_issue_times(forecast, issue_time)
    if issue_time not in valid_issue_times:
        raise ValueError(
            ('Incompatible forecast.issue_time_of_day %s, '
             'forecast.run_length %s, and issue_time %s') % (
                forecast.issue_time_of_day, forecast.run_length, issue_time))
    forecast_start = issue_time + forecast.lead_time_to_start
    forecast_end = forecast_start + forecast.run_length
    if adjust_for_interval_label:
        forecast_start, forecast_end = \
            io_utils.adjust_start_end_for_interval_label(
                forecast.interval_label, forecast_start, forecast_end, True)
    return forecast_start, forecast_end
def find_next_issue_time_from_last_forecast(forecast, last_forecast_time):
    """
    Find the next issue time for *forecast* given the timestamp of the last
    stored forecast value. If *last_forecast_time* is not the end of a run,
    the returned issue time overwrites it with a full-length forecast.

    Parameters
    ----------
    forecast : datamodel.Forecast
    last_forecast_time : pd.Timestamp
        Last timestamp available for the forecast

    Returns
    -------
    pd.Timestamp
        The next issue time for the forecast
    """
    probable_issue = (last_forecast_time
                      - forecast.run_length
                      - forecast.lead_time_to_start)
    # for beginning & instantaneous labels the subtraction above lands one
    # interval too early (e.g. 12:55 instead of 13:00), so shift forward
    if forecast.interval_label != 'ending':
        probable_issue = probable_issue + forecast.interval_length
    return get_next_issue_time(
        forecast, probable_issue + pd.Timedelta('1ns'))
def _is_intraday(forecast):
"""Is the forecast intraday?"""
# intra day persistence and "day ahead" persistence require
# fairly different parameters.
# is this a sufficiently robust way to distinguish?
return forecast.run_length < pd.Timedelta('1d')
def _intraday_start_end(observation, forecast, run_time):
    """
    Time range of data to be used for an intra-day persistence forecast.

    Parameters
    ----------
    observation : datamodel.Observation
    forecast : datamodel.Forecast
    run_time : pd.Timestamp

    Returns
    -------
    data_start : pd.Timestamp
    data_end : pd.Timestamp
    """
    _check_intraday_compatibility(observation, forecast)
    # look back over the run length, capped at one hour
    lookback = min(forecast.run_length, pd.Timedelta('1hr'))
    data_end = run_time
    data_start = run_time - lookback
    return data_start, data_end
def _dayahead_start_end(issue_time, forecast):
"""
Time range of data to be used for day-ahead persistence forecast.
Parameters
----------
issue_time : pd.Timestamp
forecast : datamodel.Forecast
Returns
-------
data_start : pd.Timestamp
data_end : pd.Timestamp
Notes
-----
Day-ahead persistence: uses the most recently available data that
maintains same times in forecast and observation data,
but shifts observation period by a number of days to end before
issue time.
"""
# data_end = last forecast time for next issue of run - 1 day
data_end = issue_time + forecast.lead_time_to_start + forecast.run_length
# data end should end before, not at issue time, so add the extra ns
data_end -= (data_end - issue_time + pd.Timedelta('1ns')).ceil('1d')
data_start = data_end - forecast.run_length
return data_start, data_end
def _weekahead_start_end(issue_time, forecast):
"""
Time range of data to be used for week-ahead persistence, aka, day of week
persistence.
Parameters
----------
issue_time : pd.Timestamp
lead_time : pd.Timedelta
Returns
-------
data_start : pd.Timestamp
data_end : pd.Timestamp
"""
data_start = issue_time + forecast.lead_time_to_start - pd.Timedelta('7d')
data_end = data_start + forecast.run_length
return data_start, data_end
def _adjust_for_instant_obs(data_start, data_end, observation, forecast):
# instantaneous observations require care.
# persistence models return forecasts with same closure as obs
if 'instant' in forecast.interval_label:
data_end -= pd.Timedelta('1s')
elif forecast.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
else:
data_start += pd.Timedelta('1s')
return data_start, data_end
def get_data_start_end(observation, forecast, run_time, issue_time):
    """
    Determine the observation window for a persistence forecast.

    For non-intraday persistence the window depends only on the issue time
    and forecast parameters, so the data used for a given forecast can be
    reasoned about without knowing when the forecast was generated.

    Parameters
    ----------
    observation : datamodel.Observation
    forecast : datamodel.Forecast
    run_time : pd.Timestamp
    issue_time : pd.Timestamp

    Returns
    -------
    data_start : pd.Timestamp
    data_end : pd.Timestamp
    """
    if _is_intraday(forecast):
        data_start, data_end = _intraday_start_end(
            observation, forecast, run_time)
    elif forecast.variable == 'net_load':
        # net load persists the same day of the previous week
        data_start, data_end = _weekahead_start_end(issue_time, forecast)
    else:
        data_start, data_end = _dayahead_start_end(issue_time, forecast)
    _check_instant_compatibility(observation, forecast)
    # instantaneous observations need a one-second nudge so each data point
    # contributes to the correct forecast interval
    if 'instant' in observation.interval_label:
        data_start, data_end = _adjust_for_instant_obs(
            data_start, data_end, observation, forecast)
    return data_start, data_end
def _check_instant_compatibility(observation, forecast):
if 'instant' in forecast.interval_label:
if 'instant' not in observation.interval_label:
raise ValueError('Instantaneous forecast cannot be made from '
'interval average observations')
if forecast.interval_length != observation.interval_length:
raise ValueError('Instantaneous forecast requires instantaneous '
'observation with identical interval length.')
def _check_intraday_compatibility(observation, forecast):
# time window over which observation data will be used to create
# persistence forecast.
if (observation.interval_length > forecast.run_length or
observation.interval_length > pd.Timedelta('1h')):
raise ValueError(
'Intraday persistence requires observation.interval_length '
'<= forecast.run_length and observation.interval_length <= 1h')
def check_persistence_compatibility(observation, forecast, index):
    """
    Check that *observation* can drive a persistence version of *forecast*.

    Parameters
    ----------
    observation : datamodel.Observation
        Metadata of the observation feeding the forecast.
    forecast : datamodel.Forecast
        Metadata of the desired forecast.
    index : bool
        True when persisting a clear-sky / AC power index rather than the
        observed value itself.

    Raises
    ------
    ValueError
        If the observation interval is too long for an intraday forecast,
        if an instantaneous forecast lacks a matching instantaneous
        observation, or if index persistence is requested for a run
        length of one day or more.
    """
    if _is_intraday(forecast):
        _check_intraday_compatibility(observation, forecast)
    elif index:
        raise ValueError('index=True not supported for forecasts'
                         ' with run_length >= 1day')
    _check_instant_compatibility(observation, forecast)
| [
"[email protected]"
] | |
dd9136710d4ba4be942bfcb16db4b546e7828ba7 | ce6c96ff6087b175a096f18c6b3e28a600aa372b | /page/models.py | 169aac4c99cee0ab7c42917f1047de49b3439336 | [] | no_license | epiphnani/AppSchool | 405b14700294878e3098ac163b89c6a40616d4dc | fc08d4a4406f373f10c395bd6656e83c77a6bbe0 | refs/heads/main | 2023-06-19T07:24:30.322753 | 2021-07-09T18:07:33 | 2021-07-09T18:07:33 | 384,483,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | from django.db import models
from django.contrib.auth.models import User
import os
# Create your models here.
#3rd apps field
from ckeditor.fields import RichTextField
def user_directory_path(instance, filename):
    """Upload path: file lands at MEDIA_ROOT/user_<owner id>/<filename>."""
    return f'user_{instance.user.id}/{filename}'
class PostFileContent(models.Model):
    """A user-uploaded attachment; stored under user_<id>/ via
    user_directory_path."""
    # owner of the uploaded file
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    file = models.FileField(upload_to=user_directory_path)
    # set once when the row is created
    posted = models.DateTimeField(auto_now_add=True)

    def get_file_name(self):
        """Return just the basename of the stored file path."""
        return os.path.basename(self.file.name)
def save_lesson_files(instance, filename):
    """Build the upload path for a Page's media file.

    Files are stored as ``img/lesson_files/<page_id>/<page_id>.<ext>``.
    When that path already exists on disk, a ``page_images`` path with
    ``<page_id>1`` as the stem is used instead.
    """
    upload_to = 'img/'
    ext = filename.split('.')[-1]
    if instance.page_id:
        filename = 'lesson_files/{}/{}.{}'.format(
            instance.page_id, instance.page_id, ext)
        if os.path.exists(filename):
            # BUG FIX: the original read instance.lesson_id here, but Page
            # defines page_id (not lesson_id), so the collision branch
            # raised AttributeError.
            new_name = str(instance.page_id) + '1'
            filename = 'page_images/{}/{}.{}'.format(
                instance.page_id, new_name, ext)
    return os.path.join(upload_to, filename)
class Page(models.Model):
    """A lesson page: rich-text content plus attached files and media."""
    # external identifier; also used to build media paths in save_lesson_files
    page_id = models.CharField(max_length=100, null=True, unique=True)
    title = models.CharField(max_length=150)
    # rich HTML body edited via CKEditor
    content = RichTextField()
    files = models.ManyToManyField(PostFileContent)
    video = models.FileField(upload_to=save_lesson_files, verbose_name="Media", blank=True, null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='page_owner')

    def __str__(self):
        return self.title
| [
"[email protected]"
] | |
a137cc244b691ebce1fe9778f17d13d4545fd3ed | ba350210808e3c01a0b3087087199667b85252c3 | /preproc/preproc_all20_info_usp.py | 5f5df7659244ef2a9e57921b8797867b57d58665 | [
"MIT"
] | permissive | zyxwvu321/Classifer_SSL_Longtail | c9b590748218955c20a85a111bc170fc50e52343 | e6c09414c49e695b0f4221a3c6245ae3929a1788 | refs/heads/master | 2022-11-07T23:21:28.565466 | 2020-06-24T14:25:50 | 2020-06-24T14:25:50 | 254,838,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 12:31:29 2020
test if meta data in ISIC19 include all the images in ISIC18
@author: cmj
"""
import cv2
from pathlib import Path
import pandas as pd
import numpy as np
from tqdm import tqdm
import os.path as osp
# Input/output paths: ISIC20 test metadata, precomputed color gains,
# color-adjusted image folder, and the merged info CSV to write.
fn_ISIC19_meta ='../data/ISIC20/test.csv'
fn_ISIC18_colorgain = './dat/all20_usp_colorgain.csv'
fd_im = '../data/all20_usp_coloradj'
out_csv = './dat/all20_info_usp.csv'

df = pd.read_csv(fn_ISIC19_meta)
datas_meta = df.values
df = pd.read_csv(fn_ISIC18_colorgain)
datas_colorgain = df.values
#seg result
flist = sorted(list(Path(fd_im).glob('*.jpg')))
fns = [fn.stem for fn in flist]
flist = [str(fn) for fn in flist]

# sanity check: report any image whose stem is missing from the metadata
for fn in fns:
    if fn not in datas_meta[:,0]:
        #print img filename if metafile not include
        print(fn)

# FN, hh, ww, class, meta(age,pos,sex, 3col, lesion_id is skipped), cropped bbox(4 col)) color_gain(3col)--------14 col
info_list = []
for idx, fn in enumerate(tqdm(flist)):
    # read each image to record its height/width
    img = cv2.imread(fn)
    hh,ww,_ = img.shape
    # look up the metadata row matching this image stem
    idx_meta = np.where(datas_meta[:,0]==Path(fn).stem)[0][0]
    # columns 3,4,2 -> age, pos, sex
    meta = datas_meta[idx_meta][[3,4,2]]
    #gt = np.where(datas[idx][1:]==1)[0][0]
    idx_colorgain = np.where(datas_colorgain[:,0]==Path(fn).stem)[0][0]
    color_gain = datas_colorgain[idx_colorgain]
    # gt is -1: test set has no labels
    info_list.append([Path(fn).stem, hh, ww,-1, *meta, *color_gain[1:4]])

df = pd.DataFrame(data = info_list, index = None,columns = ['fn','hh','ww','gt','age','pos','sex','g_r','g_g','g_b'])
df.to_csv(out_csv, index=False)
"[email protected]"
] | |
a1a0ed95ba867e93644ec09edfeeb9fa3db2740e | d2cbfb6f8f8e6ab3d72c392ad2658a8b3e3bbc0f | /exploring/compute_sie_september_decrease_map.py | 3b81f361d8ae0a05f9ace9a70a3cc0abbdf39e9c | [] | no_license | ychavail/sea-ice | 5c462d6303e63020c45689032089c6ca7ae45986 | ac099b7ae7c935c3674ef927a7aa3903e92e0c27 | refs/heads/master | 2020-03-24T05:11:13.315788 | 2019-04-17T20:55:28 | 2019-04-17T20:55:28 | 142,478,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | ##################################################################
# Description: Compute the NH-spatial field of the global sea ice extent at its annual minimum (september) of particular runs of CanESM2-LE when sea ice extent is exceptionnally low and a few years around.
# Code name: compute_sie_september_decrease_map.py
# Date of creation: 2018/04/06
# Date of last modification: 2018/04/09
# Contacts: [email protected]
##################################################################
## Needed packages
import netCDF4 as netcdf # description of the meaning of data and relations fields stored in a netcdf file
import os # portable way of using operating system dependent functionality
import numpy as np # scientific computing with Python
from datetime import datetime as dt
import time as tt
import sys
import functions_time as fct_t
start_time = tt.time()
## historical-r2 - r3i1p1 - 2002
nc13 = netcdf.Dataset('/dmf2/scenario/external_data/cccma/CanESM2_large_ensemble/historical-r2/day/seaIce/sic/r3i1p1/sic_day_CanESM2_historical-r2_r3i1p1_19500101-20201231.nc','r')
time13 = nc13.variables['time']
sic13 = nc13.variables['sic'][:,32:,:]
lat = nc13.variables['lat'][32:]
lon = nc13.variables['lon'][:]
years13 = np.arange(2000,2006)
for y in years13:
ind_t = fct_t.selmonthyear(y,9,time13[:],time13.units,time13.calendar)
sic13_y = np.mean(sic13[ind_t[0],:,:],axis=0)
sic13_y = np.expand_dims(sic13_y,axis=0)
if y == years13[0]:
sie13 = sic13_y
else:
sie13 = np.concatenate((sie13,sic13_y),axis=0)
## historical-r5 - r10i1p1 - 2012
nc50 = netcdf.Dataset('/dmf2/scenario/external_data/cccma/CanESM2_large_ensemble/historical-r5/day/seaIce/sic/r10i1p1/sic_day_CanESM2_historical-r5_r10i1p1_19500101-20201231.nc','r')
time50 = nc50.variables['time']
sic50 = nc50.variables['sic'][:,32:,:]
years50 = np.arange(2010,2016)
for y in years50:
ind_t = fct_t.selmonthyear(y,9,time50[:],time50.units,time50.calendar)
sic50_y = np.mean(sic50[ind_t[0],:,:],axis=0)
sic50_y = np.expand_dims(sic50_y,axis=0)
if y == years50[0]:
sie50 = sic50_y
else:
sie50 = np.concatenate((sie50,sic50_y),axis=0)
# saving data of sea ice extent in a netcdf file
outputdir = '/exec/yanncha/abrupt_changes/'
outputfilename = (outputdir+'sie_september_decrease_map_CanESM2-LE.nc')
outputfile = netcdf.Dataset(outputfilename,'w',format='NETCDF4')
outputfile.createDimension('lat',len(lat))
outputfile.createDimension('lon',len(lon))
outputfile.createDimension('time',len(years13))
sie13_ = outputfile.createVariable('sie13','f8',('time','lat','lon',))
sie50_ = outputfile.createVariable('sie50','f8',('time','lat','lon',))
time13_ = outputfile.createVariable('time13','i2',('time',))
time50_ = outputfile.createVariable('time50','i2',('time',))
lat_ = outputfile.createVariable('lat','i2',('lat',))
lon_ = outputfile.createVariable('lon','i2',('lon',))
#family_ = outputfile.createVariable('family','str',('sim',))
#member_ = outputfile.createVariable('member','str',('sim',))
sie13_[:,:,:] = sie13[:,:,:]
sie50_[:,:,:] = sie50[:,:,:]
time13_[:] = years13[:]
time50_[:] = years50[:]
lat_[:] = lat[:]
lon_[:] = lon[:]
#family_[:] = family[:]
#member_[:] = member[:]
sie13_.units = "%"
sie50_.units = "%"
lat_.units = "degree North"
lon_.units = "degree East"
outputfile.description = "Sea ice extent in the Northern Hemisphere in september from the CanESM2-LE."
outputfile.history = ('Created '+tt.ctime())
outputfile.contact = '[email protected]'
outputfile.close()
| [
"[email protected]"
] | |
c845a5eb8218c2d1bb4f97d5d5b1c926830ba184 | 32f0be74561308ef561a2971bf6c48e374b1a1b7 | /vistem/utils/timer.py | a147c60d4801831270dff979d8c06c157d27757f | [] | no_license | major196512/vistem | 9f97ddec02c5d8df8e2ba18e839fa74c92f1752d | 085a9a5e7af601dd536df88aab8865c978b63a97 | refs/heads/master | 2023-01-09T01:08:31.348249 | 2020-11-12T16:26:52 | 2020-11-12T16:26:52 | 287,187,456 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | from time import perf_counter
from typing import Optional
class Timer:
def __init__(self, warmup=0, pause=False) -> None:
self.reset(warmup, pause)
def reset(self, warmup=0, pause=False) -> None:
self._start = perf_counter()
self._warmup = warmup
self._paused: Optional[float] = None
self._total_paused = 0
self._total_warmup = 0
self._count_start = 0
if pause : self._paused = perf_counter()
else : self._count_start += 1
def pause(self) -> None:
if self._paused is not None:
raise ValueError("Trying to pause a Timer that is already paused!")
if self._warmup > 0:
self._total_warmup = perf_counter() - self._start - self._total_paused
self._warmup -= 1
self._count_start -= 1
self._paused = perf_counter()
def is_paused(self) -> bool:
return self._paused is not None
def resume(self) -> None:
if self._paused is None:
raise ValueError("Trying to resume a Timer that is not paused!")
self._total_paused += (perf_counter() - self._paused)
self._paused = None
self._count_start += 1
def seconds(self) -> float:
if self._paused is not None:
end_time: float = self._paused # type: ignore
else:
end_time = perf_counter()
return end_time - self._start - self._total_paused - self._total_warmup
def avg_seconds(self) -> float:
return self.seconds() / self._count_start
def total_seconds(self) -> float:
return perf_counter() - self._start - self._total_warmup
| [
"[email protected]"
] | |
a76c100ad1a6e37a7810d7d70dcc316c105ceaa5 | 36308e5fec6cc87cdf76a5e4d966c70a3c182167 | /venv/Scripts/easy_install-script.py | 1072f16594e0e14655e3366155e26f8b91f6343e | [] | no_license | ashifa-parvin/samples | 9889ea308560143117591ded4e66fe43c7d082e1 | e27065fa1b21fae93f91c1c3ee1156a230ceccf1 | refs/heads/master | 2022-07-12T10:44:48.327722 | 2020-05-14T12:53:24 | 2020-05-14T12:53:24 | 263,914,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!C:\Users\AJ8090\PycharmProjects\samples\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
bbbe9a3829272abaa12996e0bfe7d2850bb50e26 | 8c9b76cf4cdb2e661602acb1a3e227be57e217b8 | /Data/transform.py | 4145e0157981453ccb9f38f1fedd97687702d317 | [] | no_license | JoesRain/CancerDiag | 118e6751492c70c898b51551a77d7c39bddc0482 | 34e9fb2e8e9fde2efd8ced1a58d94d7a4e16b480 | refs/heads/master | 2022-12-14T19:29:48.183876 | 2020-09-21T06:10:55 | 2020-09-21T06:10:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,872 | py | __all__ = ('PadResize', 'ToNormTensor', 'Scale', 'PILResize', 'ConvertToTensor', 'Norm',)
import torch
import torch.nn.functional as F
import numpy as np
from PIL import Image
from collections import Sequence
from torchvision.transforms import Compose, ToTensor, Normalize
class PadResize:
"""Resize an PIL Image to the target size, with unchanged aspect ratio using padding.
note that this way will lose the edge info for segmentation"""
def __init__(self, target_size: (int, tuple), interpolation=Image.BICUBIC):
assert isinstance(target_size, (int, tuple))
if isinstance(target_size, int):
target_size = (target_size, target_size)
self.size = target_size
self.inter = interpolation
def __call__(self, item: dict):
img = item.get('image')
resized_img = self._resize(img)
if item.get('label'):
mask = item.get('label')
# maskไฝฟ็จๆ่ฟ้ปๆๅผ๏ผ
resized_mask = self._resize(mask, inter=Image.NEAREST)
item.update(image=resized_img, label=resized_mask)
else:
item.update(image=resized_img)
return item
def _resize(self, img, inter=None):
# ๅๅงๅฐบๅฏธ
ori_w, ori_h = img.size
# ็ฎๆ ๅฐบๅฏธ
tar_w, tar_h = self.size
# ๅฎฝใ้ซๆๅฐ็็ผฉๆพๆฏไพ
scale = min(tar_w / ori_w, tar_h / ori_h)
# ่ขซโๆๆโ็ผฉๆพ็ๅฎฝใ้ซ๏ผไฟๆๅๅง็ๅฎฝใ้ซๆฏ
val_w, val_h = int(scale * ori_w), int(scale * ori_h)
if inter is None:
inter = self.inter
# ไฟๆๅๅพๅฎฝใ้ซๆฏ่ฟ่ก็ผฉๆพ
# ไฝๆณจๆ่ฟไธชๅฐบๅฏธไธๆฏ็ฎๆ ๅฐบๅฏธ
resized_img = img.resize((val_w, val_h), inter)
# float32็ฑปๅ
valid = np.asarray(resized_img, dtype='float')
img_arr = np.asarray(img, dtype='float')
# ๅพๅๆฏ3็ปด็ฉ้ต
if img_arr.ndim == 3:
# ๅ้้ๅ็ด ๅผๅๅผ
pad = img_arr.mean(axis=(0, 1))
target_arr = np.zeros((tar_h, tar_w, 3))
# maskๆฏไบ็ปด็ฉ้ต
# ่ฟ็งๆนๆณๅฏนmaskไธๅฏ่ก๏ผไผไธขๅคฑ่พน็ผไฟกๆฏ๏ผไธไผไฝฟๅพmask้ไบๅผ
else:
pad = img_arr.mean()
# print("pad value:{}".format(pad))
target_arr = np.zeros((tar_h, tar_w))
target_arr[:, :] = pad
# ไธญๅฟๅบๅ็ปดๆๅๅพๅฎฝใ้ซๆฏ
start_y = (tar_h - val_h) // 2
end_y = start_y + val_h
start_x = (tar_w - val_w) // 2
end_x = start_x + val_w
target_arr[start_y:end_y, start_x:end_x] = valid
# print("unique target_arr:{}".format(np.unique(target_arr)))
# ่ฟๅๆๅพๅๆถๆณจๆ่ฝฌๆขๅuint8็ฑปๅ
target_img = Image.fromarray(target_arr.astype('uint8'))
# print("unique target uint8:{}".format(np.unique(target_img)))
return target_img
class ToNormTensor:
"""Convert PIL Image to normalized tensor."""
def __init__(self, mean: (Sequence, int, float) = None, std: (Sequence, int, float) = None):
if mean is not None and std is not None:
if not isinstance(mean, Sequence):
mean = [mean] * 3
if not isinstance(std, Sequence):
std = [std] * 3
for m in mean:
assert 0. <= m <= 255.
if m > 1:
m /= 255.
for s in std:
assert 0. <= s <= 255.
if s > 1:
s /= 255.
self.mean = mean
self.std = std
def __call__(self, item: dict):
if self.mean is not None and self.std is not None:
# Normalize()็ๆไฝๅฏน่ฑกๆฏ๏ผC,H,W๏ผ็tensor๏ผๅ ๆญคๅ
ไฝฟ็จToTensor()
# ไฝๆณจๆToTensor()ๅฐๅฏน่ฑกๅฝไธๅๅฐ0-1ไน้ด๏ผๅ ๆญค่ฟ้meanๅstd้ฝ้่ฆๅจ0-1ไน้ด
_transform = Compose([
ToTensor(),
Normalize(self.mean, self.std)
])
else:
_transform = ToTensor()
img = item.get('image')
img_tensor = _transform(img)
item.update(image=img_tensor)
if item.get('label') is not None:
mask = item.get('label')
# uint8->int64
# ไฝฟ็จ.copy()๏ผๅฆๅtorchไผๅบ็ฐwarning
mask_tensor = torch.from_numpy(np.asarray(mask, dtype='long').copy())
# # (H, W) -> (1, H, W)
# # ไธบmaskๅขๅ 1ไธชๅฏนๅบ้้็็ปดๅบฆ
# mask_tensor = mask_tensor.unsqueeze(0)
# assert mask_tensor.dim() == 3 and mask_tensor.shape[0] == 1
item.update(label=mask_tensor)
return item
class ConvertToTensor:
"""Convert PIL Image to Pytorch tensor."""
def __init__(self):
self._transform = ToTensor()
def __call__(self, item: dict):
img = item.get('image')
# uint8->float32
img_tensor = self._transform(img)
item.update(image=img_tensor)
mask = item.get('label')
if mask:
# uint8->int64
mask_tensor = torch.from_numpy(np.asarray(mask, dtype='long').copy())
item.update(label=mask_tensor)
return item
class Norm:
"""Normalize a tensor image with mean and standard deviation."""
def __init__(self, mean=None, std=None):
if mean is None or std is None:
mean = [0.] * 3
std = [1.] * 3
for i, m in enumerate(mean):
assert 0 <= m <= 255
if m > 1:
mean[i] = m / 255
for j, s in enumerate(std):
assert 0 <= s <= 255
if s > 1:
std[j] = s / 255
# print("mean=", mean)
# print("std=", std)
self._transform = Normalize(mean, std)
def __call__(self, item: dict):
img = item.get('image')
normalized_img = self._transform(img)
item.update(image=normalized_img)
return item
class Scale:
"""Scale the image tensor with Pytorch implementation.
ps: recommend to set align_corners=True if scale the image, default is False"""
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
if mode in ('nearest', 'area') and align_corners:
raise ValueError("align_corners option can only be set with the "
"interpolating modes: linear | bilinear | bicubic | trilinear")
assert size is not None or scale_factor is not None, "'size' and 'scale factor' cannot be both None!"
# ็ฎๆ ๅฐบๅฏธ๏ผ้ซ๏ผๅฎฝ๏ผ
if size is not None:
self.size = size
# ็ผฉๆพ็ณปๆฐ
if scale_factor is not None:
self.factor = scale_factor
# ๆๅผๆนๅผ
self.mode = mode
# ๅ็ด ๆฏ็ไฝ่ง็น่ฟๆฏๆนๅฝข็ไธญๅฟ
self.align_corners = align_corners
def __call__(self, item: dict):
# Pytorch็ๆๅผๆนๆณๅฏนไบๅพๅ้่ฆinputๆฏ4Dๅผ ้
# (H,W,C) -> (C,H,W)๏ผๆณจๆ่ฝฌๆขไธบfloat๏ผๆๅผ้่ฆ
# img_arr = np.asarray(item.get('image'), dtype='float').transpose(2, 0, 1)
# 3D->4D tensor: (C,H,W)->(1,C,H,W)
# img_ts = torch.from_numpy(img_arr).unsqueeze(0)
img_ts = item.get('image').unsqueeze(0)
if self.size is not None:
resized_img_ts = F.interpolate(img_ts, self.size, mode=self.mode, align_corners=self.align_corners)
else:
resized_img_ts = F.interpolate(img_ts, scale_factor=self.factor, mode=self.mode, align_corners=self.align_corners)
# 4D->3D tensor: (1,C,H,W)->(C,H,W)
img = resized_img_ts.squeeze(0)
_, h, w = img.shape
assert h == self.size[0] and w == self.size[1]
# (C,H,W)->(H,W,C)
# img_arr = img_ts.squeeze(0).numpy().transpose(1, 2, 0)
# # ๆณจๆๆขๅคไธบuint8
# img = Image.fromarray(img_arr.astype('uint8'))
item.update(image=img)
if item.get('label') is not None:
# maskไฝฟ็จๆ่ฟ้ปๆๅผ
# ๆๅผๆถ่ฆๆฑ่พๅ
ฅๆฏๆตฎ็น็ฑปๅ
# (H,W)
# mask_arr = np.asarray(item.get('label'), dtype='float')
# (H,W)->(1,H,W)
# mask_arr = mask_arr[np.newaxis].transpose(2, 0, 1)
# 3D->4D tensor: (1,H,W)->(1,1,H,W)
# mask_ts = torch.from_numpy(mask_arr).unsqueeze(0)
mask_ts = item.get('label').float()
mask_ts = mask_ts.unsqueeze(0).unsqueeze(0)
# mask็ไฝฟ็จ้ป่ฎค็ๆ่ฟ้ปๆนๅผ่ฟ่กๆๅผ
if self.size is not None:
resized_mask_ts = F.interpolate(mask_ts, self.size)
else:
resized_mask_ts = F.interpolate(mask_ts, scale_factor=self.factor)
# (1,1,H,W)->(H,W)
# mask_arr = mask_ts.squeeze().numpy()
# ๆขๅคไธบuint8็ฑปๅ
# mask = Image.fromarray(mask_arr.astype('uint8'))
# float->long
mask = resized_mask_ts.squeeze().long()
h, w = mask.shape
assert h == self.size[0] and w == self.size[1]
item.update(label=mask)
return item
class PILResize:
"""Resize the PIL image."""
def __init__(self, size, mode=Image.BILINEAR):
# (W,H)
self.size = size
self.mode = mode
def __call__(self, item):
image = item.get('image')
resized_image = image.resize(self.size, self.mode)
assert resized_image.size == self.size
item.update(image=resized_image)
mask = item.get('label')
if mask:
# maskไฝฟ็จๆ่ฟ้ปๆๅผ
resized_mask = mask.resize(self.size, Image.NEAREST)
assert resized_mask.size == self.size
item.update(label=resized_mask)
return item
| [
"[email protected]"
] | |
a218aba8b54a5b5d0739652f34a611c4b69eed71 | 0e0447a51812ad2e4c1127ed00019f1d0d91878a | /blog/migrations/0001_initial.py | 19491c15f5652699c8abc9b454725300652d6fb7 | [] | no_license | yerminaaa/my-first-blog | a2412580fb66768b93bc4c21f640ba18257855cc | e70c7b51d52b44e6e1971e70f14ad7eb3272e329 | refs/heads/master | 2023-01-04T05:41:18.416151 | 2020-11-01T14:48:18 | 2020-11-01T14:48:18 | 309,077,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.2.16 on 2020-11-01 10:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
8e9b32db7dfc3060bddd44dd6983a037a8cdebc2 | 4186a478df1cbe5ab626984a09f28757f5fa3e5f | /generalization/general_player.py | dba4f8bc33fd05b6d8074ed16070e2320fe16cea | [] | no_license | Tyler-Pearson/OpenAI | 439a7a103c42779d40c5b6e297bc836d6d82703b | 19e69f5f8b8b91402a7ddaa902ca01912ef9e447 | refs/heads/master | 2020-03-17T02:25:25.596585 | 2018-08-26T21:50:05 | 2018-08-26T21:50:05 | 133,189,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,710 | py | import time
import gym
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from statistics import mean, median
from collections import Counter
def play_game(env, goal_steps, display, model):
score = 0
game_memory = []
prev_obs = []
env.reset()
for _ in range(goal_steps):
if display:
env.render()
if (len(prev_obs) == 0) or (model is None):
action = env.action_space.sample()
else:
action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])
new_obs, reward, done, info = env.step(action)
if len(prev_obs) > 0:
game_memory.append([prev_obs, action])
prev_obs = new_obs
score += reward
if done:
break
return score, game_memory
def get_pop(env, action_count, pop_size, goal_steps, min_threshold, model):
scores = []
training_data = []
accepted_scores = []
while len(accepted_scores) < pop_size:
score, game_memory = play_game(env, goal_steps, False, model)
if score > min_threshold:
accepted_scores.append(score)
for data in game_memory:
output = np.zeros(action_count)
output[data[1]] = 1
training_data.append([data[0], output])
scores.append(score)
return training_data, accepted_scores, scores
def neural_network_model(input_size, action_count, LR=1e-3):
network = input_data(shape=[None, input_size, 1], name='input')
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, action_count, activation='softmax') #output layers
network = regression(network, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
def train_model(training_data, action_count, max_steps, model=False):
X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]), 1)
y = [i[1] for i in training_data]
if not model:
model = neural_network_model(input_size = len(X[0]), action_count=action_count)
#n_epoch should be determined dynamically
model.fit({'input':X}, {'targets':y}, n_epoch=5, snapshot_step=max_steps, show_metric=True, run_id='openaistuff')
return model
def test_model(env, model, max_steps):
test_scores = []
for i in range(100):
score, mem = play_game(env, max_steps, i < 5, model)
if (i < 5):
print("Test {}: {}".format(i+1, score))
test_scores.append(score)
print("Average test score: {}".format(mean(test_scores)))
print("Scores: {}".format(Counter(test_scores)))
def play(game_name, max_steps, score_req):
env = gym.make(game_name)
env._max_episode_steps = max_steps
action_count = env.action_space.n
pop_size = 40
training_data, accepted, train_scores = get_pop(env, action_count, pop_size, max_steps, score_req, None)
print("Average training score: {}".format(mean(train_scores)))
print("Average accepted mean: {}".format(mean(accepted)))
print("Accepted count: {}".format(Counter(accepted)))
model = train_model(training_data, action_count, max_steps)
raw_input("Press enter to test model...")
test_model(env, model, max_steps)
def demo(game_name, steps, accepted, disp_count):
raw_input("Press enter to demo...")
env = gym.make(game_name)
env._max_episode_steps = steps
action_count = env.action_space.n
count = 0
score_total = 0
print("\nDemo-ing {}\n---------\nrandom moves\ndisplay first {} of 10 games".format(game_name, disp_count))
for i in range(10):
score, mem = play_game(env, steps, i < disp_count, None)
# print("Score: {}".format(score))
if (i < disp_count):
print("score: {}".format(score))
time.sleep(0.5)
score_total += score
if score > accepted:
count += 1
print("Wins out of 10 attempts: {}".format(count))
print("Avg random moves score: {}".format(score_total / 10))
def main():
# play('CartPole-v0', 500, 130)
# play('MountainCar-v0', 1000, -950)
# play('Acrobot-v1', 1500, -1200)
demo('MountainCar-v0', 1500, -700, 5)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
fe36961878d2f98518099e1da4ece8786c06190d | 16480b5b49fe66494c62d57a4f7f05772b48e930 | /manager.py | cedd5ec41619952bc1a52ea4d48661089f3837f3 | [
"MIT"
] | permissive | tpearson1/time | a2f0700dbb055575072f60d8711b55a6300cbe2a | cf4ab2426907fe70a8c9e2ec30dc53ae6ff62992 | refs/heads/master | 2020-03-11T19:06:00.328524 | 2018-11-26T05:11:10 | 2018-11-26T05:11:10 | 130,197,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,568 | py | #!/usr/bin/python
# Time Management Program
import config_utils as cfgu
import config as cfg
import os.path
import parts.timer as timer
import parts.duration as duration
import parts.logentry as logentry
import parts.log as log
from datetime import datetime, date, timedelta
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject
PROGRAM_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = PROGRAM_DIR + "/data"
LOG_FILE = DATA_DIR + "/log.txt"
DISPLAY_WEEK = 0
DISPLAY_MONTH = 1
DISPLAY_YEAR = 2
DISPLAY_ALL = 3
FIRST_DAY_WORK_MESSAGE = "First day using this software - You will be able to give \
yourself tomorrow's task(s) at the end of today's work"
def chosen_event_text(chosen):
days = chosen.days_till_event()
if days == 0:
return "Today: " + chosen.description
return "{} day{} from now: {}".format(days, "" if days == 1 else "s",
chosen.description)
def chosen_events_text():
chosen = cfg.pick_event()
if chosen == []:
return "No chosen event"
result = ""
for event in chosen:
result += chosen_event_text(event) + '\n';
return result
def create_data_dir():
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
class Main:
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file(PROGRAM_DIR + "/interface.glade")
self.timer = timer.Timer(self.update_times)
create_data_dir()
self.log = log.Log(LOG_FILE)
handlers = {
"on-destroy":
self.shutdown_app,
"start-counting":
lambda _: self.start_timer(),
"stop-counting":
lambda _: self.stop_timer(),
"finish-work":
lambda _: self.finish_work(),
"build-week-display":
lambda _: self.build_log_display(DISPLAY_WEEK),
"build-month-display":
lambda _: self.build_log_display(DISPLAY_MONTH),
"build-year-display":
lambda _: self.build_log_display(DISPLAY_YEAR),
"build-full-display":
lambda _: self.build_log_display(DISPLAY_ALL)
}
self.builder.connect_signals(handlers)
self.window = self.builder.get_object("main-window")
self.window.show_all()
self.has_clicked_finish = False
self.get_event_and_message()
self.setup_times()
self.log_list_store = self.builder.get_object("work-log-data")
self.build_log_display(DISPLAY_WEEK)
self.setup_work_input_boxes()
def start_timer(self):
self.timer.start()
# Prevent clicking the button multiple times
self.start_timer_button.set_sensitive(False)
self.stop_timer_button.set_sensitive(True)
def stop_timer(self):
self.timer.stop()
# Prevent clicking the button multiple times
self.stop_timer_button.set_sensitive(False)
self.start_timer_button.set_sensitive(True)
def setup_work_input_boxes(self):
self.accomplished_input_box = self.builder.get_object("work-today")
self.for_tomorrow_input_box = self.builder.get_object("work-tomorrow")
today = self.log.entry_for_today()
if today is None:
return
self.accomplished_input_box.set_text(today.accomplished)
self.for_tomorrow_input_box.set_text(today.for_tomorrow)
def work_message(self):
prev = self.log.entry_before_today()
if prev is None:
return FIRST_DAY_WORK_MESSAGE
return prev.for_tomorrow
def get_event_and_message(self):
next_event_label = self.builder.get_object("next-event")
next_event_label.set_text(chosen_events_text())
message_for_day = self.builder.get_object("message-for-day")
message_for_day.set_text(self.work_message())
def get_overdue(self):
latest = self.log.latest_entry()
if latest is None:
# No overdue time from an empty log
return cfgu.make_duration(0)
if self.log.entry_for_today_present():
# Overdue time carries over from previous days
return latest.overdue
else:
unworked_from_earlier = \
latest.expected_time_working - latest.time_worked
if unworked_from_earlier.total_seconds() > 0:
return latest.overdue + unworked_from_earlier
return latest.overdue
def remaining_work_time_today(self):
if self.log.is_empty():
return cfg.expected_work_time_today()
today = self.log.entry_for_today()
if today is not None:
unworked = today.expected_time_working - today.time_worked
if unworked.total_seconds() > 0:
return unworked
else:
# No work required today
return cfgu.make_duration(0)
else:
# Since they haven't worked today yet, we can just use the
# expected work time for today
return cfg.expected_work_time_today()
def setup_times(self):
self.time_remaining_label = self.builder.get_object("time-remaining")
self.time_for_today_label = self.builder.get_object("time-for-today")
self.overdue_label = self.builder.get_object("overdue")
self.start_timer_button = self.builder.get_object("start-timer")
self.stop_timer_button = self.builder.get_object("stop-timer")
# Should not be able to click stop unless the timer is running
self.stop_timer_button.set_sensitive(False)
self.time_for_today = self.remaining_work_time_today()
self.time_for_today_secs = self.time_for_today.total_seconds()
self.overdue_secs = self.get_overdue().total_seconds()
self.time_worked_secs = 0
self.set_times()
def update_times(self):
self.time_worked_secs += 1
if self.time_for_today_secs > 0:
self.time_for_today_secs -= 1
elif self.overdue_secs > 0:
self.overdue_secs -= 1
if self.overdue_secs == 0:
self.timer.stop()
self.set_times()
def get_time_remaining(self):
return self.time_for_today_secs + self.overdue_secs
def finish_work(self):
self.has_clicked_finish = True
self.timer.stop()
accomplished = self.builder.get_object("work-today").get_text()
for_tomorrow = self.builder.get_object("work-tomorrow").get_text()
time_worked = timedelta(seconds=self.time_worked_secs)
overdue = timedelta(seconds=self.overdue_secs)
today = self.log.entry_for_today()
if today is not None:
today.time_worked += time_worked
today.overdue = overdue
today.accomplished = accomplished
today.for_tomorrow = for_tomorrow
self.log.push_entry(today)
else:
self.log.push_entry(
logentry.LogEntry.for_today(cfg.expected_work_time_today(),
time_worked, overdue, accomplished,
for_tomorrow))
def set_times(self):
self.time_remaining_label.set_text(
str(timedelta(seconds=self.get_time_remaining())))
self.time_for_today_label.set_text(
str(timedelta(seconds=self.time_for_today_secs)))
self.overdue_label.set_text(str(timedelta(seconds=self.overdue_secs)))
def shutdown_app(self, o, d=None):
if not self.has_clicked_finish:
self.finish_work()
Gtk.main_quit(o, d)
def build_log_display(self, period):
self.log_list_store.clear()
for entry in self.log.entries:
entry_age = date.today() - entry.entry_date
if period == DISPLAY_WEEK:
if duration.longer_than(entry_age, timedelta(weeks=1)):
continue
if period == DISPLAY_MONTH:
if duration.longer_than(entry_age, timedelta(days=30)):
continue
if period == DISPLAY_YEAR:
if duration.longer_than(entry_age, timedelta(days=365)):
continue
self.log_list_store.append([
str(entry.entry_date),
str(entry.expected_time_working),
str(entry.time_worked),
str(entry.overdue), entry.accomplished, entry.for_tomorrow
])
Main()
Gtk.main()
| [
"[email protected]"
] | |
5f7ea256ab54931233592b33790684eb18f0b154 | 9114a65cb643c076452d7d51d87ce764b175d22a | /dsba6155project/data_pull/data_flow/entity_job/setup.py | d11c5ace37774b830c51728a270682d2e6856b36 | [] | no_license | jananiarunachalam/dsba6155project | 827463a3570e060e2e58cc4e988a512bebdfc072 | 6cbcf9eda7aeb98b998d2653d9f9d5983b23442d | refs/heads/master | 2022-06-09T00:25:44.193250 | 2020-05-06T04:41:47 | 2020-05-06T04:41:47 | 261,603,534 | 0 | 0 | null | 2020-05-05T23:15:09 | 2020-05-05T23:15:08 | null | UTF-8 | Python | false | false | 2,517 | py | import setuptools
from distutils.command.build import build as _build # type: ignore
import os
import logging
import subprocess
import setuptools.command.build_py
import distutils.cmd
import distutils.log
import setuptools
class build(_build): # pylint: disable=invalid-name
sub_commands = _build.sub_commands + [('CustomCommands', None)]
# CUSTOM_COMMANDS = [
# ["wget" ,"'https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.2.5/en_core_web_md-2.2.5.tar.gz'"],
# ["tar","-xvzf" ,"en_core_web_md-2.2.5.tar.gz" , "-C" , "/usr/spacy_model" ],
# #['pip', 'install', 'https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.2.5/en_core_web_md-2.2.5.tar.gz']
# ]
CUSTOM_COMMANDS = [["easy_install" , "https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.2.5/en_core_web_md-2.2.5.tar.gz"]]
class CustomCommands(setuptools.Command):
"""A setuptools Command class able to run arbitrary commands."""
def initialize_options(self):
pass
def finalize_options(self):
pass
def RunCustomCommand(self, command_list):
print('Running command: %s' % command_list)
p = subprocess.Popen(
command_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Can use communicate(input='y\n'.encode()) if the command run requires
# some confirmation.
stdout_data, _ = p.communicate()
print('Command output: %s' % stdout_data)
if p.returncode != 0:
raise RuntimeError(
'Command %s failed: exit code: %s' % (command_list, p.returncode))
def run(self):
for command in CUSTOM_COMMANDS:
self.RunCustomCommand(command)
setuptools.setup(
name='entity-job',
version='1.0',
install_requires=[
"spacy",
"spacy-lookups-data",
"apache-beam",
"apache_beam[gcp]"
#"spacy-model @ https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.2.5/en_core_web_md-2.2.5.tar.gz"
],
# dependency_links=[
# "https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.2.5/en_core_web_md-2.2.5.tar.gz"
# ],
packages=setuptools.find_packages(),
cmdclass={
# Command class instantiated and run during pip install scenarios.
'build': build,
'CustomCommands': CustomCommands,
}
)
#os.system("pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_md-2.2.5/en_core_web_md-2.2.5.tar.gz")
| [
"[email protected]"
] | |
052a400db0a9b0de41885de17c491c39852d387e | 4e0f14507502820fbae8241bd8079def5669b54f | /boxoffice/test_tickets.py | 4ab7635b6f857c3484056938cc531210976339e7 | [
"MIT"
] | permissive | Code-Institute-Submissions/TWCoulsdon | 29ca6aa327c8bac3b7d451654118ac6f30335308 | 870ae7e8ea6a3fc23d24fe21bbb21965cdbab27b | refs/heads/main | 2023-05-04T08:29:46.952648 | 2021-05-29T18:14:40 | 2021-05-29T18:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,212 | py | """ Unit tests for tickets.py """
# pylint: disable=R0902
# Creating lots of member attributes for setup of tests
import json
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from events.models import Event, EventDate, Venue
from .models import TicketType, Ticket, Order
from .tickets import (check_ticket_available, check_basket_availability,
check_order_availabillity, create_tickets_for_order,
TicketsNotAvailable, EmptyOrder)
class TestTestCheckAvailability(TestCase):
""" Tests the various ticket availability functions """
def setUp(self):
self.venue = Venue.objects.create(
name = "Test Venue",
capacity = 10
)
self.event_one = Event.objects.create(
title = "Test Event",
description = "Test Event",
venue = self.venue
)
self.event_two = Event.objects.create(
title = "Test Event",
description = "Test Event",
venue = self.venue
)
self.date_event_one = EventDate.objects.create(
event = self.event_one, date = timezone.now() + timedelta(days=10))
self.dates_event_two = []
self.dates_event_two.append(EventDate.objects.create(
event = self.event_two, date = timezone.now() + timedelta(days=15)))
self.dates_event_two.append(EventDate.objects.create(
event = self.event_two, date = timezone.now() + timedelta(days=20)))
self.order = Order.objects.create(
full_name = "Test User",
email = "[email protected]",
phone_number = "01234567890",
)
self.ticket_types = []
self.ticket_types.append(TicketType.objects.create(
name = "test", display_name = "Test Ticket"))
self.ticket_types.append(TicketType.objects.create(
name = "test2", display_name = "Test Ticket2"))
# Create tickets for event_two
for date in self.dates_event_two:
for _ in range(0, 5):
Ticket.objects.create(
order = self.order, type = self.ticket_types[0],
event = self.event_two, date = date
)
# Create some test baskets
self.good_basket = {} # All good
self.bad_basket = {} # All bad
self.mixed_basket = {} # Some good, some bad
# Create a basket with available tickets
for date in self.dates_event_two:
self.good_basket[date.id] = {
self.ticket_types[0].id: 2,
self.ticket_types[1].id: 2,
}
# Create a basket with unavailable tickets
for date in self.dates_event_two:
self.bad_basket[date.id] = {
self.ticket_types[0].id: 4,
self.ticket_types[1].id: 4,
}
# Create a basket with a mix of available and unavailable tickets
self.mixed_basket[self.date_event_one.id] = {
self.ticket_types[0].id: 4,
self.ticket_types[1].id: 4,
}
for date in self.dates_event_two:
self.mixed_basket[date.id] = {
self.ticket_types[0].id: 4,
self.ticket_types[1].id: 4,
}
    def test_check_ticket_available(self):
        """ Tests correct returns from check_ticket_available """
        # Fixture context (from setUp): venue capacity is 10; event_one has no
        # tickets sold, each date of event_two already has 5 tickets sold.
        # Check correct return if no tickets have been bought
        self.assertTrue(check_ticket_available(self.date_event_one, 2))
        # Check correct return if tickets have been bought, but enough are left
        self.assertTrue(check_ticket_available(self.dates_event_two[0],2))
        # Check correct return if required and remaining are the same
        # (5 sold + 5 requested == capacity 10, so this must still succeed)
        self.assertTrue(check_ticket_available(self.dates_event_two[0],5))
        # Check correct return if there are not enough tickets
        self.assertFalse(check_ticket_available(self.dates_event_two[0],8))
    def test_check_basket_availability(self):
        """ Tests correct returns from check_basket_availability """
        # Check correct return from a basket with availability
        self.assertTrue(check_basket_availability(self.good_basket))
        # Check correct return from a basket with no availability
        self.assertRaises(TicketsNotAvailable, check_basket_availability,
            self.bad_basket)
        # Check correct return from a basket with some availability
        self.assertRaises(TicketsNotAvailable, check_basket_availability,
            self.mixed_basket)
        # The raised exception should identify the first unavailable date:
        # in mixed_basket that is the first date of event_two (its dates have
        # only 5 seats left but 8 are requested).
        try:
            check_basket_availability(self.mixed_basket)
        except TicketsNotAvailable as error:
            self.assertEqual(error.date_id, self.dates_event_two[0].id)
    def test_check_order_availabillity(self):
        """ Tests correct returns from check_order_availabillity """
        # NOTE(review): 'availabillity' is misspelled but matches the name of
        # the function under test, so it must stay spelled this way here.
        # The checker reads the order's serialized basket, so each scenario
        # swaps in a different JSON-encoded basket before calling it.
        # Check an order with availability
        self.order.original_basket = json.dumps(self.good_basket)
        self.assertTrue(check_order_availabillity(self.order))
        # Check an order with no availability
        self.order.original_basket = json.dumps(self.bad_basket)
        self.assertRaises(TicketsNotAvailable, check_order_availabillity,
            self.order)
        # Check an order with some availability
        self.order.original_basket = json.dumps(self.mixed_basket)
        self.assertRaises(TicketsNotAvailable, check_order_availabillity,
            self.order)
class TestTicketCreation(TestCase):
""" Tests that tickets are created properly from an order """
def setUp(self):
self.venue = Venue.objects.create(
name = "Test Venue",
capacity = 10
)
self.event_one = Event.objects.create(
title = "Test Event",
description = "Test Event",
venue = self.venue
)
self.event_two = Event.objects.create(
title = "Test Event",
description = "Test Event",
venue = self.venue
)
self.date_event_one = EventDate.objects.create(
event = self.event_one, date = timezone.now() + timedelta(days=10))
self.dates_event_two = []
self.dates_event_two.append(EventDate.objects.create(
event = self.event_two, date = timezone.now() + timedelta(days=15)))
self.dates_event_two.append(EventDate.objects.create(
event = self.event_two, date = timezone.now() + timedelta(days=20)))
self.order = Order.objects.create(
full_name = "Test User",
email = "[email protected]",
phone_number = "01234567890",
)
self.ticket_types = []
self.ticket_types.append(TicketType.objects.create(
name = "test", display_name = "Test Ticket"))
self.ticket_types.append(TicketType.objects.create(
name = "test2", display_name = "Test Ticket2"))
def test_simple_order(self):
"""
Tests that tickets are created correctly for an order
with tickets of a single date, event, and type
"""
# Create a simple basket
basket = {}
basket[self.date_event_one.id] = {
self.ticket_types[0].id: 2
}
self.order.original_basket = json.dumps(basket)
create_tickets_for_order(self.order)
# Check there are the right number of tickets for each type and date
ticket_count = Ticket.objects.filter(
date=self.date_event_one, type=self.ticket_types[0]).count()
self.assertEqual(ticket_count, 2)
# Check that there aren't any incorrect tickets
ticket_count = Ticket.objects.filter(
date=self.date_event_one, type=self.ticket_types[1]).count()
self.assertEqual(ticket_count, 0)
ticket_count = Ticket.objects.filter(event=self.event_two).count()
self.assertEqual(ticket_count, 0)
def test_multiple_type_order(self):
"""
Tests that tickets are created correctly for an order
with tickets of a single date and event but multiple types
"""
basket = {}
basket[self.date_event_one.id] = {
self.ticket_types[0].id: 4,
self.ticket_types[1].id: 6,
}
self.order.original_basket = json.dumps(basket)
create_tickets_for_order(self.order)
# Check there are the right number of tickets for each type and date
ticket_count = Ticket.objects.filter(
date=self.date_event_one, type=self.ticket_types[0].id).count()
self.assertEqual(ticket_count, 4)
ticket_count = Ticket.objects.filter(
date=self.date_event_one, type=self.ticket_types[1].id).count()
self.assertEqual(ticket_count, 6)
# Check that there aren't any incorrect tickets
ticket_count = Ticket.objects.filter(event=self.event_two).count()
self.assertEqual(ticket_count, 0)
def test_multiple_date_order(self):
"""
Tests that tickets are created correctly for an order
with tickets of a single event but multiple dates and types
"""
basket = {}
for date in self.dates_event_two:
basket[date.id] = {
self.ticket_types[0].id: 2,
self.ticket_types[1].id: 4,
}
self.order.original_basket = json.dumps(basket)
create_tickets_for_order(self.order)
# Check there are the right number of tickets for each type and date
for date in self.dates_event_two:
ticket_count = Ticket.objects.filter(
date=date, type=self.ticket_types[0].id).count()
self.assertEqual(ticket_count, 2)
ticket_count = Ticket.objects.filter(
date=date, type=self.ticket_types[1].id).count()
self.assertEqual(ticket_count, 4)
# Check that there aren't any incorrect tickets
ticket_count = Ticket.objects.filter(event=self.event_one).count()
self.assertEqual(ticket_count, 0)
ticket_count = Ticket.objects.filter(event=self.event_two).count()
self.assertEqual(ticket_count, 12)
def test_multiple_event_order(self):
"""
Tests that tickets are created correctly for an order
with tickets of multiple events, dates, and types
"""
basket = {}
basket[self.date_event_one.id] = {
self.ticket_types[0].id: 4,
self.ticket_types[1].id: 6,
}
for date in self.dates_event_two:
basket[date.id] = {
self.ticket_types[0].id: 2,
self.ticket_types[1].id: 4,
}
self.order.original_basket = json.dumps(basket)
create_tickets_for_order(self.order)
ticket_count = Ticket.objects.filter(event=self.event_one).count()
self.assertEqual(ticket_count, 10)
ticket_count = Ticket.objects.filter(event=self.event_two).count()
self.assertEqual(ticket_count, 12)
def test_fails_on_empty(self):
"""
Tests that ticket creation fails if order basket is empty
"""
self.order.original_basket = json.dumps({})
self.assertRaises(EmptyOrder, create_tickets_for_order, self.order)
self.order.original_basket = ""
self.assertRaises(EmptyOrder, create_tickets_for_order, self.order)
def test_fails_on_bad_line(self):
"""
Tests that ticket creation fails if one line is unavailable
and that no tickets are created
"""
basket = {}
basket[self.date_event_one.id] = {
self.ticket_types[0].id: 2
}
basket[self.dates_event_two[0].id] = {
self.ticket_types[0].id: 12
}
self.order.original_basket = json.dumps(basket)
self.assertRaises(TicketsNotAvailable, create_tickets_for_order, self.order)
# Check no tickets created
ticket_count = Ticket.objects.filter(event=self.event_one).count()
self.assertEqual(ticket_count, 0)
ticket_count = Ticket.objects.filter(event=self.event_two).count()
self.assertEqual(ticket_count, 0)
def test_fails_on_bad_order(self):
"""
Tests that ticket creation fails if all lines are unavailable
and that no tickets are created
"""
basket = {}
basket[self.date_event_one.id] = {
self.ticket_types[0].id: 5,
self.ticket_types[1].id: 6,
}
for date in self.dates_event_two:
basket[date.id] = {
self.ticket_types[0].id: 11,
self.ticket_types[1].id: 12,
}
self.order.original_basket = json.dumps(basket)
self.assertRaises(TicketsNotAvailable, create_tickets_for_order, self.order)
# Check no tickets created
ticket_count = Ticket.objects.filter(event=self.event_one).count()
self.assertEqual(ticket_count, 0)
ticket_count = Ticket.objects.filter(event=self.event_two).count()
self.assertEqual(ticket_count, 0)
| [
"[email protected]"
] | |
1ceb882d0d874678ce849d4b88f0381bf790d210 | a9ad81b001d5cfc2e069f2d80e9cf315c404a2d5 | /user/guest1.news.py | e23b5aba34c946efd56507ef6da91ec6ef04a50a | [
"MIT"
] | permissive | ethankennerly/hotel-vs-gozilla | a51b7d07c22584edf7564add87a8ed5736da0bfa | c12d9fe39f2f71ee7ea78607820f215fa9201474 | refs/heads/master | 2020-12-25T17:34:32.050845 | 2016-07-31T17:37:47 | 2016-07-31T17:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | {'level_mc': {'_txt': {'text': '7'},
'currentLabel': 'up',
'progress_mc': {'currentLabel': '_4'}},
'lobby_mc': {'_00_mc': {'capture_3_3_1_mc': {'score_txt': {'text': '$2'}},
'capture_3_3_mc': {'score_txt': {'text': '$2'}},
'capture_5_5_mc': {'score_txt': {'text': '$1'}},
'capture_block_easy_mc': {'score_txt': {'text': '$4'}},
'capture_block_mc': {'score_txt': {'text': '$2'}},
'capture_corner_mc': {'score_txt': {'text': '$6'}},
'capture_critical_mc': {'score_txt': {'text': '$1'}},
'capture_rule_beside_mc': {'score_txt': {'text': '$1'}}}}} | [
"[email protected]"
] | |
de5857ae96e85fb48c25e879feaba146543f9d18 | 14f56d799a27266eb0c4b1bc162b0fc2b1201dbc | /gym-tetris/gym_tetris/envs/Tetris.py | 5aae5e5d7bf60cce99c8ccc039034aa6f9d95699 | [] | no_license | kli512/Tetris-DQN | cf063e00a660ff0142f775d4b487a20388feca11 | 4533818fbebb2c3c0b9e441afed0a5d5b7940465 | refs/heads/master | 2022-09-16T13:55:17.358014 | 2020-06-02T03:37:07 | 2020-06-02T03:37:07 | 266,505,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,995 | py | import copy
import random
from collections import deque
from datetime import datetime
import os
import sys
import numpy as np
import gym_tetris.envs.utils as utils
class Piece:
    # Maps a rotation action name to the signed step applied to `rotation`.
    rotation_table = {'cw': 1, 'ccw': -1}
    # called with the piece_str e.g. 'S' representing which piece
    # can be I, J, L, O , S, T, Z pieces
    def __init__(self, piece_str, parent_game):
        """Tetromino class, used to represent a live piece
        Parameters
        ----------
        piece_str : str
            Letter used to name the piece (I, O, T, J, L, S, Z)
        parent_game : Board
            Parent game this piece belongs to, used to check if Piece is valid
        Attributes
        ----------
        piece_str : str
            Letter used to name the piece (I, O, T, J, L, S, Z)
        rotation : int
            Rotational value (0 pointing up, 1 to the right, etc)
        last_move : str
            Record of the last valid move the piece made
        """
        self.piece_str = piece_str
        self._parent_game = parent_game
        self.rotation = 0
        self.last_move = None
        # sets the spawning location (compensates for O's shape by moving it forward one column)
        self.pos = [6, 4 if piece_str == 'O' else 3]
    # rotates the piece in place; direc is +1 (cw) or -1 (ccw).
    # Returns True on success, False (and rolls back) if the rotated piece
    # would overlap placed blocks or leave the board.
    def _rotate(self, direc):
        self.rotation = (self.rotation + direc) % 4
        if not self._parent_game._piece_valid():
            self.rotation = (self.rotation - direc) % 4
            return False
        return True
    # moves the piece one cell in the desired direction
    # 'u'/'d' change the row (pos[0]); 'l'/'r' change the column (pos[1]).
    # Returns True on success, False (and rolls back) if the move is invalid.
    def _move(self, direc: str) -> bool:
        index = 0 if direc in ('u', 'd') else 1
        amt = 1 if direc in ('d', 'r') else -1
        self.pos[index] += amt
        if not self._parent_game._piece_valid():
            self.pos[index] -= amt
            return False
        return True
    # Yields the (row, col) board indices currently occupied by this piece,
    # i.e. the shape's cell offsets (from utils.OCCUPIED) added to self.pos.
    def occupied(self):
        for spot in utils.OCCUPIED[self.piece_str][self.rotation]:
            yield tuple(map(sum, zip(self.pos, spot)))
    def act(self, action: str) -> bool:
        """Interface for Piece class - manipulates the piece as given
        Parameters
        ----------
        action : str
            Name of the action being performed - u, d, l, r, cw, or ccw
        Returns
        -------
        bool
            Whether or not the action succeeded
        Raises
        ------
        ValueError
            If `action` is neither a known rotation nor a known movement.
        """
        if action in utils.ROTATIONS:
            if self._rotate(self.rotation_table[action]):
                self.last_move = action
                return True
        elif action in utils.MOVEMENT:
            if self._move(action):
                self.last_move = action
                return True
        else:
            raise ValueError('Invalid move \'{}\''.format(action))
        # Action was recognized but blocked: last_move is left unchanged.
        return False
class Board:
def __init__(self, board=None, rseed=None):
"""Main board class, built on top of numpy array
Parameters
----------
board : numpy.array, optional
Board state, by default None which generates an empty board
rseed : Any, optional
Seed used by random.seed() for rng, by default None
"""
self.time_passed = 0
self.height = 26
self.playable_height = 20
self.width = 10
if board is None:
self._board = np.array([[0 for i in range(self.width)]
for i in range(self.height)])
else:
self._board = np.array(board)
if rseed is None:
rseed = datetime.now()
random.seed(rseed)
self.lines_cleared = 0
self.score = 0
self.dead = False
self.held_piece = None
self._hold_used = False
self.cur_piece = None
self.next_pieces = deque()
self._pick_new_next()
self._spawn_piece()
self.ghost_piece_occupied = None
self._generate_ghost_piece()
def _out_of_bounds(self, pos):
if not 0 <= pos[0] < self.height or not 0 <= pos[1] < self.width:
return True
return False
# checks if the current location of the piece is valid
# i.e. doesn't intersect with already placed blocks / out of bounds
def _piece_valid(self):
return not any(self._out_of_bounds(pos) or self._board[pos] != 0 for pos in self.cur_piece.occupied())
# picks a random new piece
# modify this if want to switch to bag/other randomizer
def _pick_new_next(self):
self.next_pieces.extend(np.random.permutation(utils.SHAPES))
# spawns a new piece in
# also checks validity of spawned piece to see if game is lost
def _spawn_piece(self):
self._hold_used = False
self.cur_piece = Piece(self.next_pieces.popleft(), self)
if len(self.next_pieces) < 7:
self._pick_new_next()
self._generate_ghost_piece()
if not self._piece_valid():
self.dead = True
def _hold(self):
if self.held_piece is not None:
self.next_pieces.appendleft(self.held_piece)
self.held_piece = self.cur_piece.piece_str
self._spawn_piece()
self._hold_used = True
def _generate_ghost_piece(self):
og_piece = copy.deepcopy(self.cur_piece)
while self.cur_piece.act('d'):
pass
self.ghost_piece_occupied = tuple(self.cur_piece.occupied())
self.cur_piece = og_piece
def _tspun(self):
if self.cur_piece.piece_str != 'T' or self.cur_piece.last_move not in utils.ROTATIONS:
return False
corners = [(0, 0), (0, 2), (2, 0), (2, 2)]
filled_corners = 0
#print('pos {}'.format(self.cur_piece.pos))
for corner in corners:
tocheck = tuple(map(sum, zip(corner, self.cur_piece.pos)))
if self._out_of_bounds(tocheck) or self._board[tocheck] != 0:
filled_corners += 1
if filled_corners >= 3:
return True
return False
# clears lines as needed and award points
def _clear_lines(self, mult):
lcleared = 0
for r_ind in range(self.height):
if all(val != 0 for val in self._board[r_ind]):
for l_ind in reversed(range(1, r_ind + 1)):
self._board[l_ind] = self._board[l_ind - 1]
self._board[0] = 0
self.score += mult * 1000 * (lcleared + 1)
lcleared += 1
self.lines_cleared += lcleared
return lcleared
# public interface; this is how the player will interact
# locks _cur_piece in place and spawns a new one
def lock_piece(self):
safe = False
for pos in self.cur_piece.occupied():
# self._board[pos] = utils.shape_values[self.cur_piece.piece_str] # enable for color
self._board[pos] = 1
if pos[0] >= self.height - self.playable_height:
safe = True
if not safe:
self.dead = True
mult = 2 if self._tspun() else 1
# if mult == 2: print ("tspin")
self._clear_lines(mult)
self._spawn_piece()
# valid actions: u, d, l, or r for movement
# cw or ccw for rotation
# returns -1 for invalid action
# 0 for failed action (e.g. piece became invalid)
# 1 for successful action
def act(self, action):
if self.time_passed >= 120 * 3: # game time (s)* inputs per second
self.dead = True
return False
# if not self.cur_piece.act('d'):
# self.lock_piece()
if self.time_passed % 3 == 2:
if not self.cur_piece.act('d'):
self.lock_piece()
return False
self.time_passed += 1
if action == 'hold':
if self._hold_used:
return False
self._hold()
elif action == 'hd':
while self.cur_piece.act('d'):
pass
self.lock_piece()
elif action in utils.MOVEMENT:
if not self.cur_piece.act(action):
return False
elif action in utils.ROTATIONS:
offsets = utils.SRS_TABLE.get_rotation(
self.cur_piece.piece_str, self.cur_piece.rotation, utils.ROTATION_TO_VAL[action])
for offset in offsets:
old_pos = self.cur_piece.pos
self.cur_piece.pos = list(
utils.vector_add(self.cur_piece.pos, offset))
if self.cur_piece.act(action):
break
self.cur_piece.pos = old_pos
else:
return False
elif action == '':
return True
else:
raise ValueError('Invalid move \'{}\''.format(action))
if action != 'd':
self._generate_ghost_piece()
return True
def state(self):
bstate = copy.deepcopy(self._board)
for pos in self.cur_piece.occupied():
bstate[pos] = 1
# bstate[pos] = utils.shape_values[self.cur_piece.piece_str] # add back for color
return bstate
def __str__(self):
temp = copy.deepcopy(self._board)
for pos in self.cur_piece.occupied():
temp[pos] = utils.shape_values[self.cur_piece.piece_str]
out = np.array_str(temp)
out = out.replace('0', ' ')
return out
def clear():
    """Clear the terminal screen ('cls' on Windows, 'clear' on POSIX; the
    shell runs whichever of the two succeeds)."""
    os.system('cls||clear')
def main():
    """Interactive console Tetris: render the board and read moves until the
    game ends. Unknown moves raise ValueError inside Board.act and are
    reported, then the prompt repeats."""
    game = Board()
    while True:
        print(game)
        print('Score: {}'.format(game.score))
        # Keep prompting until Board.act accepts the input as a known move.
        while True:
            print('Enter move (l, d, r, hd, cw, ccw, hold): ', end='')
            entered = input()
            try:
                game.act(entered)
            except ValueError as err:
                print(err)
            else:
                break
        if game.dead:
            break
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
efa8bb4b3111da7cf23411d1a20e9322918cd148 | 9d4e38f5486473b902ff4feb38ef039ea84ec123 | /CMA-GCV_v1.py | fccff2dead0d4443366c7b979bb3af1b81e2fc18 | [] | no_license | lhysgithub/CMA | 557586d33460c185aebd174ef0dcb8d7349dfa98 | 62039344dec327f5e34feafdb4e99bd5f4d22503 | refs/heads/master | 2020-04-05T21:50:06.079091 | 2018-12-02T11:31:19 | 2018-12-02T11:31:19 | 157,235,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,295 | py | import numpy as np
import tensorflow as tf
import os
import io
import time # Used to count using time
import PIL # import image
import shutil # DELETE DIRECTORY
import scipy # draw render
import matplotlib.pyplot as plt # draw render
from google.cloud import vision # GCV
from google.cloud.vision import types # GCV
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
credential_path = "bit928 wang-e1273a6d10ff.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
# parallel computeing
def GCVAPI(image,OutDir):
    """Label a batch of images with the Google Cloud Vision API.

    image: array-like reshaped to (-1, 299, 299, 3); each slice is written to
        OutDir as '<i>.jpg' and sent to the label-detection endpoint.
    OutDir: directory the temporary .jpg files are written to
        (assumed to exist -- TODO confirm callers create it first).

    Returns (labels, confidences): two parallel nested lists, one inner list
    per input image, holding the label descriptions and their scores.
    """
    image = np.reshape(image,(-1,299,299,3))
    labels = []
    confidences =[]
    print(len(image))
    # Instantiates a client (reused for every image in the batch)
    client = vision.ImageAnnotatorClient()
    for i in range(len(image)):
        # Save the image as .jpg
        # NOTE(review): scipy.misc.imsave is deprecated/removed in modern
        # SciPy -- this code presumably runs on an old SciPy; verify.
        scipy.misc.imsave(os.path.join(OutDir, '%s.jpg' % i), image[i])
        # The name of the image file to annotate (resolved relative to this module)
        file_name = os.path.join(os.path.dirname(__file__), os.path.join(OutDir, '%s.jpg' % i))
        # Read the image file back as raw bytes
        with open(file_name, 'rb') as image_file:
            content = image_file.read()
        # Wrap the raw bytes in a Vision API Image message
        testimage = types.Image(content=content)
        # Performs label detection on the image file
        response = client.label_detection(image=testimage)
        templabs = response.label_annotations
        tempDescriptions = []
        tempScores = []
        for j in templabs:
            tempDescriptions.append(j.description)
            tempScores.append(j.score)
        labels.append(tempDescriptions)
        confidences.append(tempScores)
    return labels,confidences
# GCVAPI returns two parallel (INumber, Topk) nested lists: labels and confidences.
def load_image(path):
    """Load an image file as a float32 RGB array in [0, 1], shape (299, 299, 3).

    The image is center-cropped to a square, then resized to 299x299.
    Grayscale inputs are replicated across three channels; an alpha
    channel, if present, is dropped.
    """
    pic = PIL.Image.open(path)
    w, h = pic.width, pic.height
    if h > w:
        # Taller than wide: trim equal strips from top and bottom.
        off = int((h - w) / 2)
        pic = pic.crop((0, off, w, off + w))
    elif w > h:
        # Wider than tall: trim equal strips from left and right.
        off = int((w - h) / 2)
        pic = pic.crop((off, 0, off + h, h))
    arr = np.asarray(pic.resize((299, 299))).astype(np.float32) / 255.0
    if arr.ndim == 2:
        # Grayscale: replicate the single channel into RGB.
        arr = np.repeat(arr[:, :, np.newaxis], repeats=3, axis=2)
    if arr.shape[2] == 4:
        # Drop the alpha channel.
        arr = arr[:, :, :3]
    return arr
def get_image(InputDir="", indextemp=-1):
    """Load one image from InputDir via load_image.

    indextemp selects a file by its position in sorted filename order;
    the default of -1 picks an index uniformly at random using numpy's
    global RNG.
    """
    paths = sorted(os.path.join(InputDir, name) for name in os.listdir(InputDir))
    chosen = indextemp if indextemp != -1 else np.random.randint(len(paths))
    return load_image(paths[chosen])
def render_frame(OutDir, image, save_index, SourceClass, TargetClass, StartImg):
    """Save the adversarial image and a side-by-side diagnostic figure.

    image: perturbation, reshaped to (299, 299, 3) and added onto StartImg.
    save_index: used in both output filenames ('<i>.jpg' and 'frame%06d.jpg').
    SourceClass / TargetClass: label collections; source labels are drawn in
    green and target labels in red on the confidence bar chart.
    """
    image = np.reshape(image, (299, 299, 3)) + StartImg
    scipy.misc.imsave(os.path.join(OutDir, '%s.jpg' % save_index), image)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
    # Left panel: the image itself, axes hidden
    ax1.imshow(image)
    fig.sca(ax1)
    plt.xticks([])
    plt.yticks([])
    # Right panel: GCV label confidences as a bar chart
    probs,confidence = GCVAPI(image,OutDir)
    probs = probs[0]
    confidence= confidence[0]
    barlist = ax2.bar(range(len(probs)), confidence)
    # Many-to-many coloring scheme: green for source labels, red for targets
    for i, v in enumerate(probs):
        if v in SourceClass:
            barlist[i].set_color('g')
        elif v in TargetClass:
            barlist[i].set_color('r')
    # One-to-one coloring scheme (kept for reference, disabled)
    # for i, v in enumerate(probs):
    #     if v == SourceClass:
    #         barlist[i].set_color('g')
    #     if v == TargetType:
    #         barlist[i].set_color('r')
    plt.sca(ax2)
    plt.ylim([0, 1.1])
    plt.xticks(range(len(probs)), probs, rotation='vertical')
    fig.subplots_adjust(bottom=0.2)
    # Overwrite any previous frame with the same index
    path = os.path.join(OutDir, 'frame%06d.jpg' % save_index)
    if os.path.exists(path):
        os.remove(path)
    plt.savefig(path)
    plt.close()
def StartPoint(SourceImage, TargetImage, Domin):
    """Project SourceImage into the L-infinity ball of radius Domin around
    TargetImage, keeping every pixel inside the valid [0, 1] range.

    The result is the starting image for the evolutionary search: it is as
    close to the source as possible while staying within Domin of the target.
    """
    upper_bound = np.clip(TargetImage + Domin, 0.0, 1.0)
    lower_bound = np.clip(TargetImage - Domin, 0.0, 1.0)
    return np.clip(SourceImage, lower_bound, upper_bound)
def main():
# Algorithm parameters
InputDir = "adv_samples/"
OutDir = "adv_example/"
QueryTimes = 0
Topk = 10
Convergence = 20
# CloseThreshold = - 0.5
# CloseThreshold = 0
CloseThreshold = - 70
Domin = 0.1
Sigma = 10
INumber = 50 # ๆ่ฒไฝไธชๆฐ / ไธชไฝไธชๆฐ
BatchSize = 50 # ๅฏปๆพๅฏ็จไธชไฝๆถ็จ็ๆน้ไธ้
MaxEpoch = 10000 # ่ฟญไปฃไธ้
Reserve = 0.25 # ไฟ็็ = ็ถๅญไฟ็็็ฒพ่ฑ้ / BestNumber
BestNmber = int(INumber * Reserve) # ไผ็งๆ ทๆฌๆฐ้
IndividualShape = (INumber, 299, 299, 3)
ImageShape = (299, 299, 3)
StartStdDeviation = 0.1
CloseEVectorWeight = 0.3
CloseDVectorWeight = 0.1
UnVaildExist = 0 # ็จๆฅ่กจ็คบๆฏๅฆๅ ไธบๆข็ดขๅนฟๅบฆ่ฟๅคงๅฏผ่ดๆ ๆๆฐๆฎ่ฟๅค
ConstantUnVaildExist = 0
# Set output directory
if os.path.exists(OutDir):
shutil.rmtree(OutDir)
os.makedirs(OutDir)
# Initialization
SourceImage = get_image(InputDir,4)
TargetImage = get_image(InputDir,5)
# SourceType,_ = GCVAPI(SourceImage,OutDir) # ่ทๅ้ฆๅ็ฑป
# SourceType = SourceType[0][0] #
# TargetType,_ = GCVAPI(TargetImage,OutDir)
# TargetType = TargetType[0][0]
# ็กฎๅฎไธคๅผ ่พๅ
ฅๅพ็็่ฏๅซๅ็ฑป
SourceType, _ = GCVAPI(SourceImage, OutDir) # ่ทๅ้ฆๅ็ฑป
TargetType, _ = GCVAPI(TargetImage, OutDir)
TypeNumber = 5
if (len(SourceType[0]) > len(TargetType[0])):
TypeNumber = len(TargetType[0])
else :
TypeNumber = len(SourceType[0])
if (TypeNumber>5):
TypeNumber = 5
SourceType = SourceType[0][0:TypeNumber] #
TargetType = TargetType[0][0:TypeNumber]
# Already done?
# if (TargetType == SourceType):
# print("Done!")
# ๅคๅฏนๅคไธ็ๅๅงๆฃ้ช
for i in SourceType:
if i in TargetType :
print("Done!")
break
# Set the start point of evolution
StartImg = StartPoint(SourceImage, TargetImage,Domin)
Upper = 1.0 - StartImg
Downer = 0.0 - StartImg
# Evolution parameters
SSD = StartStdDeviation
DM = Domin
CEV = CloseEVectorWeight
CDV = CloseDVectorWeight
StartError = 0 # unexpectation detection
LogFile = open(os.path.join(OutDir, 'log%d.txt' % 1), 'w+')
StartNumber = 2 # the Minimum startnumber of evolution
PBF = -1000000.0
PBL2Distance = 100000
ENP = np.zeros(ImageShape, dtype=float)
DNP = np.zeros(ImageShape, dtype=float) + SSD
# ๆญ็น็ปญๅฎ้ช
if os.path.exists(SourceType[0] + " " + TargetType[0] + "ENP.npy"):
ENP = np.load(SourceType[0] + " " + TargetType[0] + "ENP.npy")
DNP = np.load(SourceType[0] + " " + TargetType[0] + "DNP.npy")
LastENP = ENP
LastDNP = DNP
LastPBF = PBF
LastPBL2 = PBL2Distance
BestAdv = ENP
BestAdvL2 = PBL2Distance
BestAdvF = PBF
# there is the compute graph
with tf.Session() as sess:
Individual = tf.placeholder(shape=IndividualShape, dtype=tf.float32) # ๏ผINumber๏ผ299๏ผ299๏ผ3๏ผ
# logit = tf.placeholder(shape=(INumber), dtype=tf.float32)# We can change the order of response.
STImg = tf.placeholder(shape=ImageShape, dtype=tf.float32)
StartImgtf = tf.reshape(STImg,shape= (-1,299,299,3))
SourceImgtf = tf.placeholder(shape=ImageShape, dtype=tf.float32)
SourceImg = tf.reshape(SourceImgtf, (-1,299,299,3))
NewImage = Individual + StartImgtf
# Labels = tf.constant(1,(0,)*(Topk-1)) # wait to test
# Compute the L2Distance and IndividualFitness
L2Distance = tf.sqrt(tf.reduce_sum(tf.square(NewImage - SourceImg), axis=(1, 2, 3)))
# IndividualFitness = - (Sigma * tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=Labels) + L2Distance)
LossFunction = tf.placeholder(dtype=tf.float32)
IndividualFitness = - (-LossFunction + L2Distance) # -tf.log(logit)
# Select BestNmber Individual
TopKFit, TopKFitIndx = tf.nn.top_k(IndividualFitness, BestNmber)
TopKIndividual = tf.gather(Individual, TopKFitIndx) # (BestNmber,299,299,3) ๆญคๅคๆฏๅฆๅฏไปฅๅฎๆ
# Update the Expectation and Deviation
Expectation = tf.constant(np.zeros(ImageShape), dtype=tf.float32)
for i in range(BestNmber):
Expectation += (0.5 ** (i + 1) * TopKIndividual[i])
# Expectation = tf.reduce_mean(TopKIndividual,reduction_indices=0)
Deviation = tf.constant(np.zeros(ImageShape), dtype=tf.float32)
for i in range(BestNmber):
Deviation += 0.5 ** (i + 1) * tf.square(TopKIndividual[i] - Expectation)
# Deviation /= BestNmber
StdDeviation = tf.sqrt(Deviation)
# Find the best ่ทๅ็ง็พคๆไฝณ๏ผๆดป็็๏ผไธ็ฎๅๅฒ็๏ผ
PbestFitness = tf.reduce_max(IndividualFitness)
Pbestinds = tf.where(tf.equal(PbestFitness, IndividualFitness))
Pbestinds = Pbestinds[:, 0]
Pbest = tf.gather(Individual, Pbestinds)
# Start evolution
for i in range(MaxEpoch):
Start = time.time()
UsefullNumber = 0
Times = 0
cycletimes = 0
initI = np.zeros(IndividualShape, dtype=float)
# initCp = np.zeros((INumber), dtype=float)
initCR = np.zeros((INumber), dtype=float)
initPP = []
initLoss = np.zeros((INumber), dtype=float)
# find the usefull Individual
while UsefullNumber != INumber:
# Generate TempPerturbation, TestImage, CP and PP
TempPerturbation = np.random.randn(BatchSize, 299, 299, 3)
TempPerturbation = TempPerturbation * np.reshape(DNP, (1, 299, 299, 3)) + np.reshape(ENP, (1, 299, 299, 3))
TempPerturbation = np.clip(TempPerturbation, Downer, Upper)
TestImage = TempPerturbation + np.reshape(StartImg, (1, 299, 299, 3))
PP, CP = GCVAPI(TestImage,OutDir)
Used = np.zeros(BatchSize)
# CP = np.reshape(CP, (BatchSize, 1000))
# ็ญ้
QueryTimes += BatchSize
for j in range(BatchSize):
for oneTType in TargetType:
if (oneTType in PP[j]) and Used[j]==0:
initI[UsefullNumber] = TempPerturbation[j]
initPP.append(PP[j])
templabes = [-1] * len(PP[j])
for k in PP[j]:
if k in TargetType:
templabes[PP[j].index(k)] = 10
elif k in SourceType:
templabes[PP[j].index(k)] = -10
initLoss[UsefullNumber] = - np.sum((1 / np.log(CP[j]))*templabes)
# initCR[UsefullNumber] = np.log(CP[j][PP[j].index(TargetType)]/CP[j][0])
Used[j]=1
UsefullNumber += 1
if UsefullNumber == INumber: # ๆพๅคไบ๏ผ่ทณๅบๆๆ่ฟๅ
break
if UsefullNumber == INumber: # ๆพๅคไบ๏ผ่ทณๅบๆๆ่ฟๅ
break
# ไธๅฏนไธไธ็ๆๆ่ฟๅๆนๆณ
# if TargetType in PP[j]:
# initI[UsefullNumber] = TempPerturbation[j]
# templabes = [-1]*len(PP[j])
# templabes[PP[j].index(TargetType)] = 10
# if SourceType in PP[j]:
# templabes[PP[j].index(SourceType)] = -10
# # initLoss[UsefullNumber] = np.sum( np.log(CP[j]) * templabes)
# initLoss[UsefullNumber] = - np.sum((1 / np.log(CP[j]))*templabes)
# # initLoss[UsefullNumber] = np.sum(np.log(CP[j][PP[j].index(TargetType)])-np.log(CP[j]))
# # initLoss[UsefullNumber] = np.log(np.exp(CP[j][PP[j].index(TargetType)])/np.sum(np.exp(np.log(CP[j]))))
# # initLoss[UsefullNumber] = np.exp(CP[j][PP[j].index(TargetType)])/np.sum(np.exp(np.log(CP[j])))
# initCR[UsefullNumber] = np.log(CP[j][PP[j].index(TargetType)]/CP[j][0])
# # initCp[UsefullNumber] = CP[j][PP[j].index(TargetType)]
# UsefullNumber += 1
# if UsefullNumber == INumber:
# break
# Check whether the UsefullNumber equals INumber
if UsefullNumber != INumber:
LogText = "UsefullNumber: %3d SSD: %.2f DM: %.3f" % (UsefullNumber, SSD, DM)
LogFile.write(LogText + '\n')
print(LogText)
# if we find some usefull individual we can find more
if UsefullNumber > StartNumber - 1 and UsefullNumber < INumber:
tempI = initI[0:UsefullNumber]
ENP = np.zeros(ImageShape, dtype=float)
DNP = np.zeros(ImageShape, dtype=float)
for j in range(UsefullNumber):
ENP += tempI[j]
ENP /= UsefullNumber
for j in range(UsefullNumber):
DNP += np.square(tempI[j] - ENP)
DNP /= UsefullNumber
DNP = np.sqrt(DNP)
# We need to find some init-usefull individual
if i == 0 and UsefullNumber < StartNumber:
Times += 1
TimesUper = 1
if UsefullNumber > 0:
TimesUper = 5
else:
TimesUper = 1
if Times == TimesUper:
SSD += 0.01
if SSD - StartStdDeviation >= 0.05:
SSD = StartStdDeviation
DM -= 0.05
StartImg = StartPoint(SourceImage, TargetImage,DM)
Upper = 1.0 - StartImg
Downer = 0.0 - StartImg
DNP = np.zeros(ImageShape, dtype=float) + SSD
Times = 0
# If invalid happened, we need to roll back dnp and enp
if i != 0 and UsefullNumber < StartNumber:
CEV -= 0.01
CDV = CEV / 3
if CEV <= 0.01:
CEV = 0.01
CDV = CEV / 3
DNP = LastDNP + (SourceImage - (StartImg + ENP)) * CDV
ENP = LastENP + (SourceImage - (StartImg + ENP)) * CEV
LogText = "UnValidExist CEV: %.3f CDV: %.3f" % (CEV, CDV)
LogFile.write(LogText + '\n')
print(LogText)
# ๅคๆญๆฏๅฆๅบ็ฐๆ ทๆฌๆ ๆๅ
if cycletimes == 0:
if i != 0 and UsefullNumber < StartNumber:
UnVaildExist = 1
elif i != 0 and UsefullNumber >= StartNumber:
UnVaildExist = 0
cycletimes += 1
# Check whether the ssd overflows
if SSD > 1:
LogText = "Start Error"
LogFile.write(LogText + '\n')
print(LogText)
StartError = 1
break
# Error dispose
if StartError == 1:
break
# initI = np.clip(initI, Downer, Upper)
LastPBF, LastDNP, LastENP = PBF, DNP, ENP
PBI,ENP, DNP, PBF, PB = sess.run([Pbestinds,Expectation, StdDeviation, PbestFitness, Pbest],
feed_dict={Individual: initI, LossFunction: initLoss,STImg:StartImg,SourceImgtf:SourceImage})
# ๆญ็น็ปญๅฎ้ช
np.save(SourceType[0] + " " + TargetType[0] + "ENP.npy", ENP)
np.save(SourceType[0] + " " + TargetType[0] + "DNP.npy", DNP)
PBI = PBI[0]
if PB.shape[0] > 1:
PB = PB[0]
PB = np.reshape(PB, (1, 299, 299, 3))
print("PBConvergence")
End = time.time()
LastPBL2 = PBL2Distance
PBL2Distance = np.sqrt(np.sum(np.square(StartImg + PB - SourceImage), axis=(1, 2, 3)))
render_frame(OutDir, PB, 100 + i, SourceType, TargetType, StartImg)
LogText = "Step %05d: PBF: %.4f UseingTime: %.4f PBL2Distance: %.4f QueryTimes: %d" % (
i, PBF, End - Start, PBL2Distance, QueryTimes)
LogFile.write(LogText + '\n')
print(LogText)
# elif i>10 and LastPBF > PBF: # ๅ็ๆๅจ้ทๅ
ฅๅฑ้จๆไผ(ไธๅบ่ฏฅไปฅๆฏๅฆๅ็ๆๅจๆฅๅคๆญๅๆฐ๏ผ่ๆฏๅบ่ฏฅไปฅๆฏๅฆๅ็ฐๅบ็ฐๆ ๆๆฐๆฎๆฅๅคๆญ๏ผๆ่
ไธค่
ๅ
ฑๅๅคๆญ)
if PBL2Distance>15 and abs(PBF - LastPBF) < Convergence:
Closeflag = 0
for w in range(int(len(initPP[PBI])/2)):
if initPP[PBI][w] in TargetType:
Closeflag = 1
break
if (Closeflag == 1): # ้ ่ฟ
# if (PBF + PBL2Distance> CloseThreshold): # ้ ่ฟ
# if ( 1 ): # ้ ่ฟ
CEV += 0.01
CDV = CEV / 3
DNP += (SourceImage - (StartImg + ENP)) * CDV
ENP += (SourceImage - (StartImg + ENP)) * CEV
LogText = "Close up CEV: %.3f CDV: %.3f" % (CEV, CDV)
LogFile.write(LogText + '\n')
print(LogText)
else:
# CEV += 0.01
# CDV = CEV / 3
DNP += (SourceImage - (StartImg + ENP)) * CDV
LogText = "Scaling up CEV: %.3f CDV: %.3f" % (CEV, CDV)
LogFile.write(LogText + '\n')
print(LogText)
if (initPP[PBI][0] not in TargetType) and PBL2Distance < 15 and abs(PBF - LastPBF) < Convergence:
# CEV += 0.01
# CDV = CEV / 3
DNP += (SourceImage - (StartImg + ENP)) * CDV
LogText = "Scaling up CEV: %.3f CDV: %.3f" % (CEV, CDV)
LogFile.write(LogText + '\n')
print(LogText)
# ๅฆๆ็ปๆ่ฟ่ก๏ผๅฏไปฅไฟๅญ
# if (PBF + PBL2Distance > CloseThreshold): # ้ ่ฟ
# if initCR[PBI]>=0:
if initPP[PBI][0] in TargetType:
BestAdv = PB
BestAdvL2 = PBL2Distance
BestAdvF = PBF
# ่งฃ้
if BestAdvL2 < 15:
LogText = "Complete BestAdvL2: %.4f BestAdvF: %.4f QueryTimes: %d" % (
BestAdvL2, BestAdvF, QueryTimes)
print(LogText)
LogFile.write(LogText + '\n')
render_frame(OutDir, BestAdv, 1000000, SourceType, TargetType, StartImg)
break
# ๆๅคงๅพช็ฏๆฌกๆฐ
if i == MaxEpoch - 1 or ConstantUnVaildExist == 30:
LogText = "Complete to MaxEpoch or ConstantUnVaildExist BestAdvL2: %.4f BestAdvF: %.4f QueryTimes: %d" % (
BestAdvL2, BestAdvF, QueryTimes)
print(LogText)
LogFile.write(LogText + '\n')
render_frame(OutDir, BestAdv, 1000000, SourceType, TargetType, StartImg)
break
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4d2c18252c767533204105781cf91047b3d1d7b3 | 5377d8bca16d00f396f998a09a1bc06438e71064 | /packages/hyperopt/fmin.py | eaf4db1b6182cec4fb4d42dd2b0c5347aba26027 | [] | no_license | Paul-Saves/Bandit-BO | 15d35aae37b01968259801e760bdcd72b4e16216 | 4cea35276d8cc2afb773ffffc1ae15d9a68d91e0 | refs/heads/master | 2023-03-01T00:46:21.352757 | 2020-07-20T11:37:29 | 2020-07-20T11:37:29 | 337,110,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,257 | py | from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
from builtins import str
from builtins import object
import functools
import logging
import os
import sys
import time
import numpy as np
from . import pyll
from .utils import coarse_utcnow
from . import base
standard_library.install_aliases()
logger = logging.getLogger(__name__)
try:
import dill as pickler
except Exception as e:
logger.info('Failed to load dill, try installing dill via "pip install dill" for enhanced pickling support.')
import six.moves.cPickle as pickler
def generate_trial(tid, space, output):
    """Build a single hyperopt trial document in the JOB_STATE_NEW state.

    `space` maps hyperparameter names to the values chosen for this trial;
    `output` is an optional pre-computed objective value stored under 'spec'
    (None means the objective still needs to be evaluated for this point).
    """
    index_map = {name: [tid] for name in space.keys()}
    value_map = {name: [value] for name, value in space.items()}
    misc = {'tid': tid,
            'cmd': ('domain_attachment',
                    'FMinIter_Domain'),
            'workdir': None,
            'idxs': index_map,
            'vals': value_map}
    return {'state': base.JOB_STATE_NEW,
            'tid': tid,
            'spec': output,
            'result': {'status': 'new'},
            'misc': misc,
            'exp_key': None,
            'owner': None,
            'version': 0,
            'book_time': None,
            'refresh_time': None,
            }
def generate_trials_to_calculate(points, fvalues=None):
    """
    Function that generates trials to be evaluated from list of points
    :param points: List of points to be inserted in trials object in form of
        dictionary with variable names as keys and variable values as dict
        values. Example value:
        [{'x': 0.0, 'y': 0.0}, {'x': 1.0, 'y': 1.0}]
    :param fvalues: Optional list of pre-computed objective values, one per
        point.  An entry of None (or omitting the argument entirely) means
        the objective function will be evaluated for that point.
    :return: object of class base.Trials() with points which will be calculated
        before optimisation start if passed to fmin().
    """
    # BUGFIX: this function used to require `fvalues`, but fmin() calls it
    # with only `points`; default to "no known objective values".
    if fvalues is None:
        fvalues = [None] * len(points)
    trials = base.Trials()
    new_trials = [generate_trial(tid, x, fvalues[tid])
                  for tid, x in enumerate(points)]
    trials.insert_trial_docs(new_trials)
    return trials
def fmin_pass_expr_memo_ctrl(f):
    """Decorator marking *f* as expecting kwargs 'expr', 'memo' and 'ctrl'
    from hyperopt.fmin.

    expr - the pyll expression of the search space
    memo - a partially-filled memo dictionary such that
           `rec_eval(expr, memo=memo)` will build the proposed trial point.
    ctrl - the Experiment control object (see base.Ctrl)
    """
    # The marker attribute is inspected by Domain / partial() below.
    setattr(f, 'fmin_pass_expr_memo_ctrl', True)
    return f
def partial(fn, **kwargs):
    """functools.partial work-alike that also carries over the
    ``fmin_pass_expr_memo_ctrl`` marker set by the decorator above.
    """
    wrapped = functools.partial(fn, **kwargs)
    # Propagate the marker only when the wrapped function defines it.
    if hasattr(fn, 'fmin_pass_expr_memo_ctrl'):
        wrapped.fmin_pass_expr_memo_ctrl = fn.fmin_pass_expr_memo_ctrl
    return wrapped
class FMinIter(object):
    """Object for conducting search experiments.

    Iterating over an instance runs the search one evaluation at a time;
    ``exhaust()`` runs it up to ``max_evals``.  Evaluation happens either
    serially (in-process) or asynchronously (jobs picked up by external
    workers through the trials backend).
    """
    catch_eval_exceptions = False
    pickle_protocol = -1
    def __init__(self, algo, domain, trials, rstate, asynchronous=None,
                 max_queue_len=1,
                 poll_interval_secs=1.0,
                 max_evals=sys.maxsize,
                 verbose=0,
                 ):
        self.algo = algo
        self.domain = domain
        self.trials = trials
        if asynchronous is None:
            # Default to whatever the trials backend supports.
            self.asynchronous = trials.asynchronous
        else:
            self.asynchronous = asynchronous
        self.poll_interval_secs = poll_interval_secs
        self.max_queue_len = max_queue_len
        self.max_evals = max_evals
        self.rstate = rstate
        if self.asynchronous:
            if 'FMinIter_Domain' in trials.attachments:
                # logger.warn is a deprecated alias of logger.warning
                logger.warning('over-writing old domain trials attachment')
            msg = pickler.dumps(domain)
            # -- sanity check for unpickling
            pickler.loads(msg)
            trials.attachments['FMinIter_Domain'] = msg
    def serial_evaluate(self, N=-1):
        """Evaluate up to N new trials in-process (N < 0 means no limit)."""
        for trial in self.trials._dynamic_trials:
            if trial['state'] == base.JOB_STATE_NEW:
                # BUGFIX: this line used ``==`` (a no-op comparison); the job
                # must actually be marked as running.
                trial['state'] = base.JOB_STATE_RUNNING
                now = coarse_utcnow()
                trial['book_time'] = now
                trial['refresh_time'] = now
                spec = base.spec_from_misc(trial['misc'])
                ctrl = base.Ctrl(self.trials, current_trial=trial)
                output = trial['spec']
                output = np.array(output).reshape(-1, 1)  # format output to [[]]
                fvalue = output[0][0]  # get function value
                try:
                    if fvalue is not None:
                        # A pre-computed objective value was attached to the
                        # trial, so the objective need not be called again.
                        result = {'loss': fvalue, 'status': 'ok'}
                    else:
                        result = self.domain.evaluate(spec, ctrl)
                except Exception as e:
                    logger.info('job exception: %s' % str(e))
                    trial['state'] = base.JOB_STATE_ERROR
                    trial['misc']['error'] = (str(type(e)), str(e))
                    trial['refresh_time'] = coarse_utcnow()
                    if not self.catch_eval_exceptions:
                        # -- JOB_STATE_ERROR means this trial
                        #    will be removed from self.trials.trials
                        #    by this refresh call.
                        self.trials.refresh()
                        raise
                else:
                    trial['state'] = base.JOB_STATE_DONE
                    trial['result'] = result
                    trial['refresh_time'] = coarse_utcnow()
                N -= 1
                if N == 0:
                    break
        self.trials.refresh()
    def block_until_done(self):
        """Wait until no trial is left in the NEW or RUNNING state."""
        already_printed = False
        if self.asynchronous:
            unfinished_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]
            def get_queue_len():
                return self.trials.count_by_state_unsynced(unfinished_states)
            qlen = get_queue_len()
            while qlen > 0:
                if not already_printed:
                    logger.info('Waiting for %d jobs to finish ...' % qlen)
                    already_printed = True
                time.sleep(self.poll_interval_secs)
                qlen = get_queue_len()
            self.trials.refresh()
        else:
            # Serial mode: just evaluate everything that is still NEW.
            self.serial_evaluate()
    def run(self, N, block_until_done=True):
        """Enqueue and evaluate up to N new trials.

        block_until_done means that the process blocks until ALL jobs in
        trials are not in running or new state
        """
        trials = self.trials
        algo = self.algo
        n_queued = 0
        def get_queue_len():
            return self.trials.count_by_state_unsynced(base.JOB_STATE_NEW)
        stopped = False
        while n_queued < N:
            qlen = get_queue_len()
            while qlen < self.max_queue_len and n_queued < N:
                n_to_enqueue = min(self.max_queue_len - qlen, N - n_queued)
                new_ids = trials.new_trial_ids(n_to_enqueue)
                self.trials.refresh()
                new_trials = algo(new_ids, self.domain, trials,
                                  self.rstate.randint(2 ** 31 - 1))
                assert len(new_ids) >= len(new_trials)
                if len(new_trials):
                    self.trials.insert_trial_docs(new_trials)
                    self.trials.refresh()
                    n_queued += len(new_trials)
                    qlen = get_queue_len()
                else:
                    # The suggestion algorithm produced nothing: stop early.
                    stopped = True
                    break
            if self.asynchronous:
                # -- wait for workers to fill in the trials
                time.sleep(self.poll_interval_secs)
            else:
                # -- loop over trials and do the jobs directly
                self.serial_evaluate()
            if stopped:
                break
        if block_until_done:
            self.block_until_done()
            self.trials.refresh()
            logger.info('Queue empty, exiting run.')
        else:
            qlen = get_queue_len()
            if qlen:
                msg = 'Exiting run, not waiting for %d jobs.' % qlen
                logger.info(msg)
    def __iter__(self):
        return self
    def __next__(self):
        """Run one more evaluation; raise StopIteration once max_evals is hit."""
        self.run(1, block_until_done=self.asynchronous)
        if len(self.trials) >= self.max_evals:
            raise StopIteration()
        return self.trials
    def exhaust(self):
        """Run the remaining evaluation budget and return self."""
        n_done = len(self.trials)
        self.run(self.max_evals - n_done, block_until_done=self.asynchronous)
        self.trials.refresh()
        return self
def fmin(fn, space, algo, max_evals, trials=None, rstate=None,
         allow_trials_fmin=True, pass_expr_memo_ctrl=None,
         catch_eval_exceptions=False,
         verbose=0,
         return_argmin=True,
         points_to_evaluate=None,
         max_queue_len=1
         ):
    """Minimize a function over a hyperparameter space.

    More realistically: *explore* a function over a hyperparameter space
    according to a given algorithm, allowing up to a certain number of
    function evaluations.  As points are explored, they are accumulated in
    `trials`.

    Parameters
    ----------
    fn : callable (trial point -> loss)
        Called with a value generated from `space`.  May return a scalar
        loss or a dictionary containing at least a 'status' key (see
        `STATUS_STRINGS`) and a 'loss' key when the status is `STATUS_OK`.
    space : hyperopt.pyll.Apply node
        Stochastic program (involving hp_<xxx> nodes) describing the set of
        possible arguments to `fn`.
    algo : search algorithm
        e.g. `hyperopt.rand.suggest` or `hyperopt.tpe.suggest`.
    max_evals : int
        Allow up to this many function evaluations before returning.
    trials : None or base.Trials (or subclass)
        Storage for completed, ongoing, and scheduled evaluation points.
        A temporary `base.Trials` is created when None.
    rstate : numpy.RandomState, default numpy.random or `$HYPEROPT_FMIN_SEED`
        Source of the per-call seeds handed to `algo`.
    verbose : int
        Print out some information to stdout during search.
    allow_trials_fmin : bool, default True
        Delegate to `trials.fmin` when the trials object provides one.
    pass_expr_memo_ctrl : bool, default False
        If True, `fn` is called with raw hyperparameters, a partially
        populated `memo`, and a Ctrl object (see fmin_pass_expr_memo_ctrl).
    return_argmin : bool, default True
        If False, return nothing (useful when `len(trials)` may be zero
        after fmin, making `trials.argmin` undefined).
    points_to_evaluate : list, default None
        Only used when trials=None.  A list of dicts (variable name ->
        value) evaluated before the optimisation starts, so the overall
        number of steps is len(points_to_evaluate) + max_evals.
    max_queue_len : integer, default 1
        Length of the queue of suggested trials; increasing it can speed up
        parallel simulations that lag on suggesting a new trial.

    Returns
    -------
    argmin : None or dictionary
        `trials.argmin` when `return_argmin` is True, otherwise nothing.
        Convert to a point in the configuration space with
        `hyperopt.space_eval(space, best_vals)`.
    """
    if rstate is None:
        # Honour the documented HYPEROPT_FMIN_SEED environment variable.
        env_rseed = os.environ.get('HYPEROPT_FMIN_SEED', '')
        if env_rseed:
            rstate = np.random.RandomState(int(env_rseed))
        else:
            rstate = np.random.RandomState()
    if allow_trials_fmin and hasattr(trials, 'fmin'):
        # Delegate to the trials object (e.g. MongoTrials) when it knows how
        # to drive fmin itself.
        return trials.fmin(
            fn, space,
            algo=algo,
            max_evals=max_evals,
            rstate=rstate,
            pass_expr_memo_ctrl=pass_expr_memo_ctrl,
            verbose=verbose,
            catch_eval_exceptions=catch_eval_exceptions,
            return_argmin=return_argmin,
        )
    if trials is None:
        if points_to_evaluate is None:
            trials = base.Trials()
        else:
            assert type(points_to_evaluate) == list
            # BUGFIX: generate_trials_to_calculate() takes the objective
            # values as a second argument; no values are known up-front
            # here, so pass None per point and let the objective be called.
            trials = generate_trials_to_calculate(
                points_to_evaluate, [None] * len(points_to_evaluate))
    domain = base.Domain(fn, space,
                         pass_expr_memo_ctrl=pass_expr_memo_ctrl)
    rval = FMinIter(algo, domain, trials, max_evals=max_evals,
                    rstate=rstate,
                    verbose=verbose,
                    max_queue_len=max_queue_len)
    rval.catch_eval_exceptions = catch_eval_exceptions
    rval.exhaust()
    if return_argmin:
        return trials.argmin
def space_eval(space, hp_assignment):
    """Compute a point in a search space from a hyperparameter assignment.

    Parameters
    ----------
    space - a pyll graph involving hp nodes (see `pyll_utils`).
    hp_assignment - a dictionary mapping hp node labels to values.
    """
    graph = pyll.as_apply(space)
    memo = {}
    # Pre-fill the evaluation memo with the assigned hyperparameter values.
    for node in pyll.toposort(graph):
        if node.name != 'hyperopt_param':
            continue
        label = node.arg['label'].eval()
        if label in hp_assignment:
            memo[node] = hp_assignment[label]
    return pyll.rec_eval(graph, memo=memo)
# -- flake8 doesn't like blank last line
| [
"[email protected]"
] | |
fcbe21ec01c605407a9c8e63d2897b12bea24182 | 3efc4c9787f309c5aae8f9c2ec9d3c5ba2a6e245 | /tests_zeeguu_api/test_user_articles.py | d25fc03ebcebf81aba4ee3b300a9c8e8afdae112 | [
"MIT"
] | permissive | TWEApol/Zeeguu-API | a8bc539902845ea26baa550b4048901a4772d144 | 832f1cb2d0950bd879af72d9e606783698d78b57 | refs/heads/master | 2020-04-12T11:07:58.521931 | 2018-12-19T16:35:43 | 2018-12-19T16:35:43 | 162,450,353 | 0 | 0 | MIT | 2018-12-19T14:40:13 | 2018-12-19T14:40:12 | null | UTF-8 | Python | false | false | 1,447 | py | # coding=utf-8
from unittest import TestCase
from tests_zeeguu_api.api_test_mixin import APITestMixin
from tests_zeeguu.rules.rss_feed_rule import RSSFeedRule
from zeeguu.content_retriever.article_downloader import download_from_feed
from zeeguu.model import RSSFeedRegistration
import zeeguu
from tests_zeeguu_api.test_feeds import FeedTests
# Fixture: article URL used by the tests in this module.
URL_1 = "http://www.spiegel.de/politik/deutschland/diesel-fahrverbote-schuld-sind-die-grenzwerte-kolumne-a-1197123.html"
class UserArticlesTests(APITestMixin, TestCase):
    """API tests for the /user_articles endpoints (starring and liking)."""
    def setUp(self):
        super(UserArticlesTests, self).setUp()
        # Article URL used as the fixture for the test below.
        self.url = URL_1
    def test_starred_or_liked(self):
        # No article is starred or liked initially
        result = self.json_from_api_get(f'/user_articles/starred_or_liked')
        assert (len(result) == 0)
        # Resolve the article id for the fixture URL, then star the article
        article_id = self.json_from_api_get('/article_id', other_args=dict(url=self.url))['article_id']
        self.api_post(f'/user_article', formdata=dict(starred='True', article_id=article_id))
        # Exactly one article is reported as starred now
        result = self.json_from_api_get(f'/user_articles/starred_or_liked')
        assert (len(result) == 1)
        # Additionally liking the same article must not create a duplicate
        self.api_post(f'/user_article', formdata=dict(liked='True', article_id=article_id))
        # Still one article is returned
        result = self.json_from_api_get(f'/user_articles/starred_or_liked')
assert (len(result) == 1) | [
"[email protected]"
] | |
2db92cf2730e4a0824efafd107a9d05518d7effa | d1c67f2031d657902acef4411877d75b992eab91 | /swagger_client/models/opsview_integration.py | 2c79ec74f715056f9173d2ef90332130b9feca0d | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,726 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OpsviewIntegration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'ignore_responders_from_payload': 'bool',
'ignore_tags_from_payload': 'bool',
'ignore_extra_properties_from_payload': 'bool',
'responders': 'list[Recipient]',
'priority': 'str',
'custom_priority': 'str',
'tags': 'list[str]',
'extra_properties': 'dict(str, str)',
'assigned_team': 'TeamMeta',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_read_access': 'bool',
'allow_write_access': 'bool',
'allow_delete_access': 'bool'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'ignore_responders_from_payload': 'ignoreRespondersFromPayload',
'ignore_tags_from_payload': 'ignoreTagsFromPayload',
'ignore_extra_properties_from_payload': 'ignoreExtraPropertiesFromPayload',
'responders': 'responders',
'priority': 'priority',
'custom_priority': 'customPriority',
'tags': 'tags',
'extra_properties': 'extraProperties',
'assigned_team': 'assignedTeam',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_read_access': 'allowReadAccess',
'allow_write_access': 'allowWriteAccess',
'allow_delete_access': 'allowDeleteAccess'
}
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, ignore_responders_from_payload=None, ignore_tags_from_payload=None, ignore_extra_properties_from_payload=None, responders=None, priority=None, custom_priority=None, tags=None, extra_properties=None, assigned_team=None, feature_type=None, allow_configuration_access=None, allow_read_access=None, allow_write_access=None, allow_delete_access=None): # noqa: E501
"""OpsviewIntegration - a model defined in Swagger""" # noqa: E501
self._suppress_notifications = None
self._ignore_teams_from_payload = None
self._ignore_recipients_from_payload = None
self._recipients = None
self._is_advanced = None
self._ignore_responders_from_payload = None
self._ignore_tags_from_payload = None
self._ignore_extra_properties_from_payload = None
self._responders = None
self._priority = None
self._custom_priority = None
self._tags = None
self._extra_properties = None
self._assigned_team = None
self._feature_type = None
self._allow_configuration_access = None
self._allow_read_access = None
self._allow_write_access = None
self._allow_delete_access = None
self.discriminator = None
if suppress_notifications is not None:
self.suppress_notifications = suppress_notifications
if ignore_teams_from_payload is not None:
self.ignore_teams_from_payload = ignore_teams_from_payload
if ignore_recipients_from_payload is not None:
self.ignore_recipients_from_payload = ignore_recipients_from_payload
if recipients is not None:
self.recipients = recipients
if is_advanced is not None:
self.is_advanced = is_advanced
if ignore_responders_from_payload is not None:
self.ignore_responders_from_payload = ignore_responders_from_payload
if ignore_tags_from_payload is not None:
self.ignore_tags_from_payload = ignore_tags_from_payload
if ignore_extra_properties_from_payload is not None:
self.ignore_extra_properties_from_payload = ignore_extra_properties_from_payload
if responders is not None:
self.responders = responders
if priority is not None:
self.priority = priority
if custom_priority is not None:
self.custom_priority = custom_priority
if tags is not None:
self.tags = tags
if extra_properties is not None:
self.extra_properties = extra_properties
if assigned_team is not None:
self.assigned_team = assigned_team
if feature_type is not None:
self.feature_type = feature_type
if allow_configuration_access is not None:
self.allow_configuration_access = allow_configuration_access
if allow_read_access is not None:
self.allow_read_access = allow_read_access
if allow_write_access is not None:
self.allow_write_access = allow_write_access
if allow_delete_access is not None:
self.allow_delete_access = allow_delete_access
@property
def suppress_notifications(self):
"""Gets the suppress_notifications of this OpsviewIntegration. # noqa: E501
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:return: The suppress_notifications of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._suppress_notifications
@suppress_notifications.setter
def suppress_notifications(self, suppress_notifications):
"""Sets the suppress_notifications of this OpsviewIntegration.
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:param suppress_notifications: The suppress_notifications of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._suppress_notifications = suppress_notifications
@property
def ignore_teams_from_payload(self):
"""Gets the ignore_teams_from_payload of this OpsviewIntegration. # noqa: E501
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_teams_from_payload of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_teams_from_payload
@ignore_teams_from_payload.setter
def ignore_teams_from_payload(self, ignore_teams_from_payload):
"""Sets the ignore_teams_from_payload of this OpsviewIntegration.
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:param ignore_teams_from_payload: The ignore_teams_from_payload of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._ignore_teams_from_payload = ignore_teams_from_payload
@property
def ignore_recipients_from_payload(self):
"""Gets the ignore_recipients_from_payload of this OpsviewIntegration. # noqa: E501
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_recipients_from_payload of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_recipients_from_payload
@ignore_recipients_from_payload.setter
def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
"""Sets the ignore_recipients_from_payload of this OpsviewIntegration.
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:param ignore_recipients_from_payload: The ignore_recipients_from_payload of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._ignore_recipients_from_payload = ignore_recipients_from_payload
@property
def recipients(self):
"""Gets the recipients of this OpsviewIntegration. # noqa: E501
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:return: The recipients of this OpsviewIntegration. # noqa: E501
:rtype: list[Recipient]
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this OpsviewIntegration.
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:param recipients: The recipients of this OpsviewIntegration. # noqa: E501
:type: list[Recipient]
"""
self._recipients = recipients
@property
def is_advanced(self):
"""Gets the is_advanced of this OpsviewIntegration. # noqa: E501
:return: The is_advanced of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._is_advanced
@is_advanced.setter
def is_advanced(self, is_advanced):
"""Sets the is_advanced of this OpsviewIntegration.
:param is_advanced: The is_advanced of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._is_advanced = is_advanced
@property
def ignore_responders_from_payload(self):
"""Gets the ignore_responders_from_payload of this OpsviewIntegration. # noqa: E501
:return: The ignore_responders_from_payload of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_responders_from_payload
@ignore_responders_from_payload.setter
def ignore_responders_from_payload(self, ignore_responders_from_payload):
"""Sets the ignore_responders_from_payload of this OpsviewIntegration.
:param ignore_responders_from_payload: The ignore_responders_from_payload of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._ignore_responders_from_payload = ignore_responders_from_payload
@property
def ignore_tags_from_payload(self):
"""Gets the ignore_tags_from_payload of this OpsviewIntegration. # noqa: E501
:return: The ignore_tags_from_payload of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_tags_from_payload
@ignore_tags_from_payload.setter
def ignore_tags_from_payload(self, ignore_tags_from_payload):
"""Sets the ignore_tags_from_payload of this OpsviewIntegration.
:param ignore_tags_from_payload: The ignore_tags_from_payload of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._ignore_tags_from_payload = ignore_tags_from_payload
@property
def ignore_extra_properties_from_payload(self):
"""Gets the ignore_extra_properties_from_payload of this OpsviewIntegration. # noqa: E501
:return: The ignore_extra_properties_from_payload of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_extra_properties_from_payload
@ignore_extra_properties_from_payload.setter
def ignore_extra_properties_from_payload(self, ignore_extra_properties_from_payload):
"""Sets the ignore_extra_properties_from_payload of this OpsviewIntegration.
:param ignore_extra_properties_from_payload: The ignore_extra_properties_from_payload of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._ignore_extra_properties_from_payload = ignore_extra_properties_from_payload
@property
def responders(self):
"""Gets the responders of this OpsviewIntegration. # noqa: E501
:return: The responders of this OpsviewIntegration. # noqa: E501
:rtype: list[Recipient]
"""
return self._responders
@responders.setter
def responders(self, responders):
"""Sets the responders of this OpsviewIntegration.
:param responders: The responders of this OpsviewIntegration. # noqa: E501
:type: list[Recipient]
"""
self._responders = responders
@property
def priority(self):
"""Gets the priority of this OpsviewIntegration. # noqa: E501
:return: The priority of this OpsviewIntegration. # noqa: E501
:rtype: str
"""
return self._priority
@priority.setter
def priority(self, priority):
"""Sets the priority of this OpsviewIntegration.
:param priority: The priority of this OpsviewIntegration. # noqa: E501
:type: str
"""
self._priority = priority
@property
def custom_priority(self):
"""Gets the custom_priority of this OpsviewIntegration. # noqa: E501
:return: The custom_priority of this OpsviewIntegration. # noqa: E501
:rtype: str
"""
return self._custom_priority
@custom_priority.setter
def custom_priority(self, custom_priority):
"""Sets the custom_priority of this OpsviewIntegration.
:param custom_priority: The custom_priority of this OpsviewIntegration. # noqa: E501
:type: str
"""
self._custom_priority = custom_priority
@property
def tags(self):
"""Gets the tags of this OpsviewIntegration. # noqa: E501
:return: The tags of this OpsviewIntegration. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this OpsviewIntegration.
:param tags: The tags of this OpsviewIntegration. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def extra_properties(self):
"""Gets the extra_properties of this OpsviewIntegration. # noqa: E501
:return: The extra_properties of this OpsviewIntegration. # noqa: E501
:rtype: dict(str, str)
"""
return self._extra_properties
@extra_properties.setter
def extra_properties(self, extra_properties):
"""Sets the extra_properties of this OpsviewIntegration.
:param extra_properties: The extra_properties of this OpsviewIntegration. # noqa: E501
:type: dict(str, str)
"""
self._extra_properties = extra_properties
@property
def assigned_team(self):
"""Gets the assigned_team of this OpsviewIntegration. # noqa: E501
:return: The assigned_team of this OpsviewIntegration. # noqa: E501
:rtype: TeamMeta
"""
return self._assigned_team
@assigned_team.setter
def assigned_team(self, assigned_team):
"""Sets the assigned_team of this OpsviewIntegration.
:param assigned_team: The assigned_team of this OpsviewIntegration. # noqa: E501
:type: TeamMeta
"""
self._assigned_team = assigned_team
@property
def feature_type(self):
"""Gets the feature_type of this OpsviewIntegration. # noqa: E501
:return: The feature_type of this OpsviewIntegration. # noqa: E501
:rtype: str
"""
return self._feature_type
@feature_type.setter
def feature_type(self, feature_type):
"""Sets the feature_type of this OpsviewIntegration.
:param feature_type: The feature_type of this OpsviewIntegration. # noqa: E501
:type: str
"""
allowed_values = ["email-based", "token-based"] # noqa: E501
if feature_type not in allowed_values:
raise ValueError(
"Invalid value for `feature_type` ({0}), must be one of {1}" # noqa: E501
.format(feature_type, allowed_values)
)
self._feature_type = feature_type
@property
def allow_configuration_access(self):
"""Gets the allow_configuration_access of this OpsviewIntegration. # noqa: E501
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:return: The allow_configuration_access of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_configuration_access
@allow_configuration_access.setter
def allow_configuration_access(self, allow_configuration_access):
"""Sets the allow_configuration_access of this OpsviewIntegration.
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:param allow_configuration_access: The allow_configuration_access of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._allow_configuration_access = allow_configuration_access
@property
def allow_read_access(self):
"""Gets the allow_read_access of this OpsviewIntegration. # noqa: E501
:return: The allow_read_access of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_read_access
@allow_read_access.setter
def allow_read_access(self, allow_read_access):
"""Sets the allow_read_access of this OpsviewIntegration.
:param allow_read_access: The allow_read_access of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._allow_read_access = allow_read_access
@property
def allow_write_access(self):
"""Gets the allow_write_access of this OpsviewIntegration. # noqa: E501
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:return: The allow_write_access of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_write_access
@allow_write_access.setter
def allow_write_access(self, allow_write_access):
"""Sets the allow_write_access of this OpsviewIntegration.
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:param allow_write_access: The allow_write_access of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._allow_write_access = allow_write_access
@property
def allow_delete_access(self):
"""Gets the allow_delete_access of this OpsviewIntegration. # noqa: E501
:return: The allow_delete_access of this OpsviewIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_delete_access
@allow_delete_access.setter
def allow_delete_access(self, allow_delete_access):
"""Sets the allow_delete_access of this OpsviewIntegration.
:param allow_delete_access: The allow_delete_access of this OpsviewIntegration. # noqa: E501
:type: bool
"""
self._allow_delete_access = allow_delete_access
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OpsviewIntegration, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Instances compare equal when they are the same model type and
    # hold identical attribute dictionaries.
    return (isinstance(other, OpsviewIntegration)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Negation of __eq__ via the == operator.
    return not self == other
| [
"[email protected]"
] | |
ecd40f78f941dd9008f0d868335017f7e4370c6f | fd4fe3432e22a48a9db109abe2fead2fb839467b | /plottest.py | 13ef5ec85b205a16fc18e23982242724d33f7f1d | [] | no_license | DaniBodor/CenModel | d0c3a7d33bd25328fa9f3c24ae7203ebb9645569 | 9b675786900c419391f2ad5fbb326a939ae2bfb8 | refs/heads/master | 2020-05-30T12:33:38.350657 | 2015-09-21T18:34:16 | 2015-09-21T18:34:16 | 42,248,923 | 0 | 0 | null | 2015-09-10T14:30:08 | 2015-09-10T14:18:07 | null | UTF-8 | Python | false | false | 126 | py | import numpy as np
import matplotlib.pyplot as plt

# Minimal matplotlib smoke test: plot sin(x) for x in [0, 5) at 0.1 steps.
x = np.arange(0, 5, 0.1);
y = np.sin(x)
graph = plt.plot(x, y)  # plot() returns a list of Line2D objects
plt.show()  # blocks until the figure window is closed
"[email protected]"
] | |
d9101a4ad1ca3630b0e1df219c54b9976c2c9374 | f14b40c3bfa5a5ea6c18c5f3555523837b360619 | /gis/bin/django-admin.py | 4e59ca4064aef688cc7629416cc384a15927e0d1 | [] | no_license | tayyabsayyad/CommunityGIS | a70175e979619288df6087d99a187a8aea964b4f | b654590d1fa6c6553ac2f57affe404ee0073b730 | refs/heads/master | 2020-05-03T08:58:45.549341 | 2019-03-23T11:38:00 | 2019-03-23T11:38:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | #!/home/user/Desktop/communitygis/gis/bin/python3
from django.core import management

# Thin django-admin entry point: forwards sys.argv to Django's
# management-command dispatcher.
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"[email protected]"
] | |
e2e9ff0919476e0654935161fe7eaeb82b7a5bd4 | 81cfc965cdbf83db5187c60a2788fbbbaf479c89 | /seqbio/pattern/SeqPattern.py | 3c5f66fb81274ef84bf97537185e3a83076de92e | [] | no_license | 6436877/Assign09 | a8828ac4cefe3ec4b68ac56f1a8a2dfec3fec2a3 | 00f25bd698143e7f4010129bcff03b2418f5afd6 | refs/heads/main | 2023-08-30T12:19:04.552444 | 2021-10-23T17:37:20 | 2021-10-23T17:37:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | # SeqPattern module
import re
def cpgSearch(seq):
    """Find every CpG dinucleotide ('CG', case-insensitive) in seq.

    Returns a list of (matched_text, start, end) tuples for each
    non-overlapping match, in order of appearance.
    """
    return [(hit.group(), hit.start(), hit.end())
            for hit in re.finditer(r'CG', seq, re.I)]
def enzTargetsScan(seq, enz):
    """Scan seq for recognition sites of the named restriction enzyme.

    enz must be one of the known enzyme names below; an unknown name
    yields an empty list.  Returns (site_text, start, end) tuples for
    every match (matching is case-sensitive, as before).
    """
    patterns = dict(EcoRI='GAATTC', BamHI='GGATCC',
                    HindIII='AAGCTT', AccB2I='[AG]GCGC[CT]',
                    AasI='GAC[ATCG][ATCG][ATCG][ATCG][ATCG][ATCG]GTC',
                    AceI='GC[AT]GC')
    if enz not in patterns:
        return []
    return [(site.group(0), site.start(), site.end())
            for site in re.finditer(patterns[enz], seq)]
def reverseSeq(seq):
    """Return the sequence reversed (3'->5' reading of the input)."""
    return "".join(reversed(seq))
def complementSeq(seq):
    """Return the complementary DNA strand (same orientation).

    Generalized to accept lowercase bases as well as uppercase; the
    complement preserves the case of each input base.  Any character
    outside ACGT/acgt still raises KeyError, as before.
    """
    compl = {"A": "T", "T": "A", "G": "C", "C": "G",
             "a": "t", "t": "a", "g": "c", "c": "g"}
    # join over a generator avoids building an intermediate list.
    return "".join(compl[base] for base in seq)
def reverseComplementSeq(seq):
    """Return the reverse complement of seq (the opposite strand read 5'->3')."""
    # Complementing the reversed string equals reversing the complement,
    # since the complement is a per-character mapping.
    return complementSeq(seq[::-1])
def dna2rna(seq):
    """Transcribe DNA to RNA by substituting U for every uppercase T."""
    return "".join("U" if base == "T" else base for base in seq)
def dna2protein(seq):
    """Translate a DNA sequence into a one-letter protein string.

    Reads non-overlapping triplets from position 0; codons absent from
    the table (and trailing partial codons) are silently skipped,
    matching the original behaviour.
    """
    table = loadCodons()
    return "".join(table.get(seq[pos:pos + 3], "")
                   for pos in range(0, len(seq), 3))
def loadCodons():
    """Return the standard DNA codon table.

    Maps each DNA triplet to its one-letter amino-acid code.
    'M' (ATG) is the start codon; '_' marks the three stop codons.
    """
    DNA_Codons = {
        # 'M' - START, '_' - STOP
        "GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
        "TGT": "C", "TGC": "C",
        "GAT": "D", "GAC": "D",
        "GAA": "E", "GAG": "E",
        "TTT": "F", "TTC": "F",
        "GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
        "CAT": "H", "CAC": "H",
        "ATA": "I", "ATT": "I", "ATC": "I",
        "AAA": "K", "AAG": "K",
        "TTA": "L", "TTG": "L", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
        "ATG": "M",
        "AAT": "N", "AAC": "N",
        "CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
        "CAA": "Q", "CAG": "Q",
        "CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
        "TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
        "ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
        "GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
        "TGG": "W",
        "TAT": "Y", "TAC": "Y",
        "TAA": "_", "TAG": "_", "TGA": "_"
    }
    return DNA_Codons
| [
"[email protected]"
] | |
fb5b728496ebc201bf64058cff2ee667daef95f6 | 75d266477dd5499499695f1a555c42d6f58a242f | /src/setup.py | 393ec879ee2ffc5695e5f928e83c914dac9505fe | [] | no_license | AlexandrosV/todobackend | 9b9cbcf9559caec78ab8c327447b45f48d244d41 | 2c80cdf648342bc342dd9a21f36f81a1db53adfe | refs/heads/master | 2021-01-11T14:01:41.569086 | 2017-06-25T04:48:30 | 2017-06-25T04:48:30 | 94,932,592 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | from setuptools import setup, find_packages
setup(
name = "todobackend",
version = "0.1.0",
description = "Backend REST service",
packages = find_packages(),
include_packages_data = True,
scripts = ["manage.py"],
install_requires = ["Django>=1.10,<2.0",
"django-cors-headers>=2.0.2",
"djangorestframework>=3.6.2",
#"mysql-connector>=2.1.4",
#"mysqlclient>=1.3.10"],
"MySQL-python>=1.2.5",
"uwsgi>=2.0"],
extras_require = {
"test": [
"colorama>=0.3.9",
"coverage>=4.4.1",
"django-nose>=1.4.4",
"nose>=1.3.7",
"pinocchio>=0.4.2"
]
}
)
# https://caremad.io/posts/2013/07/setup-vs-requirement/
| [
"[email protected]"
] | |
d8ce21f17837e687f89762f3ee89b233b648c8c5 | 8d739ef191dbde5d9d1957df913c1111b6cbd526 | /accounts/migrations/0003_auto_20210413_1419.py | 5d16f98c931175d27225af614502928eb8068f05 | [] | no_license | OmimiCode/pentagram | 3c9ea7c25b2a5e1dbace676576595df0be9c0636 | dfe102a87302e333b7907675eab3545cd44eb801 | refs/heads/main | 2023-04-03T23:28:40.901460 | 2021-04-13T16:44:18 | 2021-04-13T16:44:18 | 357,621,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 3.1.7 on 2021-04-13 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make owner.date_created populate
    automatically at row creation time (auto_now_add)."""

    dependencies = [
        ('accounts', '0002_auto_20210413_1301'),
    ]

    operations = [
        migrations.AlterField(
            model_name='owner',
            name='date_created',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"[email protected]"
] | |
137ed93e7fbcefb5a739bac5f56806a7283f60b5 | 60e318c0cdac24ef2cec0569b9e8156c8a1a85c9 | /six_hats/six_hats/urls.py | 21f6c9ce010fe1587f7366e4fd9ab40f0649c732 | [] | no_license | dkmca/six_hats | 289ca6f1e5fafae75a8638cad2ebef8eb2ce70f4 | 7db77e391dca760baf94a8a8a5096ad7387bc563 | refs/heads/master | 2023-08-12T22:06:32.582578 | 2021-10-12T19:11:33 | 2021-10-12T19:11:33 | 416,453,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | """six_hats URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from users import views
from django.conf import settings
# URL routes: the Django admin site plus the 'users' app (namespaced
# so templates can reverse as 'users:<name>').
urlpatterns = [
    path('admin/', admin.site.urls),
    path('users/', include('users.urls', namespace='users')),
]
| [
"[email protected]"
] | |
dea4553edea13c14e42abc48ce9fbb009a0280b3 | 47b0c93709590972c54fd73127ed73ed2d77751e | /Python/Practice/Basic.py | b8d1d82306ac2e957b99a975618b124d49d19ef4 | [] | no_license | tacticalcheese/Andras89 | 73c48e861d49bf9b2150121924586dbbcfa0c7cf | aca1951f30e85d812115994a4f0cd416dd5dfd1e | refs/heads/master | 2020-07-13T07:07:17.448240 | 2018-11-12T16:03:21 | 2018-11-12T16:03:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | myStr = "Hello"
myInt = 4
myFloat = 5.2
myList = [1, 2, 3, 'Hello']
myDict = {'a': 1, 'b': 2, 'c': 3}
print(type(myStr), myStr)
print(type(myInt), myInt)
print(type(myFloat), myFloat)
print(type(myList), myList)
print(type(myDict), myDict)
print(myList[3])
greetings = myStr + ' World'
print(greetings)
peoples = ['Jim', 'Sara', 'Tim', 'Bob']
for people in peoples:
print(people)
for i in range(len(peoples)):
print(peoples[i]) | [
"[email protected]"
] | |
4c2cab03a7c635f0566611968f154fb5f3a39982 | f8b69e514b28d1a2cb3c85ce8ff06f2e37c51ae4 | /samplesite/samplesite/settings.py | c027dc0fb45a01fedc2a12b0196754f576ede8c7 | [] | no_license | AndreiAniukou/Django-my-test | a483dd69923412cd611395ed62b274a9e18977d4 | 2184c881d1d88ae1780de3b81b2a683f9a9759b7 | refs/heads/master | 2020-10-02T05:03:33.335271 | 2019-12-12T22:29:45 | 2019-12-12T22:29:45 | 227,708,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,132 | py | """
Django settings for samplesite project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w7z9n)*zm8+c(+1^0p2r46e+1$48$0g!)&yjao0@i+$is53ewa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'bboard.apps.BboardConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'samplesite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'samplesite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
601ec0c002fb1acecab3bd8c3a699e2687c05ec5 | 79ea60694f4bfd4112d6e0e548de1d681894a2cc | /ultraTest.py | a7531d46c5dc4ed0cacc77c99e291aa0060564f3 | [] | no_license | JamesDeacon314/ME200 | 0050626257405ef3dce23d0be58752cd55e97390 | 9fd72fe242797f236870db47d1a4946ed0d431e2 | refs/heads/master | 2022-11-22T05:11:03.071247 | 2020-07-21T17:32:29 | 2020-07-21T17:32:29 | 271,051,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py | import time
import numpy as np
import RPi.GPIO as GPIO
import matplotlib.pyplot as plt
NUM_ULTRA = 5
TRIG = 33
ECHOS = [35, 36, 37, 38, 40]
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
for ECHO in ECHOS:
GPIO.setup(ECHO, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(TRIG, GPIO.OUT)
GPIO.output(TRIG, False)
time.sleep(2)
class Test:
    """Collects HC-SR04 ultrasonic echo timings via GPIO edge callbacks.

    Each callback is registered for BOTH edges of one ECHO pin: on the
    rising edge it records a start timestamp, on the falling edge it
    records the end timestamp and increments the module-level ``done``
    counter so the main loop knows that sensor has reported.
    NOTE(review): the callbacks communicate through module-level globals
    (start_*, back/left/right/..., done), which is racy if edges arrive
    close together -- confirm this is acceptable for the test rig.
    """

    def __init__(self):
        # Last distance reading per sensor; not updated by the callbacks
        # in this file.
        self.lastUltra = np.zeros(NUM_ULTRA)

    def ultra_back_cb(self, channel):
        # Rear sensor: pin high = pulse started, pin low = pulse ended.
        global back, start_back, done
        if (GPIO.input(ECHOS[0])):
            start_back = time.time()
        else:
            back = time.time()
            done += 1

    def ultra_left_cb(self, channel):
        # Left sensor.
        global left, start_left, done
        if (GPIO.input(ECHOS[1])):
            start_left = time.time()
        else:
            left = time.time()
            done += 1

    def ultra_right_cb(self, channel):
        # Right sensor.
        global right, start_right, done
        if (GPIO.input(ECHOS[2])):
            start_right = time.time()
        else:
            right = time.time()
            done += 1

    def ultra_frontLeft_cb(self, channel):
        # Front-left sensor.
        global frontLeft, start_frontLeft, done
        if (GPIO.input(ECHOS[3])):
            start_frontLeft = time.time()
        else:
            frontLeft = time.time()
            done += 1

    def ultra_frontRight_cb(self, channel):
        # Front-right sensor.
        global frontRight, start_frontRight, done
        if (GPIO.input(ECHOS[4])):
            start_frontRight = time.time()
        else:
            frontRight = time.time()
            done += 1
tst = Test()
GPIO.add_event_detect(ECHOS[0], GPIO.BOTH, callback=tst.ultra_back_cb)
GPIO.add_event_detect(ECHOS[1], GPIO.BOTH, callback=tst.ultra_left_cb)
GPIO.add_event_detect(ECHOS[2], GPIO.BOTH, callback=tst.ultra_right_cb)
GPIO.add_event_detect(ECHOS[3], GPIO.BOTH, callback=tst.ultra_frontLeft_cb)
GPIO.add_event_detect(ECHOS[4], GPIO.BOTH, callback=tst.ultra_frontRight_cb)
start = time.time()
back = start
left = start
right = start
frontLeft = start
frontRight = start
done = 0
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
data = []
try:
while True:
time.sleep(0.03)
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while (done < NUM_ULTRA):
time.sleep(0.000001)
done = 0
# print("NEW")
# print((back - start_back) * 17150)
print((left - start_left) * 17150)
data.append((left - start_left) * 17150)
# print((right - start_right) * 17150)
# print((frontLeft - start_frontLeft) * 17150)
# print((frontRight - start_frontRight) * 17150)
except (KeyboardInterrupt, SystemExit):
plt.plot(data)
plt.show()
print("Cleaning up GPIO and Exiting")
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.remove_event_detect(ECHOS[0])
GPIO.remove_event_detect(ECHOS[1])
GPIO.remove_event_detect(ECHOS[2])
GPIO.remove_event_detect(ECHOS[3])
GPIO.remove_event_detect(ECHOS[4])
GPIO.cleanup()
'''
# Prep the sensors
begin = time.time()
for ECHO in ECHOS:
time.sleep(0.01)
start = time.time()
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO) == 0:
pulse_start = time.time()
ready = time.time()
while GPIO.input(ECHO) == 1:
pulse_end = time.time()
# print("ECHO {0}".format(ECHO))
print((ready - start) * 1000)
# print((pulse_end - pulse_start) * 1000)
# print((pulse_end - pulse_start) * 17150)
# print(time.time() - begin)
# print((time.time() - begin) * 1000)
'''
| [
"[email protected]"
] | |
b365234c2a2e1a1d91e63eb6563fff121fcc1aa2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02880/s183675958.py | 4a284eca56a9e9c75253de4e01c3473410db5159 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | N = int(input())
answer = 'No'
for i in range(1, 10):
if N % i == 0:
sho = N // i
if sho < 10:
answer = 'Yes'
break
print(answer) | [
"[email protected]"
] | |
49884242a97b998291a51e619f42165385c8dc13 | ba565bb10288360a66169a107dc4b73b6c04f7c7 | /generator.py | 903a823e8b368bdca298b2d13568c703c83f94a4 | [] | no_license | jinhan/flask-restful-ex | e4fc1fb7a84037f371df77b7a53833fc47102646 | c3ca7083a46586bd5d974e79fe51ae2b3970dd1f | refs/heads/master | 2021-07-12T05:40:39.393227 | 2018-11-23T08:12:51 | 2018-11-23T08:12:51 | 129,373,520 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,558 | py | import datetime
from orm import *
from sqlalchemy.sql import func, and_
from random import randint
from timeit import default_timer as timer
from queries import query_card_data, NoTextError, regionCodeCheck
import uuid
import ast
# from templates_parser import templates_parser
from text_templates import text_templates
deploy_mode = False
def generateMeta(args):
    """Build the meta/cards payload answering one API request.

    args is a dict with keys 'type', 'region', 'party', 'candidate'
    (lists) and 'time' (a '%Y%m%d%H%M%S' string) -- TODO confirm against
    the caller.  In deploy mode a previously generated payload is reused
    when the same arguments were already answered against the same data
    version; otherwise a fresh card scenario is generated and cached.
    Returns the meta dict.
    """
    with session_scope() as sess:
        polls = args['type']
        regions = args['region']
        parties = args['party']
        candidates = args['candidate']
        time = datetime.datetime.strptime(args['time'], '%Y%m%d%H%M%S')
        serial_current = str(uuid.uuid4().hex)
        arguments = args
        # Latest update timestamp of every progress table; together these
        # identify the "data version" this request sees.  first() returns
        # None on an empty table, hence the TypeError guards.
        time_update = []
        try:
            time_update.append(sess.query(VoteProgressLatest.datatime).order_by(VoteProgressLatest.datatime.desc()).first()[0])
        except TypeError:
            pass
        try:
            time_update.append(sess.query(OpenProgress2.datatime).order_by(OpenProgress2.datatime.desc()).first()[0])
        except TypeError:
            pass
        try:
            time_update.append(sess.query(OpenProgress3.datatime).order_by(OpenProgress3.datatime.desc()).first()[0])
        except TypeError:
            pass
        try:
            time_update.append(sess.query(OpenProgress4.datatime).order_by(OpenProgress4.datatime.desc()).first()[0])
        except TypeError:
            pass
        try:
            time_update.append(sess.query(OpenProgress11.datatime).order_by(OpenProgress11.datatime.desc()).first()[0])
        except TypeError:
            pass
        # A matching row means this exact request was already answered
        # against the same data version.
        serial_ontable = sess.query(QueryTime.serial).filter(QueryTime.args==str(arguments), QueryTime.times==str(time_update)).first()
        # BUGFIX: initialise meta_previous up front.  The original set a
        # misspelled 'mete_previous' on one branch and relied on boolean
        # short-circuiting to avoid a NameError.
        meta_previous = None
        if deploy_mode:
            if serial_ontable != None:
                serial_ontable = serial_ontable[0]
                meta_previous = sess.query(MetaCards.meta).filter(MetaCards.serial==serial_ontable).scalar()
        else:
            serial_ontable = None
        if (serial_ontable != None) and (meta_previous != None):
            # Cache hit: reuse the stored payload, flagged as not updated.
            meta = ast.literal_eval(meta_previous)
            meta['updated'] = False
        else:
            # Cache miss: record the query signature, then generate.
            if deploy_mode:
                row = QueryTime(serial=serial_current, args=str(arguments), times=str(time_update))
                sess.add(row)
            card_seqs, seqs_type, template = getCardSeqs(sess, polls, regions, parties, candidates, time)
            meta = {}
            meta['scenario'] = str(card_seqs)
            meta['updated'] = True
            meta['serial'] = serial_current
            meta['card_count'] = len(card_seqs)
            meta['design_variation'] = randint(1, 4)
            meta_cards = []
            index = 0
            for i, card_seq in enumerate(card_seqs):
                # index counts consecutive repeats of the same card id
                # (on i == 0 this compares against card_seqs[-1], which
                # matches the original wrap-around behaviour).
                # BUGFIX: compare with == instead of 'is' -- identity on
                # ints only works by accident for small values.
                if card_seqs[i-1] == card_seq:
                    index += 1
                else:
                    index = 0
                order = i+1
                try:
                    meta_card = query_card_data(text_templates, sess, order, index, polls, regions, parties, candidates, time, card_seq, seqs_type, template)
                except NoTextError:
                    print("pass: ", card_seq)
                    continue
                meta_cards.append(meta_card)
            # De-duplicate cards that rendered identical text.
            meta_cards = list({v['data']['text']:v for v in meta_cards}.values())
            meta['cards'] = meta_cards
            if deploy_mode:
                meta_row = MetaCards(serial=serial_current, meta=str(meta))
                sess.add(meta_row)
        return meta
def getCardSeqs(sess, polls, regions, parties, candidates, time):
    '''
    Decide which card ids (and in what order) make up the story for this
    request, based on the simulated clock and the ballot-counting
    progress.  Returns (card_seqs, seqs_type, template).
    NOTE(review): the literal 'ํฉ๊ณ' below is a mojibake-encoded Korean
    word (presumably the "total" rows) that must match the stored DB
    values exactly -- do not re-encode it.
    '''
    # Clamp the clock: polls close at 19:00 on 2018-06-13 and counting
    # ends after midnight.
    if time > datetime.datetime(2018, 6, 13, 18, 59, 59):
        t = 18
    else:
        t = time.hour
    if time > datetime.datetime(2018, 6, 13, 23, 59, 59):
        t2 = 23
    else:
        t2 = time.hour
    card_seqs = []
    if t2 > 18:
        if (len(candidates) > 0):
            # A specific candidate: look up their district and election
            # type, then take that race's maximum counted percentage.
            try:
                candidate, candidate_region, candidate_poll_code = sess.query(CandidateInfo.name, CandidateInfo.sggName, CandidateInfo.sgTypecode).filter(CandidateInfo.huboid==candidates[0]).first()
            except TypeError:
                raise NoTextError
            if candidate_poll_code == 2:
                openrate = sess.query(func.max(OpenProgress.openPercent)).filter(OpenProgress.sgg==candidate_region, OpenProgress.datatime<=time, OpenProgress.sggCityCode!=None).scalar()
            elif candidate_poll_code == 3:
                openrate = sess.query(func.max(OpenProgress.openPercent)).filter(OpenProgress.sido==candidate_region, OpenProgress.gusigun=='ํฉ๊ณ',OpenProgress.datatime<=time).scalar()
            elif candidate_poll_code == 4:
                openrate = sess.query(func.max(OpenProgress.openPercent)).filter(OpenProgress.gusigun==candidate_region, OpenProgress.datatime<=time, OpenProgress.sggCityCode!=None).scalar()
            elif candidate_poll_code == 11:
                openrate = sess.query(func.max(OpenProgress.openPercent)).filter(OpenProgress.sido==candidate_region, OpenProgress.gusigun=='ํฉ๊ณ',OpenProgress.datatime<=time).scalar()
            else:
                openrate = None
        elif (len(candidates) == 0) and (len(regions) > 0):
            # A region but no candidate: counted rate for that sido.
            region_num, _ = regionCodeCheck(regions[0])
            try:
                region1, region2 = sess.query(PrecinctCode.sido, PrecinctCode.gusigun).filter(PrecinctCode.sggCityCode==region_num).first()
            except TypeError:
                raise NoTextError
            openrate = sess.query(func.max(OpenProgress.openPercent)).filter(OpenProgress.datatime<=time, OpenProgress.sido==region1, OpenProgress.gusigun=='ํฉ๊ณ').scalar()
        elif (len(candidates) == 0) and (len(regions) == 0) and (len(polls) > 0):
            # Only an election type: nationwide counted rate for it,
            # computed as (counted + invalid) / total over all districts.
            if polls[0] == 2:
                s = sess.query(OpenProgress.sgg, func.max(OpenProgress.n_total).label('n_total'), func.max(OpenProgress.invalid).label('invalid'), func.max(OpenProgress.tooTotal).label('tooTotal')).filter(OpenProgress.datatime<=time, OpenProgress.electionCode==2, OpenProgress.sggCityCode!=None).group_by(OpenProgress.sgg)
            elif polls[0] == 3:
                s = sess.query(OpenProgress.sido, func.max(OpenProgress.n_total).label('n_total'), func.max(OpenProgress.invalid).label('invalid'), func.max(OpenProgress.tooTotal).label('tooTotal')).filter(OpenProgress.datatime<=time, OpenProgress.electionCode==3, OpenProgress.gusigun=='ํฉ๊ณ').group_by(OpenProgress.sido)
            elif polls[0] == 4:
                s = sess.query(OpenProgress.gusigun, func.max(OpenProgress.n_total).label('n_total'), func.max(OpenProgress.invalid).label('invalid'), func.max(OpenProgress.tooTotal).label('tooTotal')).filter(OpenProgress.datatime<=time, OpenProgress.electionCode==4, OpenProgress.sggCityCode!=None).group_by(OpenProgress.gusigun)
            elif polls[0] == 11:
                s = sess.query(OpenProgress.sido, func.max(OpenProgress.n_total).label('n_total'), func.max(OpenProgress.invalid).label('invalid'), func.max(OpenProgress.tooTotal).label('tooTotal')).filter(OpenProgress.datatime<=time, OpenProgress.electionCode==11, OpenProgress.gusigun=='ํฉ๊ณ').group_by(OpenProgress.sido)
            openrate = sess.query((func.sum(s.subquery().c.n_total) + func.sum(s.subquery().c.invalid)) / func.sum(s.subquery().c.tooTotal) * 100).scalar()
        else:
            # Nothing selected: default to the sido-level (code 3) rate.
            s = sess.query(OpenProgress.sido, func.max(OpenProgress.n_total).label('n_total'), func.max(OpenProgress.invalid).label('invalid'), func.max(OpenProgress.tooTotal).label('tooTotal')).filter(OpenProgress.datatime<=time, OpenProgress.electionCode==3, OpenProgress.gusigun=='ํฉ๊ณ').group_by(OpenProgress.sido)
            openrate = sess.query((func.sum(s.subquery().c.n_total) + func.sum(s.subquery().c.invalid)) / func.sum(s.subquery().c.tooTotal) * 100).scalar()
    else:
        openrate = 0
    if openrate == None:
        openrate = 0
    # Choose the scenario from the phase of the evening / counted rate.
    if t2 <= 18:  # voting still in progress
        card_seqs.extend([1, 2, 3, 6, 23])
        card_seqs.extend([4] * len(regions))
        card_seqs.extend([5] * len(candidates))
        card_seqs.sort()
        seqs_type = 0
        template = 1
    elif (t2 > 18) and (openrate < 10):  # polls just closed, <10% counted
        card_seqs.extend([1, 2, 3, 6, 22, 23])
        card_seqs.extend([4] * len(regions))
        card_seqs.extend([5] * len(candidates))
        card_seqs.sort()
        seqs_type = 0
        template = 2
    elif (t2 > 18) and (openrate >= 10) and (openrate < 30):  # 10-30% counted
        card_seqs.extend([1, 2, 3, 7, 8, 9, 20, 23])
        card_seqs.extend([4] * len(regions))
        card_seqs.extend([5] * len(candidates))
        card_seqs.extend([10] * len(regions))
        card_seqs.extend([11] * len(polls))
        card_seqs.extend([12] * len(candidates))
        card_seqs.extend([16] * len(regions))
        card_seqs.extend([17] * len(candidates))
        card_seqs.extend([18] * len(parties))
        card_seqs.sort()
        seqs_type = 1
        template = 3
    elif (t2 > 18) and (openrate >= 30):  # >=30% counted
        card_seqs.extend([1, 2, 7, 15, 20, 23])
        card_seqs.extend([10] * len(regions))
        card_seqs.extend([11] * len(polls))
        card_seqs.extend([12] * len(candidates))
        card_seqs.extend([16] * len(regions))
        card_seqs.extend([17] * len(candidates))
        card_seqs.extend([18] * len(parties))
        card_seqs.sort()
        # Card 21 is pinned into the second slot of the story.
        card_seqs.insert(1, 21)
        seqs_type = 1
        if openrate < 100:
            template = 4
        else:
            template = 5
    return card_seqs, seqs_type, template
if __name__ == '__main__':
print(VoteProgressLatest) | [
"[email protected]"
] | |
f4f466644b4aae5377cd187ccf676675629e0c80 | 62748c922acdb14823c9abf27dbc84734b80ad5f | /Elevators.py | b623af5e923d92f79783787c16afee8bd7eb5349 | [] | no_license | FinbarT/Elevators | e8845f094b2de82c07d8d2b7a821a6d68c4704c9 | 3eb3dbd78ce7af0806da019cdcdb464d44e3014c | refs/heads/master | 2016-09-06T13:14:34.294534 | 2014-02-07T08:14:41 | 2014-02-07T08:14:41 | 16,608,185 | 0 | 2 | null | 2015-09-25T13:25:18 | 2014-02-07T08:09:35 | null | UTF-8 | Python | false | false | 13,115 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 28 00:33:44 2013
@author: finbar
Create three classes: Building, Elevator, and Customer.
Equip the building with an elevator. Ask user to customize the number of
floors and the number of customers.
Program should have error checking to make sure the user inputs are
valid. For example, if a user gives non-integer inputs, notify the user
that the inputs are incorrect and prompt again.
Each customer starts from a random floor, and has a random destination
floor.
Each customer will use the elevator only once, i.e., when a customer
moves out of the elevator, he/she will never use it again.
When all customers have reached their destination floor, the simulation
is finished.
Part of the grade on this assignment will be the appropriateness of your
classes, methods, and any functions you use. The quality of the code
will now matter as well as the performance.
All classes' methods require a docstring for a general description of
the method.
Implement both your own strategy and the default strategy and compare.
Your strategy does not have to be better but the comparison is required.
Don't use any global variables.
"""
from random import randint
try:
import curses
except ImportError:
print("""
Curses has to be run in bash/linux, it is not dos/windows compatible.
This program will now exit.
""")
import os
os._exit(0)
import time
class customer(object):
    '''
    A person in the building with a current floor (position) and a target
    floor (destination).  A customer can summon an elevator and, once
    aboard, select the destination floor.
    '''

    def __init__(self, position, destination):
        self.position = position
        self.destination = destination

    def __str__(self):
        return "(p: %d, d: %d)" % (self.position, self.destination)

    def call_elevator(self, elevator_bank):
        '''
        Register this customer as a pending call on the elevator bank and
        return the bank's call queue.
        '''
        pending = elevator_bank.calls
        pending.append(self)
        return pending

    def choose_floor(self, elevator):
        '''
        Point the given elevator at this customer's destination floor.
        '''
        elevator.destination = self.destination
class elevator(object):
    '''
    A single elevator car.  Holds a list of occupants, its current
    position and destination, whether it is free, and the call
    (customer) it is currently answering.  When it arrives at its
    destination it drops off matching occupants and boards its caller.
    '''

    def __init__(self, id_=0):
        self.id_ = str(id_)
        self.occupants = []
        self.position = 0
        self.destination = 0
        self.is_free = True
        self.call = 0

    def __str__(self):
        # Rendered as "[n]" where n is the occupant count.
        return "[%d]" % (len(self.occupants))

    def move(self):
        '''
        Move the lift one floor towards its destination (no-op once it
        has arrived).
        '''
        if self.destination > self.position:
            self.position += 1
        elif self.destination < self.position:
            self.position += -1
        else:
            pass

    def exit_lift(self, floor):
        '''
        Let off every occupant whose destination is the current floor.
        `floor` is the list of customers standing on this floor; leavers
        are appended to it.  Returns the number of customers who exited.
        '''
        exited = 0
        # BUGFIX: iterate over a copy -- removing from self.occupants
        # while iterating it directly skipped the element after each
        # removal, so some arrivals were never let off.
        for person in self.occupants[:]:
            if person.destination == self.position:
                floor.append(person)
                self.occupants.remove(person)
                self.call = 0
                self.is_free = True
                exited += 1
        return exited

    def board_lift(self, floor):
        '''
        Board the customer this lift was called for, removing them from
        `floor` (the list of customers on the current floor).
        '''
        # Iterate over a copy to avoid index problems while removing
        # people from the floor.
        for person in floor[:]:
            if person == self.call:
                self.occupants.append(person)
                person.choose_floor(self)
                floor.remove(person)
class elevator_bank(object):
    '''
    handles all the elevators in the building: owns the list of
    elevators, a queue of pending calls, gives out jobs to free lifts,
    tracks the quantity of people moved, and knows how many floors are
    in the building.
    '''

    def __init__(self, num_of_elevators, num_of_floors):
        self.elevators = [elevator(i) for i in range(num_of_elevators)]
        self.num_of_floors = num_of_floors
        self.calls = []          # customers waiting for a lift
        self.people_moved = 0    # customers delivered so far

    def __str__(self):
        output = ""
        # BUGFIX: num_of_floors is an int, so the original
        # range(len(self.num_of_floors)) raised TypeError.
        for i in range(self.num_of_floors):
            output += self.print_floor(i)
        return output

    def print_floor(self, floor_id):
        '''
        outputs a text string representation of the elevator bank on that
        floor. int(floor_id) is the floor you want to print
        '''
        output = ""
        for elevator in self.elevators:
            if elevator.position == floor_id:
                output += "|%2s|" % elevator
            else:
                output += "| |"
        return output

    def move_lifts(self):
        '''
        calls the elevator.move() function for every lift in the bank
        '''
        for elevator in self.elevators:
            elevator.move()

    def give_job(self, call):
        '''
        takes a job (call) and finds a free lift to take it.  Returns
        True if a lift was found to take the job, None otherwise.
        '''
        for elevator in self.elevators:
            if elevator.is_free:
                elevator.destination = call.position
                elevator.is_free = False
                elevator.call = call
                self.calls.remove(call)
                return True

    def arrivals(self, building):
        '''
        for every lift that has arrived at its destination, let matching
        occupants off and board the caller waiting on that floor.
        '''
        for elevator in self.elevators:
            if elevator.position == elevator.destination:
                exited = elevator.exit_lift(
                    building.floors[elevator.position]
                )
                elevator.board_lift(
                    building.floors[elevator.position]
                )
                self.people_moved += exited

    def get_data(self):
        '''
        returns a string of data on the status of every elevator, how
        many calls are left to answer and how many people have arrived
        at their destinations
        '''
        data = ""
        for elevator in self.elevators:
            data += "Elevator ID: %s Position: %s Destination: %s " % (
                elevator.id_,
                str(elevator.position),
                str(elevator.destination)
            )
            if len(elevator.occupants) > 0:
                data += "Occupant: %s\n" % (
                    elevator.occupants[0]
                )
            else:
                data += "\n"
        data += "People arrived at their destinations: %s\n" % (
            str(self.people_moved)
        )
        data += "Calls left to answer: %s\n" % (str(len(self.calls)))
        return data

    def elevators_busy(self):
        '''
        returns True if there's at least one busy elevator (None
        otherwise, which is falsy).
        '''
        for elevator in self.elevators:
            if not elevator.is_free:
                return True
class building(object):
    '''
    building has a list of lists floors, each containing customers at
    random.  Has an elevator bank with "n" elevators in it.
    '''

    def __init__(self, floors=0, customers=0, num_of_elevators=0):
        # floors: number of storeys; customers: how many to spawn;
        # each entry of self.floors is the list of customers currently
        # standing on that floor.
        self.elevator_bank = elevator_bank(num_of_elevators, floors)
        self.floors = [list() for i in range(floors)]
        self.customers = customers

    def __str__(self):
        '''
        returns a text string representation of the building, displaying
        each floor, their populations, and the elevator bank.
        '''
        output_str = ""
        # Draw from the top floor down so floor 0 appears at the bottom.
        for i in range(len(self.floors) - 1, -1, -1):
            output_str += ((
                "Level:%d Population:%4d|%s" % (i, len(self.floors[i]),
                self.elevator_bank.print_floor(i))
            ))
            output_str += "\n"
        output_str += "------------------------"
        for elevator in self.elevator_bank.elevators:
            output_str += "| %s |" % elevator.id_
        output_str += "\n"
        return output_str

    def spawn_customers(self):
        '''
        randomly places a customer around the building for every customer
        that's meant to be in the building
        '''
        for i in range(0, self.customers):
            # Re-roll until position differs from destination so nobody
            # starts on the floor they want to reach.
            while True:
                position = randint(0, len(self.floors) - 1)
                destination = randint(0, len(self.floors) - 1)
                if position == destination:
                    continue
                else:
                    self.floors[position].append(
                        customer(position, destination)
                    )
                    break

    def to_screen(self, screen, data):
        '''
        changes the frame and outputs it to the screen.
        '''
        # 1-second delay paces the animation; screen is a curses window.
        time.sleep(1)
        screen.erase()
        screen.addstr(str(self))
        screen.addstr(data)
        screen.refresh()

    def to_file(self, output_file, data):
        '''
        pastes the current frame to a file
        '''
        output_file.write(str(self))
        output_file.write(data)
        output_file.write("---------------------------------------------------"
                          "------------------------------------------------\n")
def run_cycle(building, screen, output_file):
    '''
    Run one simulation tick: process lifts that have arrived (people
    boarding/exiting), snapshot the bank status, move every lift one
    step, then render the frame to both the curses screen and the log
    file.

    NOTE(review): the status text is captured *before* move_lifts()
    but the frame is drawn *after* it, so the printed status can lag
    the drawn lift positions by one step - confirm this is intended.
    '''
    building.elevator_bank.arrivals(building)
    data = building.elevator_bank.get_data()
    building.elevator_bank.move_lifts()
    building.to_screen(screen, data)
    building.to_file(output_file, data)
def simulate(building):
    '''
    Simulate a building with elevators and customers.  Takes a call
    from each customer and sends a lift to collect them; customers are
    moved one at a time per lift to their destinations.  Outputs the
    status of the building to "elevators.txt" and the curses screen on
    every cycle.

    NOTE(review): output_file/curses are not cleaned up if an
    exception escapes the loops (no try/finally) - confirm acceptable
    for this simulation.
    '''
    data = ""
    output_file = open("elevators.txt", 'w')
    building.to_file(output_file, data)
    screen = curses.initscr()
    building.to_screen(screen, data)
    # Every customer whose destination differs from their current
    # floor places a call with the elevator bank.
    for floor in building.floors:
        for person in floor:
            if person.destination != building.floors.index(floor):
                person.call_elevator(building.elevator_bank)
    # A clone is taken to prevent an index-out-of-range on the loop
    # below after calls are removed from the calls queue.
    clone_calls = building.elevator_bank.calls[:]
    for call in clone_calls:
        job_actioned = False
        # Keep cycling until a free lift accepts this call.
        while not job_actioned:
            data = ""  # NOTE(review): reassignment appears redundant; data is unused here
            job_actioned = building.elevator_bank.give_job(call)
            run_cycle(building, screen, output_file)
    # Drain: keep cycling until every lift has finished its last job.
    while building.elevator_bank.elevators_busy():
        run_cycle(building, screen, output_file)
    time.sleep(3)
    output_file.close()
    curses.endwin()
def get_input(prompt_input, max_value, min_value, bad_input):
    '''
    Read an integer from the console, re-prompting until the value is
    within [min_value, max_value].  Non-numeric input and out-of-range
    values each print an explanatory message and ask again.
    '''
    while True:
        try:
            value = int(input(prompt_input))
        except ValueError:
            print("Oops! That was not a valid number. Please try again...")
            continue
        if value < min_value:
            print("Number too small")
        elif value > max_value:
            print(bad_input)
        else:
            return value
def main():
    '''
    Prompt the user for the building parameters, initialise the
    building, fill it with customers and run the elevator simulation.
    '''
    floors = get_input(
        "Please enter the number of floors, can't be more "
        "than 10, must be more than 1:\n",
        10,
        2,
        "You can't have that many floors, must be 10 or less\n"
    )
    customers = get_input(
        "Please enter the number of customers, can't be "
        "more than 9999, can't be zero:\n",
        9999,
        1,
        # Fix: the rejection message claimed "10 or less" although the
        # accepted maximum is 9999.
        "You can't have that many customers, must be 9999 or less\n"
    )
    elevators = get_input(
        "Please enter the number of elevators, can't be "
        "more than 10, can't be zero:\n",
        10, 1,
        # Fix: the rejection message claimed "5 or less" although the
        # accepted maximum is 10.
        "You can't have that many elevators, must be 10 or less\n"
    )
    my_building = building(floors, customers, elevators)
    my_building.spawn_customers()
    simulate(my_building)
# Run the simulation only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
51c1f49b73cecb48ebf56e507895fb6788631ed0 | e7fef3805af173b28146bf3192c52e7893f37661 | /build/lib/task/_native.py | 71a53b3e47b786e94c7b75c7b85fea02aadc06c8 | [] | no_license | hellbound22/python-rust-integration | b18864a01ac355de8d0af20a7153580d7e5337eb | 2a85032cbe1b058e8291daa1000e9d4e75fee716 | refs/heads/master | 2022-08-10T05:46:29.101468 | 2020-05-26T17:48:22 | 2020-05-26T17:48:22 | 263,789,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | # auto-generated file
# cffi loader stub: exposes the compiled shared library ("lib") and its
# FFI handle ("ffi").  NOTE(review): this file is build-generated -
# regenerate it rather than hand-editing.
__all__ = ['lib', 'ffi']
import os
from task._native__ffi import ffi
# 4098 is a dlopen flag bitmask chosen by the cffi generator -- confirm
# its meaning against the cffi version before changing it.
lib = ffi.dlopen(os.path.join(os.path.dirname(__file__), '_native__lib.so'), 4098)
del os
| [
"[email protected]"
] | |
a053d10dcf454607306d75e0fa321b776f0bfcbd | f36b521a4d8bf21750948284c046facee85a8cdd | /dimerfinder_analyze.py | 510b0444ee174c0a92d351435a3c6a4e980a434d | [] | no_license | attamatti/asymmetric_dimer_finder | d8e93e6a44e5c311a4e4c7b68c8f2a8bdfbf855f | db58ac4c6413f94963a59d072c5b914bcfe1a342 | refs/heads/master | 2020-08-04T11:18:50.170285 | 2019-10-09T11:46:22 | 2019-10-09T11:46:22 | 212,121,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,834 | py | #!/usr/bin/env python
# Python 2 script: filters dimer candidates from a '%%'-delimited results
# file, then shells out to UCSF Chimera to measure the rotation between
# the two chains and the sequence identity of the matched halves.
import sys
import os
import subprocess
FNULL = open(os.devnull, 'w')
#### update the path to chimera here ############
chimerapath='/fbs/emsoftware2/env-modules/install/chimera/1.13.1/bin/chimera'
#################################################
errmsg = 'USAGE: dimerfinder_analyze_results.py <results file> <cc threshold> <rmsd threshold>'
# NOTE(review): the three bare excepts below swallow every error type
# (including KeyboardInterrupt); catching IndexError/IOError/ValueError
# explicitly would be safer.
try:
    data = open(sys.argv[1],'r').readlines()
except:
    sys.exit('ERROR reading results file\n{0}'.format(errmsg))
try:
    ccthresh = float(sys.argv[2])
except:
    sys.exit('ERROR cc thresh not specified\n{0}'.format(errmsg))
try:
    rmsdthresh = float(sys.argv[3])
except:
    sys.exit('ERROR rmsd thresh not specified\n{0}'.format(errmsg))
# One record per pdb file, '%%'-separated; field indices defined below.
allpdbs = {} #{file:filename, Chain1, Chain2, number of 1->2 contacts, number of 2->1 contacts, contact correlation,rmsd,min oligomer in file,protein name}
chain1,chain2,con12,con21,cc,rmsd,oligo,protein = range(0,8)
for line in data:
    i = line.split('%%')
    allpdbs[i[0]] = i[1:]
cwd = os.getcwd()
## find dimers
hits = 0  # NOTE(review): never updated or read afterwards
hitlist = []
# NOTE(review): rotang is initialised once, outside the loop; if a
# chimera run produces no 'Rotation angle' line, the value from the
# previous hit is silently reused.  Same risk for seq1/seq2, which are
# unbound until a 'Residues:' line is seen.
rotang = 0
print('\npdb file\tcc\trmsd\t#int\trot\tseqid\tprotein')
for i in allpdbs:
    print(i)
    # A hit is a dimer below both the cc and rmsd thresholds.
    if allpdbs[i][oligo] == '2' and float(allpdbs[i][cc]) < ccthresh and float(allpdbs[i][rmsd]) < rmsdthresh:
        hitlist.append(i)
        ## write chimera script to find the symmetry relations
        chiscript = open('tmp/chirot.cmd','w')
        chiscript.write('open {0}/{1}; open {0}/{1};mmaker #1:.{2} #0:.{3} verbose True;measure rotation #0 #1'.format(cwd,i,allpdbs[i][chain1],allpdbs[i][chain2]))
        chiscript.close()
        # NOTE(review): shell=True with interpolated paths - fine for a
        # trusted local pipeline, unsafe for untrusted filenames.
        runchimera2 = subprocess.Popen('{0} --nogui {1}/chirot.cmd'.format(chimerapath,'{0}/tmp'.format(os.getcwd())), shell=True, stdout=subprocess.PIPE,stderr=FNULL)
        chimeraout2 = runchimera2.stdout.read()
        con = 0
        # con tracks the current line index so the residue lists can be
        # read from fixed offsets below the 'Residues:' marker.
        for j in chimeraout2.split('\n'):
            if 'Rotation angle (degrees)' in j:
                rotang = float(j.split()[-1])
            if 'Residues:' in j:
                seq1 = chimeraout2.split('\n')[con+2]
                seq2 = chimeraout2.split('\n')[con+4]
            con+=1
        allpdbs[i].append(rotang)
        #calculate sequence overlap of the supposedly 'identical' halves
        seqreplace = [x.replace('#0','#1').replace('.{0}'.format(allpdbs[i][chain2]),'.{0}'.format(allpdbs[i][chain1])) for x in seq2.split(',')]
        seqid = len(set(seq1.split(','))&set(seqreplace))/float(len(seq1.split(',')))
        allpdbs[i].append(seqid)
# now filter on more characteristics: rotation angle well away from 0/180
# degrees and >90% sequence identity between the halves.
finalhits = []
for i in hitlist:
    if 10 < allpdbs[i][-2] < 170 and allpdbs[i][-1] > 0.90:
        finalhits.append([i,str(round(float(allpdbs[i][cc]),2)),str(round(float(allpdbs[i][rmsd]),2)),str(allpdbs[i][con21]),str(round(allpdbs[i][-2],2)),str(allpdbs[i][-1]),allpdbs[i][protein].replace('\n','')])
# Sort by the number of 2->1 contacts, highest first.
finalhits.sort(key=lambda x:float(x[3]),reverse=True)
if len(finalhits) > 0:
    for i in finalhits:
        print'\t'.join(i)
else:
    print('** no matches found **')
| [
"[email protected]"
] | |
e61024a34864c42729e3e8999a2983b453e94f07 | 78158f942bd0e223f11016457a494c76b7a689b4 | /plugins/DBpedia/__init__.py | f615a8019480fe6a0bcb8119a3abe6686149c49e | [] | no_license | frumiousbandersnatch/supybot-plugins | ad607dd22529372d72c7dc0083100b5b5cdb719b | 8c7f16c0584bdf393a56dccff6b35a83142e5ece | refs/heads/master | 2021-01-17T05:46:10.917776 | 2020-03-12T11:42:20 | 2020-03-12T11:42:20 | 6,639,759 | 0 | 1 | null | 2020-03-12T11:42:22 | 2012-11-11T15:21:36 | Python | UTF-8 | Python | false | false | 1,091 | py |
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = '' # 'http://supybot.com/Members/yourname/DBpedia/download'
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"[email protected]"
] | |
906e36811116556360e0fea22746fae6de8ad3c1 | e4fb9be53a9b341e5436ec38633aa2f539dc2c94 | /alpha/alpha/settings/production.py | 201f8ccc9ac69eb03785b818dad5388faeb156a5 | [] | no_license | NilSagor/dj-shop | dc8fa7c2ded9eb949cb8190313fb6f42c94d8938 | 1a760a795c695a44e628afd26a24cea0ab6e6e87 | refs/heads/master | 2020-04-28T07:30:35.251026 | 2019-03-25T17:00:42 | 2019-03-25T17:00:42 | 175,095,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,357 | py | """
Django settings for alpha project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Three dirname() calls because this file lives inside a settings
# package (alpha/settings/production.py), so BASE_DIR is the repo root.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; it should be
# loaded from the environment for a real production deployment.
SECRET_KEY = '=__31b(b0q+dgnw4=y2(gmxqn#)8^ysajpwpm7l!-eb(@_0+67'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True with empty ALLOWED_HOSTS in a module named
# "production" looks wrong - confirm before deploying.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'products',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'alpha.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'alpha.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# NOTE(review): SQLite in a production settings module - confirm.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static-storage"),
]
# Collected static files are served from a sibling of the repo root.
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static-serve")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static-serve","media_root")
"[email protected]"
] | |
5bfb3ea283671b5dcfc0719b67157f0fede31488 | a7f03c6a0cde3ba417d9b9bfbd0f6e81ed6c7e20 | /A Byte of Python/Part 1/FuncDefault.py | f3291b810b8fddb69c6920ea1fe45c6625e82e68 | [] | no_license | egolgovskikh/LearningPython | 23c48cb82e4e15f6a04108e18ae662f92dce6f53 | 68c28249ea0cb534ecedda0d9035aedc1cb407ce | refs/heads/master | 2023-01-06T22:30:58.628664 | 2019-06-16T00:49:03 | 2019-06-16T00:49:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | def Say(message, times=1):
print(message * times)
# Demonstrate the default argument: 'Hello' is printed once (times
# defaults to 1), 'Hi' is repeated ten times on one line.
Say('Hello')
Say('Hi',10)
"[email protected]"
] | |
df48dd9edd153a83cbff72dfc248e1d1d104ba5d | a25db3a841e844d0867a7654e2ba697dcdeb21b4 | /easyjob/migrations/0005_auto_20200208_2201.py | db2e76b9881fb5d51a698c8e507dfbcc44c6c16a | [] | no_license | mrregmi420/FinalProject | 9df9fcb6c8ae9fc4c24466e020257d477fdd9837 | 5e47fdfa350483f755ec5ca762f5ffb5f022713b | refs/heads/master | 2021-01-14T00:34:04.560799 | 2020-02-23T15:21:39 | 2020-02-23T15:21:39 | 242,544,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # Generated by Django 2.2.7 on 2020-02-08 16:16
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames every Company field to a
    # 'c'-prefixed name.  Do not hand-edit generated migrations; create
    # a new one instead.

    dependencies = [
        ('easyjob', '0004_auto_20200208_2059'),
    ]

    operations = [
        migrations.RenameField(
            model_name='company',
            old_name='CompanyName',
            new_name='cCompanyName',
        ),
        migrations.RenameField(
            model_name='company',
            old_name='Email',
            new_name='cEmail',
        ),
        migrations.RenameField(
            model_name='company',
            old_name='Location',
            new_name='cLocation',
        ),
        migrations.RenameField(
            model_name='company',
            old_name='Mobile',
            new_name='cMobile',
        ),
        migrations.RenameField(
            model_name='company',
            old_name='VacancyNumber',
            new_name='cVacancyNumber',
        ),
        migrations.RenameField(
            model_name='company',
            old_name='VacantPost',
            new_name='cVacantPost',
        ),
    ]
| [
"[email protected]"
] | |
33559d8f59cb5c2a4726a35358b73bbba9b78c37 | 00307e60ad07f5dad1cd8beb413b7088959fd84b | /player-53).py | af17f0dbfd3a44d9d7b6c29f780ddf2b62146bad | [] | no_license | Anithasivakumar/python-pgms | 61b910f17a9f844aea7e36c60ef8836aea768ba6 | 9c470920e2901171ba620fd6df989596478759c9 | refs/heads/master | 2022-03-14T02:31:24.941596 | 2019-08-11T14:20:38 | 2019-08-11T14:20:38 | 198,208,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | s=str(input(""))
# Count the characters of the input string `s` (read on the line above)
# and print the total.
c = len(s)
print(c)
| [
"[email protected]"
] | |
c2f4fdf18767021c54854cc37d8d4bf8ee63fb12 | 0728a2e165808cfe5651693a6e7f47804bfb085f | /ry/trunk-ry/rynok/controllers/search.py | 609883c81edde290944a09278ccf32a77d0a8efc | [] | no_license | testTemtProj/OLD_PROJECT | 5b026e072017f5135159b0940370fda860241d39 | 9e5b165f4e8acf9003536e05dcefd33a5ae46890 | refs/heads/master | 2020-05-18T15:30:24.543319 | 2013-07-23T15:17:32 | 2013-07-23T15:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,067 | py | # coding: utf-8
from rynok.lib.base import BaseController, render
from pylons import request, session, tmpl_context as c
from pylons.controllers.util import abort, redirect
from rynok.lib import sphinxapi
import json
from rynok.model.referenceModel import ReferenceModel
from rynok.model.vendorsModel import VendorsModel
from rynok.model.categoriesModel import CategoriesModel
from rynok.model.settingsModel import SettingsModel
import re
import urllib
class SearchController(BaseController):
    """
    Product search controller (docstring translated from Russian).
    Python 2 / Pylons code: note the print statements and integer
    division below.
    """
    def _search(self, query):
        """
        Run a full-text Sphinx query for *query* and return the raw
        sphinxapi result dict (None on failure).

        NOTE(review): sphinxapi's SetFieldWeights replaces the whole
        weight map on each call, so only the last call
        ({"description": 1}) may actually be in effect - confirm
        against the sphinxapi version in use.
        """
        sphinx = sphinxapi.SphinxClient()
        sphinx.SetFieldWeights({"title":30})
        sphinx.SetFieldWeights({"vendor":20})
        sphinx.SetFieldWeights({"category":10})
        sphinx.SetFieldWeights({"description":1})
        return sphinx.Query("* "+unicode(query)+" *")
    def index(self, cat_url, page):
        """
        Main search screen (docstring translated from Russian).

        Route arguments:
            cat_url -- URL slug of the category to restrict the search to
            page -- current pagination page (string)

        Request parameters read:
            query -- search string
            m_id / v_id -- JSON arrays of selected shop / vendor ids
            price_min / price_max -- price filter bounds
            sort_by / sort_order -- sort field ('price'/'rating'/'popular')
                and direction
            per_page -- products per page (default 10)
            currency -- currency code for prices (default 'UAH')

        Sets template variables on ``c`` (products, filter state,
        sort settings, pagination, meta tags, banner, noresult flag)
        and renders ``search.mako.html``, or ``noresults.mako.html``
        when nothing was found.
        """
        c.find = unicode(request.params.get('query', ""))
        if not c.find:
            c.find = " "
        # Only offer a "back" link when the referer is on our own host.
        referer = request.headers.get('Referer', '')
        http_host = request.environ.get('HTTP_HOST')
        c.back_url = referer
        if referer.find(http_host) == -1:
            c.back_url = '/'
        # NOTE(review): c.find is always at least " " by this point, so
        # this 404 branch looks unreachable - confirm before removing.
        if not len(c.find):
            return abort(status_code=404)
        sphinx_result = self._search(c.find)
        if sphinx_result is None:
            c.cats = self._get_popular_categories()
            return render('noresults.mako.html')
        search_result = sphinx_result['matches']
        if not len(search_result):
            c.cats = self._get_popular_categories()
            return render('noresults.mako.html')
        c.search_query = '?query=%s' % (c.find)
        categories_model = CategoriesModel()
        # Collect the matched product ids plus the distinct shops,
        # vendors and categories they belong to (drives the filters).
        ids = []
        c.all_markets = set()
        c.all_vendors = set()
        c.cats = set()
        query = {}
        for result_item in search_result:
            ids.append(result_item['id'])
            c.all_markets.add(result_item['attrs']['shopid'])
            if result_item['attrs']['vendor_attr']>0:
                c.all_vendors.add(result_item['attrs']['vendor_attr'])
            c.cats.add(result_item['attrs']['categoryid'])
        c.cats = list(c.cats)
        c.all_vendors = list(c.all_vendors)
        c.all_markets = list(c.all_markets)
        # Mongo-style query restricted to the Sphinx hits.
        query['id_int'] = {'$in':ids}
        c.current_cat = categories_model.getByURL(url = cat_url)
        if c.current_cat:
            query['categoryId'] = c.current_cat['ID']
        try:
            c.markets = json.loads(request.params.get('m_id', '[]'))
            if len(c.markets):
                query['shopId'] = {'$in':c.markets}
        except ValueError:
            print 'bad param m_id'
            c.markets = []
        try:
            c.vendors = json.loads(request.params.get('v_id', '[]'))
            if len(c.vendors):
                query['vendor'] = {'$in':c.vendors}
        except ValueError:
            print 'bad param v_id'
            c.vendors = []
        c.currency = request.params.get('currency', 'UAH')
        c.price_query = query
        # Upper bound of the price slider for the current filter set.
        c.affordable_price = int(ReferenceModel.get_max_price(query, c.currency)) + 1
        query[c.currency] = {}
        c.price_min = int(request.params.get('price_min', 0))
        query[c.currency]['$gt'] = c.price_min
        c.price_max = request.params.get('price_max', None)
        if c.price_max:
            query[c.currency]['$lt'] = int(c.price_max)
        count_products = ReferenceModel.get_count(query=query)
        if count_products == 0:
            # Nothing matched the filters: remember that in the session
            # and restart the search with just the bare query string.
            c.find = c.find.encode('utf-8')
            _url = '/search/?query='+urllib.quote(c.find, '/')
            session['noresult']=True
            print session
            session.save()
            return redirect(_url, 301)
            # NOTE(review): the two lines below are unreachable dead
            # code left behind after the redirect was introduced.
            c.cats = self._get_popular_categories()
            return render('/noresults.mako.html')
        print session
        sort_by = request.params.get('sort_by', 'price')
        sort_order = request.params.get('sort_order', 'desc')
        c.per_page = int(request.params.get('per_page', 10))
        # Map the UI sort key onto the storage field name.
        if sort_by == 'rating':
            by = 'Rate'
        elif sort_by == 'price':
            by = c.currency
        elif sort_by == 'popular':
            by = 'popular'
        else:
            by = 'price'
        c.products = []
        c.sort_settings = {sort_by:sort_order}
        c.page = page
        # NOTE(review): relies on Python 2 integer division.
        c.total_pages = count_products / c.per_page
        if count_products % c.per_page:
            c.total_pages += 1
        c.current_url = "/search"
        if c.current_cat:
            c.current_url += c.current_cat['URL']
        c.current_url += "/" + page
        if (c.price_max and (int(c.price_max) > c.affordable_price)) or c.price_max is None:
            c.price_max = c.affordable_price
        # NOTE(review): c.products is still empty here, so meta_keywords
        # always ends up empty - this loop probably belongs after the
        # products are fetched below; confirm.
        keywords = []
        for product in c.products:
            keywords.append(product['title'].strip())
        c.meta_title = u'ะ ัะฝะพะบ | ะ ะตะทัะปััะฐัั ะฟะพะธัะบะฐ โ ยซ%sยป'%(c.find)
        c.meta_keywords = ', '.join(keywords)
        c.meta_description = u'ะ ัะฝะพะบ Yottos ะพะฑัะตะดะธะฝัะตั ะฒัะต ะธะฝัะตัะฝะตั-ะผะฐะณะฐะทะธะฝั ะฒ ะพะดะฝะพะผ ะผะตััะต, ะฟะพะผะพะณะฐะตั ะฟะพะบัะฟะฐัะตะปัะผ ะฝะฐะนัะธ ัะฐะผะพะต ะฒัะณะพะดะฝะพะต ะฟัะตะดะปะพะถะตะฝะธะต, ะฐ ะฟัะพะดะฐะฒัะฐะผ โ ะทะฐะธะฝัะตัะตัะพะฒะฐะฝะฝัั ะบะปะธะตะฝัะพะฒ.'
        c.banner = SettingsModel.get_search_page_banner()
        # TODO: replace this crude parameter check with a proper one
        # (translated from the original Russian TODO).
        if len(request.params)==1 and c.current_cat is None:
            # Bare query: preserve Sphinx's relevance order by fetching
            # the hit ids one page at a time.
            for product_id in ids[(int(page)-1)*c.per_page:(int(page))*c.per_page]:
                product = ReferenceModel.get_reference(where={'id_int': product_id}, one=True)
                if product:
                    c.products.append(product)
            c.sort_settings = {'price':sort_order}
        else:
            c.products = ReferenceModel.get_reference(where=query, perPage=c.per_page, page=int(page)-1, by=by, direction=sort_order)
        # One-shot session flag set by the zero-results redirect above:
        # show the "previous query had no results" banner exactly once.
        if 'noresult' in session and session['noresult']:
            c.noresult = u"ะะพ ะฟัะตะดัะดััะตะผั ะทะฐะฟัะพัั ัะพะฒะฐัั ะฝะต ะฝะฐะนะดะตะฝั, ะฟะพะบะฐะทะฐะฝะฝั ะฒัะต ัะตะทัะปััะฐัั"
        else:
            c.noresult = ""
        session['noresult'] = False
        session.save()
        return render('/search.mako.html')
    def _get_popular_categories(self):
        """
        Return the non-empty top-level categories.

        NOTE(review): CategoriesModel is used without instantiation
        here (unlike in index); presumably getChildrens works on the
        class itself - confirm.
        """
        categories_model = CategoriesModel
        cats = categories_model.getChildrens(categoryId=0, non_empty=True)
        categories = []
        for cat in cats:
            categories.append(cat)
        return categories
| [
"[email protected]"
] | |
0ad3e43740b068d357f695f9cfdaa06c7d04c777 | a471ecf59e2b770bb11417e8524c81ae9942445d | /conf.py | e11bf811accb0fe1152420ec408321cb8892c4a9 | [] | no_license | AmbikaPuvvula/TD | 78b5e330d6f91797630b871b60f793d3bb5d507b | b20e6d7878ee80a03aaa2e5e82f4bd33ddeabf5d | refs/heads/main | 2023-03-06T19:30:50.407153 | 2021-02-18T07:52:58 | 2021-02-18T07:52:58 | 339,961,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'Git Technical Doc'
copyright = '2021, Ambika'
author = 'Ambika'

# The full version, including alpha/beta/rc tags
release = '1'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# Fix: Sphinx expects a language *code* here ('en'), not the language
# name 'english', which is not a valid value.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
"[email protected]"
] | |
bc5266e3378e9197b3743edc98f209f60943649b | d5a3e304ac3033fd720d69bb85346107826bf0c6 | /sgb/models.py | 79833f9b53d180d21cace6fcb140fdc46636691e | [] | no_license | PablloMatheus/SGB | 098c10e8ced9c4e0676007177693dac81a20e8e7 | 42745e2752adf67c6af5b16d5089582a9df21d5d | refs/heads/master | 2020-03-30T10:45:01.661286 | 2018-10-01T18:13:10 | 2018-10-01T18:13:10 | 151,134,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,068 | py | from django.db import models
from django.core.validators import RegexValidator
# Course and semester choice tuples shared by several models below.
# (The accented strings are stored exactly as they appear in the DB.)
CC = 'Ciรชncia da Computaรงรฃo'
SPI = 'Sistemas para Internet'
GTI = 'Gestรฃo da Tecnologia da Informaรงรฃo'
RC = 'Redes de Computadores'
CURSO = (
    (CC, 'Ciรชncia da Computaรงรฃo'),
    (SPI, 'Sistemas para Internet'),
    (GTI, 'Gestรฃo da Tecnologia da Informaรงรฃo'),
    (RC, 'Redes de Computadores'),
)
P5 = 'P5'
P7 = 'P7'
P8 = 'P8'
PERIODO = (
    (P5, '5ยฐ '),
    (P7, '7ยฐ '),
    (P8, '8ยฐ '),
)
class Professor(models.Model):
    # A lecturer: identity, qualification level and areas of expertise.
    ESP = 'ESP'
    MS = 'MS'
    DR = 'DR'
    PDR = 'PรS Dr'
    # Qualification choices: specialist, master, doctor, post-doc.
    FORMACAO = (
        (ESP, 'ESPECIALISTA'),
        (MS, 'MESTRE'),
        (DR, 'DOUTOR'),
        (PDR, 'PรS DOUTORADO'),
    )
    professor_nome = models.CharField(max_length=100, blank = False)
    # Registration number: digits only, unique, up to 6 characters.
    professor_matricula = models.CharField(max_length = 6, blank = False, unique = True, validators=[RegexValidator(r'^[0-9]*$', 'Digite um nรบmero de matricula vรกlido')])
    area_de_atuacao = models.CharField(max_length=50, blank=False)
    formacao = models.CharField(max_length=100, choices=FORMACAO, blank=False)
    graduacao = models.CharField(max_length=500, blank=False)
    especializacao = models.CharField(max_length=500, blank=True)
    mestrado = models.CharField(max_length=500, blank=True)
    doutorado = models.CharField(max_length=500, blank=True)
    pos_doutorado = models.CharField(max_length=500, blank=True)
    area_de_conhecimentos = models.CharField(max_length=500, blank=False)
    area_de_orientacao = models.CharField(max_length=500, blank=False)
    info_relevante = models.CharField(max_length=1000, blank=True)
    def __str__(self):
        return self.professor_nome
class Curso(models.Model):
    # A degree course with its coordinating professor.
    curso_nome = models.CharField(max_length=50, choices=CURSO, blank=False, unique=True)
    # PROTECT: a professor who coordinates a course cannot be deleted.
    coordenador = models.ForeignKey(Professor, on_delete=models.PROTECT, blank = False, null = False, related_name = 'coordenador', default = '')
    def __str__(self):
        return self.curso_nome
class Aluno(models.Model):
    # A student: identity, contact details, course and semester.
    aluno_nome = models.CharField(max_length=100)
    # 10-11 digit phone number.  NOTE(review): the validator's error
    # message says "matricula" (copy-paste from the field below).
    aluno_telefone = models.CharField(max_length = 11,blank=False, null=False, validators=[RegexValidator(r'^\d{10,11}$', 'Digite um nรบmero de matricula vรกlido')])
    aluno_matricula = models.CharField(max_length = 10, blank = False, unique = True, validators=[RegexValidator(r'^[0-9]*$', 'Digite um nรบmero de matricula vรกlido')])
    aluno_email = models.EmailField(blank=False, null=False)
    curso = models.CharField(max_length=50, choices=CURSO, blank=False)
    periodo = models.CharField(max_length=2, choices=PERIODO, blank=False)
    def __str__(self):
        return self.aluno_nome
class User(models.Model):
    # NOTE(review): stores the password as a plain CharField (no
    # hashing) and shadows Django's auth User concept - consider
    # django.contrib.auth instead for anything security-sensitive.
    username = models.CharField('Username', max_length = 50, unique = True)
    email = models.EmailField(unique = True)
    password = models.CharField('Password', max_length = 50)
class Tcc(models.Model):
    # A final-year project: up to four students plus an advisor.
    # NOTE(review): unique=True on OneToOneField is redundant (one-to-
    # one fields are unique by definition).
    titulo_tcc = models.CharField(max_length = 220, blank = False, unique=True)
    aluno = models.OneToOneField(Aluno, blank = False, null = False, on_delete=models.PROTECT, related_name = 'aluno', unique = True)
    aluno2 = models.OneToOneField(Aluno, blank = True, null = True, on_delete=models.PROTECT, related_name = 'aluno2', unique = True)
    aluno3 = models.OneToOneField(Aluno, blank = True, null = True, on_delete=models.PROTECT, related_name = 'aluno3', unique = True)
    aluno4 = models.OneToOneField(Aluno, blank = True, null = True, on_delete=models.PROTECT, related_name = 'aluno4', unique = True)
    orientador = models.ForeignKey(Professor, blank = False, on_delete=models.PROTECT)
    # '1' marks an active project (see Disponibilidade for the same idiom).
    situacao = models.CharField(default='1', max_length=1)
    def __str__(self):
        return self.titulo_tcc
class Salas(models.Model):
    # A room, identified by building block + room number.
    EVA = 'EVA'
    CT = 'CT'
    BF = 'B.F'
    BLOCOS = (
        (EVA, 'EVA'),
        (CT, 'CENTRO DE TECNOLOGIA'),
        (BF, 'BLOCO F'),
    )
    blocos = models.CharField(max_length=3, choices=BLOCOS, blank=False)
    # NOTE(review): unique=True here makes the composite
    # unique_together below redundant - probably only one of the two
    # constraints was intended.
    numero_do_bloco = models.CharField(max_length = 3, unique = True, validators=[RegexValidator(r'^[0-9]*$', 'Digite um nรบmero de sala vรกlido')])
    def __str__(self):
        return self.blocos + ' - ' + str(self.numero_do_bloco)
    class Meta:
        unique_together = (("blocos", "numero_do_bloco"))
class Disponibilidade(models.Model):
    # An examiner's availability slot (date + start time); situacao
    # statically records whether the slot is already tied to a board.
    Situacao_Choices = (
        ('1', 'Nรฃo Relacionado'),
        ('2', 'Relacionado')
    )
    data = models.DateField(blank = False)
    # Start time as "HH:MM" text, validated by regex.
    hora_inicio = models.CharField(max_length=5, blank = False, validators=[RegexValidator(r'^[012]\d:[0-5]\d')])
    # hora_termino = models.CharField(max_length=5, blank = False, validators=[RegexValidator(r'^[012]\d:[0-5]\d')])
    # NOTE(review): no on_delete here (required on Django >= 2.0) -
    # confirm the Django version this project targets.
    examinador = models.ForeignKey(Professor, blank = False, related_name = 'examinador')
    situacao = models.CharField(choices=Situacao_Choices, default='1', max_length=1)
    def __str__(self):
        return str(self.examinador) + ' - Data: ' + str(self.data) + ' das ' + self.hora_inicio
    class Meta:
        unique_together = (("data", "hora_inicio", "examinador"))
class Banca(models.Model):
    # An examination board: a project defended in a room at a given
    # date/time before two examiners (availability slots).
    titulo_tcc = models.OneToOneField(Tcc, blank = False, on_delete=models.PROTECT, unique = True)
    sala = models.ForeignKey(Salas, blank = False, on_delete=models.PROTECT, related_name = 'sala')
    hora_inicio = models.CharField(max_length=5, blank = False, validators=[RegexValidator(r'^[012]\d:[0-5]\d')])
    # hora_termino = models.CharField(max_length=5, blank = False, validators=[RegexValidator(r'^[012]\d:[0-5]\d')])
    data_banca = models.DateField(blank = False)
    examinador_1 = models.ForeignKey(Disponibilidade, blank = False, on_delete=models.PROTECT, related_name = 'examinador1')
    examinador_2 = models.ForeignKey(Disponibilidade, blank = False, on_delete=models.PROTECT, related_name = 'examinador2')
    def __str__(self):
        return str(self.titulo_tcc)
    class Meta:
        # At most one board per room per date+time.
        unique_together = (("sala", "hora_inicio", "data_banca"))
| [
"[email protected]"
] | |
4eacafe7e8bf2a642f69388478927456149fe080 | 51e312ad58c7646cc9e9db5972ab191beaee4552 | /SinNombre/SinNombre.py | 0709be8feef29eea16a6ab97238398efe4a5f890 | [] | no_license | juanma-perez/ArticuloViolencia | 7cb815c5360636f732878b12a8875fbbcf374258 | ad8d0dd7f0184932bf3cb1ec127b8b62e5821ad7 | refs/heads/master | 2021-01-23T04:23:16.941503 | 2017-05-13T15:20:13 | 2017-05-13T15:20:13 | 86,192,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | # -*- coding: utf-8 -*-
import Simulador
import Charmer
from FileManager import FileManager
import json
class SinNombreOntologia():
    # Python 2 class (note dict.has_key below): maps fields of a JSON
    # record onto "suceso" (event date) and "lugar" (place) structures,
    # guided by the keys listed in structure.json.
    def suceso(self):
        # Empty event template: day / month / year ("A\u00f1o" = "Ano").
        return {u"Dia":"",u"Mes":"",u"A\u00f1o":""}
    def lugar(self):
        # Empty place template at department level.
        return {u"Nivel":u"Departamento",u"Nombre":u""}
    def __init__(self, jsonLine):
        self.jsonLine = jsonLine
        self.fileManager = FileManager()
        self.structure = {}
        # NOTE(review): on any failure structure stays {}, so
        # fillStructure silently processes nothing.
        try:
            with self.fileManager.readFile("structure.json") as z:
                self.structure = json.loads(z.read().replace('\n', ''))
        except Exception as e:
            self.fileManager.recordError("Couldn't load the structures file ")
        self.clear()
    def clear(self):
        # Reset the working structures to empty templates.
        self.suc = self.suceso()
        self.depto = self.lugar()
        self.municipio = self.lugar()
    def process(self,case,value):
        # Route one (field, value) pair into the matching structure.
        if case == "Dia":
            self.suc = Charmer.addValue(self.suc,"Dia",value)
        elif case == "Mes":
            self.suc = Charmer.addValue(self.suc,"Mes",value)
        elif case == u"A\u00f1o":
            self.suc = Charmer.addValue(self.suc,u"A\u00f1o",value)
        elif case == "Departamento":
            self.depto = Charmer.addValue(self.depto,"Nombre",value)
    def fillStructure(self):
        # Parse the record and process every key known to structure.json.
        try:
            register=json.loads(self.jsonLine)
            for reg in register:
                if self.structure.has_key(reg):
                    self.process(reg,register[reg])
        except Exception as error:
            self.fileManager.recordError(error)
# --- Driver script (Python 2) ------------------------------------------------
# Reads one JSON record per line from prueba.json, fills the ontology for each
# record and prints the extracted incident date ("Suceso") and department
# ("Lugar") as UTF-8 encoded key/value pairs.
fileManager = FileManager()
file = "prueba.json"  # NOTE(review): shadows the Python 2 builtin `file`
with fileManager.readFile(file) as f:
    for line in f:
        a = SinNombreOntologia(line)
        a.fillStructure()
        print "Suceso"
        for thing in a.suc:
            print "    " + thing.encode("utf-8") + ": " + a.suc[thing].encode("utf-8")
        print "Lugar"
        for site in a.depto:
            print "    " + site.encode("utf-8") + ": " + a.depto[site].encode("utf-8")
        print ""
"[email protected]"
] | |
5c66d14f6755e6c75a989d5c01db3f9545b97c3e | 40cdd19b57b7eeecc50dd66e6911d077e1d37fe1 | /wsi_into_regions/main.py | dbb47cfa8da474ed6af07f3f7ff27c0e219e8eed | [] | no_license | oncoml/camelyon | b444c49a8743e8cf87981897e4946f6899147c02 | b403b8855c6a69c7e3bc8ae31e4a38f29f5e2983 | refs/heads/master | 2022-11-10T16:31:48.913746 | 2020-06-25T14:21:10 | 2020-06-25T14:21:10 | 264,371,298 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | # Tile WSI (Whole Slide Imaging) files and maps tumor regions based on XML coordinates.
# Name format of WSI slides are consistent with those found in Camelyon17 Challenge.
# @author: Steve Yang
# System imports
import os
import fnmatch
import pickle
# Local imports
from util import tile_wsi, map_tumor
# Directories for WSI data and saving tiles. WSI_DIRECTORY should contain WSI files of
# the format 'patient_xxx_node_yyy.tif' as per Camelyon17 data.
IMAGES_DIR = '/home/steveyang/Disk/Camelyon17/Train/'  # root of the Camelyon17 training WSIs
TILE_IMAGES_DIRECTORY = '/home/steveyang/projects/camelyon17/tile_images_2/'  # output folder for tile images
# Text file listing the WSI slide names that are positive for tumor (one per line)
POSITIVE_SLIDES_PATH = '/home/steveyang/projects/camelyon17/positive_slides.txt'
# Path to saved annotations file (same root as the images)
ANNOT_DIR = IMAGES_DIR
# Directory holding the lesion-annotation XML files (tumor region coordinates)
XML_DIRECTORY = '/home/steveyang/Disk/Camelyon17/Train/lesion_annotations/'
def main():
    """Tile every tumor-positive WSI and pickle the collected tumor-region annotations.

    Reads slide names from POSITIVE_SLIDES_PATH, tiles each slide found under
    IMAGES_DIR, maps its tumor regions from the matching XML annotation file,
    and dumps the list of per-slide annotations to 'annotations.txt'.
    """
    # Slides excluded from processing (known-bad inputs).
    skip_slides = {'patient_017_node_4.tif'}

    annotations = []
    with open(POSITIVE_SLIDES_PATH, 'r') as f:
        positive_slides = f.read().splitlines()

    for slide in positive_slides:
        if slide in skip_slides:
            continue
        try:
            # Slide files live in a per-patient folder:
            # 'patient_xxx/patient_xxx_node_yyy.tif' -> slide[:-11] == 'patient_xxx'
            image_dir = os.path.join(IMAGES_DIR, slide[:-11] + '/')
            xml_path = os.path.join(XML_DIRECTORY, slide[:-3] + 'xml')

            tile_summary = tile_wsi.filter_and_tile(image_dir, slide)
            annotations.append(map_tumor.map_tumor(tile_summary, image_dir, xml_path, TILE_IMAGES_DIRECTORY))
        except FileNotFoundError:
            # Don't abort the whole run on a missing slide/XML, but don't skip
            # silently either.
            print('Warning: missing file for slide {}, skipping'.format(slide))
            continue

    # NOTE(review): the '.txt' file actually contains pickled binary data —
    # presumably historical; confirm before renaming.
    with open('annotations.txt', 'wb') as file:
        pickle.dump(annotations, file)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
dad770a58a75f57a42156d6a36802e4a9169ecd5 | 34e3940c9d464b5b7c711956594715a1572da371 | /app/migrations/versions/cf01d7b9f9ea_.py | 0659fb65515067a2646c28097c67edf615915ba7 | [] | no_license | yufrances90/Fyyur | 715464f48931c2802cbb74cd3e6cce99b1febc9b | 0d33e788f17815ed7301ccffc3fbacbf2c45c75a | refs/heads/master | 2021-04-15T19:23:53.131307 | 2020-03-29T21:48:44 | 2020-03-29T21:48:44 | 249,292,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | """empty message
Revision ID: cf01d7b9f9ea
Revises: 0c38eeb84e2f
Create Date: 2020-03-25 14:36:20.707635
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf01d7b9f9ea'
down_revision = '0c38eeb84e2f'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable string column
    'seeking_description' to the 'venues' table."""
    # Auto-generated by Alembic; reviewed.
    op.add_column('venues', sa.Column('seeking_description', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the 'seeking_description' column
    from the 'venues' table."""
    # Auto-generated by Alembic; reviewed.
    op.drop_column('venues', 'seeking_description')
    # ### end Alembic commands ###
| [
"[email protected]"
] | |
e046db8c8baa4666d34c2b03cfaae02460aea802 | 31d6145d70cf903e741c0d4d8996547489ec3d9f | /pysamp.py | 1d8371518cc2448403db24df8c5e0196a19bcc6f | [] | no_license | PavanKrishna95/Waukesha | aa448eee95a039288953b302bfcb618015abd6c7 | aeeeeb02f98652241a6b732416beafc1e208eb0f | refs/heads/master | 2020-09-08T12:15:53.490581 | 2019-11-15T05:12:16 | 2019-11-15T05:12:16 | 221,130,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,731 | py | def pysamp():
import com as C
print('AARR =',C.AARR)
print('FNEXTV =',C.FNEXTV)
print('FEXTV =',C.FEXTV)
print('U0IN =',C.U0IN)
print('BARR =',C.BARR)
print('DARR =',C.DARR)
print('IARR =',C.IARR)
print('TARR =',C.TARR)
print('UARR =',C.UARR)
print('ACOND =',C.ACOND)
print('AWIND =',C.AWIND)
print('BDUCT =',C.BDUCT)
print('BPART =',C.BPART)
print('CUR =',C.CUR)
print('CURPRT =',C.CURPRT)
print('CURMNT =',C.CURMNT)
print('CURMXT =',C.CURMXT)
print('CURSPT =',C.CURSPT)
print('CURDEN =',C.CURDEN)
print('CURDM =',C.CURDM)
print('IPISOL =',C.IPISOL)
print('CURRUT =',C.CURRUT)
print('DWIND =',C.DWIND)
print('DYOKE =',C.DYOKE)
print('EDDCO1 =',C.EDDCO1)
print('EDDCON =',C.EDDCON)
print('FACT =',C.FACT)
print('FILLF =',C.FILLF)
print('FRACT =',C.FRACT)
print('FRACTW =',C.FRACTW)
print('FRMPOS =',C.FRMPOS)
print('FRPPOS =',C.FRPPOS)
print('FRZPOS =',C.FRZPOS)
print('FWIND =',C.FWIND)
print('GWINCO =',C.GWINCO)
print('HPART =',C.HPART)
print('HWIND =',C.HWIND)
print('KCODE =',C.KCODE)
print('KGROUP =',C.KGROUP)
print('KWITYP =',C.KWITYP)
print('NGROUP =',C.NGROUP)
print('NLOOP =',C.NLOOP)
print('NOUCO =',C.NOUCO)
print('PCUW =',C.PCUW)
print('PEDW =',C.PEDW)
print('PRESSM =',C.PRESSM)
print('RAACON =',C.RAACON)
print('RPART =',C.RPART)
print('RRWDG =',C.RRWDG)
print('RWIND =',C.RWIND)
print('SHTCUR =',C.SHTCUR)
print('SIGM =',C.SIGM)
print('STRWMM =',C.STRWMM)
print('STRWMP =',C.STRWMP)
print('TCOV1 =',C.TCOV1)
print('TCOV2 =',C.TCOV2)
print('TENSM =',C.TENSM)
print('TSPIN =',C.TSPIN)
print('VOLTUT =',C.VOLTUT)
print('WLOSS =',C.WLOSS)
print('WLOSSM =',C.WLOSSM)
print('X10 =',C.X10)
print('X20 =',C.X20)
print('X30 =',C.X30)
print('ZCODU =',C.ZCODU)
print('ZTALMW =',C.ZTALMW)
print('ZTALRW =',C.ZTALRW)
print('ZWIND =',C.ZWIND)
print('ZWINDW =',C.ZWINDW)
print('EXTX =',C.EXTX)
print('KCON =',C.KCON)
print('KTYPRW =',C.KTYPRW)
print('NLOOPG =',C.NLOOPG)
print('NMSTEP =',C.NMSTEP)
print('NPSTEP =',C.NPSTEP)
print('PLSPOS =',C.PLSPOS)
print('PUSTEP =',C.PUSTEP)
print('RIDIMW =',C.RIDIMW)
print('RIDIRW =',C.RIDIRW)
print('RMIPOS =',C.RMIPOS)
print('SLINE =',C.SLINE)
print('SRATE =',C.SRATE)
print('SRATEP =',C.SRATEP)
print('UDIM =',C.UDIM)
print('UN =',C.UN)
print('UNTAP =',C.UNTAP)
print('UNLINE =',C.UNLINE)
print('USURG =',C.USURG)
print('XRINT =',C.XRINT)
print('ZERPOS =',C.ZERPOS)
print('BB03 =',C.BB03)
print('BB051 =',C.BB051)
print('BBCURD =',C.BBCURD)
print('BBHELP =',C.BBHELP)
print('BBRR =',C.BBRR)
print('BBRW =',C.BBRW)
print('BBSCI =',C.BBSCI)
print('BBSCJ =',C.BBSCJ)
print('BBSCK =',C.BBSCK)
print('BBSCL =',C.BBSCL)
print('BBSCR =',C.BBSCR)
print('BBVR =',C.BBVR)
print('BB08 =',C.BB08)
print('BB132 =',C.BB132)
print('BBAUTO =',C.BBAUTO)
print('BBDLI =',C.BBDLI)
print('BBDSHE =',C.BBDSHE)
print('BBEND =',C.BBEND)
print('BBERR =',C.BBERR)
print('BBERR1 =',C.BBERR1)
print('BBEXAC =',C.BBEXAC)
print('BBFLDS =',C.BBFLDS)
print('BBFLU =',C.BBFLU)
print('BBHLI =',C.BBHLI)
print('BBLT10 =',C.BBLT10)
print('BBOPTI =',C.BBOPTI)
print('BBOUT =',C.BBOUT)
print('BBREA =',C.BBREA)
print('BBSLIM =',C.BBSLIM)
print('BBTS =',C.BBTS)
print('BBUPTR =',C.BBUPTR)
print('BBVFR =',C.BBVFR)
print('BBWISU =',C.BBWISU)
print('ACORE =',C.ACORE)
print('ACOVER =',C.ACOVER)
print('AFIELD =',C.AFIELD)
print('AVALUE =',C.AVALUE)
print('ANDEL =',C.ANDEL)
print('APOED =',C.APOED)
print('ASECL =',C.ASECL)
print('ASHELL =',C.ASHELL)
print('BDRAG =',C.BDRAG)
print('BLIMBX =',C.BLIMB)
print('BLTOPX =',C.BLTOPM)
print('BMAXPU =',C.BMAXPU)
print('BMINPU =',C.BMINPU)
print('BPOED =',C.BPOED)
print('BTANK =',C.BTANK)
print('BTANKM =',C.BTANKM)
print('CCOOL =',C.CCOOL)
print('CLOSSV =',C.CLOSSV)
print('COST =',C.COST)
print('COSTTP =',C.COSTTP)
print('CPUFIX =',C.CPUFIX)
print('CPUIND =',C.CPUIND)
print('CPUPT =',C.CPUPT)
print('CTROVN =',C.CTROVN)
print('DCOCOV =',C.DCOCOV)
print('DCOMAX =',C.DCOMAX)
print('DCOMIN =',C.DCOMIN)
print('DCORE =',C.DCORE)
print('DKSL =',C.DKSL)
print('DOUTW =',C.DOUTW)
print('DPHAS =',C.DPHAS)
print('DPHSL =',C.DPHSL)
print('DRV =',C.DRV)
print('DTSFF =',C.DTSFF)
print('DTSNF =',C.DTSNF)
print('DTSNN =',C.DTSNN)
print('DWITA =',C.DWITA)
print('PKLPRM =',C.PKLPRM)
print('EPS =',C.EPS)
print('EXTANK =',C.EXTANK)
print('F1TANK =',C.F1TANK)
print('F2TANK =',C.F2TANK)
print('F3TANK =',C.F3TANK)
print('FEPS =',C.FEPS)
print('FLUXM =',C.FLUXM)
print('AF =',C.AF)
print('FONAN =',C.FONAN)
print('FREQ =',C.FREQ)
print('GACTP =',C.GACTP)
print('GCONS =',C.GCONS)
print('GCOOL =',C.GCOOL)
print('GCORLA =',C.GCORLA)
print('GCORN =',C.GCORN)
print('GCOVER =',C.GCOVER)
print('GFAN =',C.GFAN)
print('GLIMBM =',C.GLIMBM)
print('GLIMBY =',C.GLIMBY)
print('GOFWF =',C.GOFWF)
print('GRAD =',C.GRAD)
print('GTANK =',C.GTANK)
print('GTRP =',C.GTRP)
print('GTRPM =',C.GTRPM)
print('GVVAH =',C.GVVAH)
print('GYOKE =',C.GYOKE)
print('HCORE =',C.HCORE)
print('HLIMB =',C.HLIMB)
print('HLMBMA =',C.HLMBMA)
print('HTANK =',C.HTANK)
print('HTANKM =',C.HTANKM)
print('HWINDM =',C.HWINDM)
print('HYOKE =',C.HYOKE)
print('HLMBMI =',C.HLMBMI)
print('ISAM =',C.ISAM)
print('ILACK =',C.ILACK)
print('NCONDU =',C.NCONDU)
print('IPAGE =',C.IPAGE)
print('MNLY =',C.MNLY)
print('ITR =',C.ITR)
print('IVERS =',C.IVERS)
print('JFC =',C.JFC)
print('KCOOL =',C.KCOOL)
print('KCOOL1 =',C.KCOOL1)
print('KCOOL2 =',C.KCOOL2)
print('KCORE =',C.KCORE)
print('KTAN79 =',C.KTAN79)
print('KTANK =',C.KTANK)
print('NCLA =',C.NCLA)
print('NCONST =',C.NCONST)
print('NCOOLC =',C.NCOOLC)
print('NFMX =',C.NFMX)
print('NFREE =',C.NFREE)
print('NG =',C.NG)
print('NPG =',C.NPG)
print('NPHAS =',C.NPHAS)
print('NSG =',C.NSG)
print('NUMN =',C.NUMN)
print('NUYR =',C.NUYR)
print('NWILI =',C.NWILI)
print('NWOULI =',C.NWOULI)
print('P0LOSS =',C.P0LOSS)
print('PCU =',C.PCU)
print('PED =',C.PED)
print('PI =',C.PI)
print('PKLOSS =',C.PKLOSS)
print('PLIMB =',C.PLIMB)
print('PLOADM =',C.PLOADM)
print('PLOMAX =',C.PLOMAX)
print('PLSL =',C.PLSL)
print('IOPT =',C.IOPT)
print('PNOLOM =',C.PNOLOM)
print('POED =',C.POED)
print('QFINAL =',C.QFINAL)
print('RAAAL =',C.RAAAL)
print('RAACU =',C.RAACU)
print('RDRAG =',C.RDRAG)
print('ISTGRD =',C.ISTGRD)
print('RLTANK =',C.RLTANK)
print('RLTNKM =',C.RLTNKM)
print('RLTRAN =',C.RLTRAN)
print('RLYOKE =',C.RLYOKE)
print('RMY0 =',C.RMY0)
print('DN =',C.DN)
print('SBF =',C.SBF)
print('SCCONS =',C.SCCONS)
print('SEQU2W =',C.SEQU2W)
print('SIGMAL =',C.SIGMAL)
print('SIGMCU =',C.SIGMCU)
print('SNOML =',C.SNOML)
print('SOUNDM =',C.SOUNDM)
print('SQR2 =',C.SQR2)
print('SQR3 =',C.SQR3)
print('TANKDI =',C.TANKDI)
print('TASEC =',C.TASEC)
print('TCORE =',C.TCORE)
print('TDRAG =',C.TDRAG)
print('TTOILC =',C.TTOILC)
print('TTOILM =',C.TTOILM)
print('TURNRA =',C.TURNRA)
print('TWINDM =',C.TWINDM)
print('TWSUP =',C.TWSUP)
print('TYPCOR =',C.TYPCOR)
print('U0 =',C.U0)
print('UMAXPU =',C.UMAXPU)
print('USHORE =',C.USHORE)
print('VACTP =',C.VACTP)
print('VTANK =',C.VTANK)
print('YHADD =',C.YHADD)
print('YHRED =',C.YHRED)
print('YOKAMP =',C.YOKAMP)
print('ZTRANS =',C.ZTRANS)
print('ZWOULI =',C.ZWOULI)
print('CHCORE =',C.CHCORE)
print('PCUT =',C.PCUT)
print('PEDT =',C.PEDT)
print('POEDT =',C.POEDT)
print('PCEOT =',C.PCEOT)
print('CHCOST =',C.CHCOST)
print('DATE =',C.DATE)
print('FILECH =',C.FILECH)
print('IDENT =',C.IDENT)
print('MNL =',C.MNL)
'''print('OBJ =',C.OBJ)'''
print('RUBCH =',C.RUBCH)
print('NPERM =',C.NPERM)
print('P00 =',C.P00)
print('PNOLO =',C.PNOLO)
print('Q00 =',C.Q00)
print('RINOLO =',C.RINOLO)
print('POSIT =',C.POSIT)
print('SOUND0 =',C.SOUND0)
print('SOUND =',C.SOUND)
print('URC =',C.URC)
print('UXC =',C.UXC)
print('XA =',C.XA)
print('XREL =',C.XREL)
print('ZTALWG =',C.ZTALWG)
print('SAVE1 =',C.SAVE1)
print('WLOSEX =',C.WLOSSE)
print('WLOSRX =',C.WLOSSR)
print('KODL =',C.KODL)
print('KODP =',C.KODP)
print('CSTPSW =',C.CSTPSW)
print('FLUXMD =',C.FLUXMD)
print('FLUXMW =',C.FLUXMW)
print('CORBND =',C.CORBND)
print('PG =',C.PG)
print('PK =',C.PK)
print('NCOL',C.NCOL)
print('GREL =',C.GREL)
print('G =',C.G)
print('FLUXI =',C.FLUXI)
print('FLUXO =',C.FLUXO)
print('BBOOVN =',C.BBOOVN)
print('VALUEM =',C.VALUEM)
print('VALUEO =',C.VALUEO)
print('VALUEF =',C.VALUEF)
print('DVERS =',C.DVERS)
print('NLORR =',C.NLORR)
print('KWIND =',C.KWIND)
print('BBFREQ =',C.BBFREQ)
print('BBADJU =',C.BBADJU)
print('BBEXON =',C.BBEXON)
print('BBLAY =',C.BBLAY)
print('BBISGR =',C.BBISGR)
print('ZLAG =',C.ZLAG)
print('ZNLAG =',C.ZNLAG)
print('ZCOIAR =',C.ZCOIAR)
print('EXTRAR =',C.EXTRAR)
print('HCLAC =',C.HCLAC)
print('ZDISC =',C.ZDISC)
print('SWIND =',C.SWIND)
print('RINNER =',C.RINNER)
print('ZAR =',C.ZAR)
print('ZRR =',C.ZRR)
print('ZPART =',C.ZPART)
print('ZTUDI =',C.ZTUDI)
print('ZTULO =',C.ZTULO)
print('TSPIN1 =',C.TSPIN1)
print('HPRTMN =',C.HPRTMN)
print('HPRTMX =',C.HPRTMX)
print('BPRTMN =',C.BPRTMN)
print('BPRTMX =',C.BPRTMX)
print('EXTCOR =',C.EXTCOR)
print('PSPL1 =',C.PSPL1)
print('PSPL2 =',C.PSPL2)
print('PSPL3 =',C.PSPL3)
print('PSPHD1 =',C.PSPHD1)
print('PSPHD2 =',C.PSPHD2)
print('PSPHD3 =',C.PSPHD3)
print('PSPHT1 =',C.PSPHT1)
print('PSPHT2 =',C.PSPHT2)
print('PSPHT3 =',C.PSPHT3)
print('PSPTY1 =',C.PSPTY1)
print('PSPTY2 =',C.PSPTY2)
print('PSPTY3 =',C.PSPTY3)
print('SSPL1 =',C.SSPL1)
print('SSPL2 =',C.SSPL2)
print('SSPL3 =',C.SSPL3)
print('SSPHD1 =',C.SSPHD1)
print('SSPHD2 =',C.SSPHD2)
print('SSPHD3 =',C.SSPHD3)
print('SSPHT1 =',C.SSPHT1)
print('SSPHT2 =',C.SSPHT2)
print('SSPHT3 =',C.SSPHT3)
print('SSPTY1 =',C.SSPTY1)
print('SSPTY2 =',C.SSPTY2)
print('SSPTY3 =',C.SSPTY3)
print('LAMID =',C.LAMID)
print('MABOOS =',C.MABOOS)
print('VOBOOS =',C.VOBOOS)
print('REBOOS =',C.REBOOS)
print('TRBOOS =',C.TRBOOS)
print('FABOOS =',C.FABOOS)
print('BBOOS =',C.BBOOS)
print('BBOOSW =',C.BBOOSW)
print('TELOSS =',C.TELOSS)
print('STKFAC =',C.STKFAC)
print('GPBLK =',C.GPBLK)
print('GOIL =',C.GOIL)
print('CTRCMP =',C.CTRCMP)
print('CUCOS =',C.CUCOS)
print('CUCOSP =',C.CUCOSP)
print('FECOS =',C.FECOS)
print('FECOSP =',C.FECOSP)
print('OLCOS =',C.OLCOS)
print('GCONDU =',C.GCONDU)
print('OLCOSP =',C.OLCOSP)
print('PNFDUC =',C.PNFDUC)
print('PNFWIN =',C.PNFWIN)
print('PNFYOK =',C.PNFYOK)
print('PNDUCT =',C.PNDUCT)
print('PNWIND =',C.PNWIND)
print('PNYOKE =',C.PNYOKE)
print('LAMTH =',C.LAMTH)
print('LAMSPF =',C.LAMSPF)
print('LAMMW =',C.LAMMW)
print('LAMOVL =',C.LAMOVL)
print('ACCLON =',C.ACCLON)
print('ACCTRA =',C.ACCTRA)
print('ACCVER =',C.ACCVER)
print('AMBTEM =',C.AMBTEM)
print('LAMSTP =',C.LAMSTP)
print('LOSCOR =',C.LOSCOR)
print('TA1HOL =',C.TA1HOL)
print('SPRED =',C.SPRED)
print('FLPLMA =',C.FLPLMA)
print('NCOOLI =',C.NCOOLI)
print('NCOOLW =',C.NCOOLW)
print('ISTOP =',C.ISTOP)
print('SPFADJ =',C.SPFADJ)
print('EXTPOS =',C.EXTPOS)
print('FRXPOS =',C.FRXPOS)
print('NXSTEP =',C.NXSTEP)
print('FARR =',C.FARR)
print('WDGFUN =',C.WDGFUN)
| [
"[email protected]"
] | |
606f7fa891585587eac562f47ace30b002f67eff | 976ba543bf4440f9d58b519886cab8ccc9f92f04 | /captcha/prepare_data.py | f024244ce0de0739c419901103131a9a8367440e | [] | no_license | TingYulq/tensorflow-learn | 41d65eea4ab01f2c41f7128c5bed2858b7df2922 | d580aa82c8a6f81bac3a52c1952ccea71bc699e3 | refs/heads/master | 2020-04-03T19:23:19.704791 | 2019-07-16T16:30:47 | 2019-07-16T16:30:47 | 155,521,614 | 0 | 0 | null | 2018-10-31T08:14:48 | 2018-10-31T08:14:48 | null | UTF-8 | Python | false | false | 2,242 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Mail: [email protected]
# Author: StarryTeng
# Description: captcha data generation (orig.: ่ทๅๆฐๆฎ, "fetch data")
from captcha.image import ImageCaptcha
from PIL import Image
import numpy as np
import random
import string
class generateCaptcha():
    """Generates captcha images plus one-hot character labels for training."""

    def __init__(self,
                 width=160,    # captcha image width in pixels
                 height=60,    # captcha image height in pixels
                 char_num=4,   # number of characters per captcha
                 characters=string.digits + string.ascii_uppercase + string.ascii_lowercase):
        """Store geometry and the alphabet (digits + upper + lower = 62 classes)."""
        self.width = width
        self.height = height
        self.char_num = char_num
        self.characters = characters
        self.classes = len(characters)

    def gen_captcha(self, batch_size=50):
        """Infinite generator yielding (X, Y) batches.

        X: float array (batch_size, height, width, 1), grayscale in [0, 1].
        Y: one-hot labels flattened to (batch_size, char_num * classes).

        Bug fix: the original allocated X/Y once and reshaped Y in place, so
        the second batch raised IndexError and stale labels were never
        cleared.  Buffers are now allocated fresh for every batch.
        """
        image = ImageCaptcha(width=self.width, height=self.height)
        while True:
            X = np.zeros([batch_size, self.height, self.width, 1])
            Y = np.zeros([batch_size, self.char_num, self.classes])
            for i in range(batch_size):
                # random.sample draws without replacement, so a captcha never
                # repeats a character.
                captcha_str = ''.join(random.sample(self.characters, self.char_num))
                img = image.generate_image(captcha_str).convert('L')  # grayscale
                img = np.array(img.getdata())
                X[i] = np.reshape(img, [self.height, self.width, 1]) / 255.0  # normalize to [0, 1]
                for j, ch in enumerate(captcha_str):
                    Y[i, j, self.characters.find(ch)] = 1
            yield X, np.reshape(Y, (batch_size, self.char_num * self.classes))

    def decode_captcha(self, y):
        """Decode flattened one-hot labels back to the captcha string."""
        y = np.reshape(y, (len(y), self.char_num, self.classes))
        return ''.join(self.characters[x] for x in np.argmax(y, axis=2)[0, :])

    def get_parameter(self):
        """Return (width, height, char_num, characters, classes)."""
        return self.width, self.height, self.char_num, self.characters, self.classes

    def gen_test_captcha(self):
        """Write one random captcha image to '<text>.jpg' for eyeballing."""
        image = ImageCaptcha(width=self.width, height=self.height)
        captcha_str = ''.join(random.sample(self.characters, self.char_num))
        img = image.generate_image(captcha_str)
        img.save(captcha_str + '.jpg')
# Smoke test (Python 2 syntax): prints the generator object itself, not a
# batch — call next() on it to actually produce data.
data = generateCaptcha()
#data.gen_test_captcha()
print data.gen_captcha()
"[email protected]"
] | |
7209c69bdb5d75d105816259ea0b142d4d582778 | 55a2cb380f6144cb3bc28dec5f2fe6803e3c6c2a | /CenterNet/my_lib/data/coco/coco_dataset.py | cee856475c35f253a792475fa4b6fe6ff3edea0b | [
"BSD-3-Clause",
"MIT"
] | permissive | lorcanob/modified_centernet | f6b24fa80bb6927ea48357e75339f7b2a2efe5b1 | e5a40d0907ed155ca2e99688461268e02dd726bc | refs/heads/main | 2023-03-18T06:23:04.176133 | 2020-12-31T19:44:08 | 2020-12-31T19:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,327 | py | import os
import torch
import torch.utils.data
import torchvision
import cv2
from PIL import Image
from pycocotools.coco import COCO
import numpy as np
class CocoPathManager(object):
    """Resolves file-system locations inside a COCO-layout dataset.

    Expected tree under the dataset root:
        annotations/instances_{train,val}2017.json (plus captions/keypoints)
        train2017/  val2017/  test2017/
    """

    def __init__(self, given_path=None):
        """Remember the dataset root.

        :param given_path: dataset root; defaults to
            $CENTERNET_ROOT/CenterNet/data/coco
        """
        if given_path is None:
            import os
            import pathlib
            root = pathlib.Path(os.environ['CENTERNET_ROOT'])
            given_path = root / 'CenterNet' / "data" / "coco"
        assert given_path.exists()
        self.coco_path_ = given_path

    def get_annotation_json(self, sep_type, task_type):
        """Return the annotation JSON path for a split/task.

        :param sep_type: one of 'train', 'val', 'sampletrain', 'sampleval'
        :param task_type: only 'detection' is supported
        :return: path to the instances JSON file
        """
        assert task_type == "detection"
        assert sep_type in ['train', 'val', 'sampletrain', 'sampleval']
        return self.coco_path_ / "annotations" / f"instances_{sep_type}2017.json"

    def get_image_directory(self, sep_type):
        """Return the image directory for a split.

        :param sep_type: split name; a 'sample' prefix maps to the full split's
            image folder (sample JSONs reference the same images)
        :return: existing image directory path
        """
        base = sep_type.replace('sample', '')
        assert base in ['train', 'val']
        image_dir = self.coco_path_ / (base + '2017')
        assert image_dir.exists()
        return image_dir
class CocoSampleAnnotationJSONGenerator(object):
    """Builds a small 'sample' annotation JSON out of a full COCO split,
    keeping only the first N images plus their annotations."""

    def __init__(self, path_manager: "CocoPathManager",
                 sep_type: str):
        """Load the full split's annotations and compute the output path.

        :param sep_type: 'train' or 'val'
        """
        src_json = path_manager.get_annotation_json(sep_type, 'detection')
        self.coco_ = COCO(src_json)
        self.output_json_file_ = path_manager.get_annotation_json('sample' + sep_type, 'detection')

    def write_to_json(self, coco_dict, json_file):
        """Serialize *coco_dict* to *json_file*, creating parent dirs first.

        :param coco_dict: annotation dictionary in COCO format
        :param json_file: destination JSON path
        """
        from my_lib.enhanced.path_enhenced import prepare_filepath_for_writing
        prepare_filepath_for_writing(json_file)
        import json
        with open(json_file, "w") as fid:
            json.dump(coco_dict, fid)

    def generate_sample_json_file(self, sample_number=5,
                                  json_file_name=None):
        """Write a JSON file containing only the first *sample_number* images.

        :param sample_number: how many images to keep (lowest ids first)
        :param json_file_name: destination; defaults to the 'sample' split path
        """
        if json_file_name is None:
            json_file_name = self.output_json_file_
        print(json_file_name)

        kept_ids = list(sorted(self.coco_.imgs.keys()))[0:sample_number]

        sample = {
            "info": self.coco_.dataset['info'],
            "licenses": self.coco_.dataset["licenses"],
            "images": self.coco_.loadImgs(kept_ids),
        }
        # Flatten the per-image annotation lists into one list.
        all_anns = []
        for img_id in kept_ids:
            all_anns.extend(self.coco_.imgToAnns[img_id])
        sample['annotations'] = all_anns
        # Keep every category so ids still resolve.
        sample["categories"] = [v for v in self.coco_.cats.values()]
        self.write_to_json(sample, json_file_name)
class CocoDataset(torch.utils.data.Dataset):
    """
    Torch Dataset over a COCO-style detection dataset (images + boxes).

    This class is inspired by the article (https://medium.com/fullstackai/how-to-train-an-object-detector-with-your-own-coco-dataset-in-pytorch-319e7090da5)
    """
    def __init__(self, path_manager: "CocoPathManager", sep_type: str,
                 task_type='detection',
                 transform=None):
        """
        Load the annotation index for the given split.

        :param path_manager: resolves the dataset's file locations
        :param sep_type: split name ('train', 'val', 'sampletrain', 'sampleval')
        :param task_type: only 'detection' is supported by the path manager
        :param transform: optional callable applied to each raw image
        """
        annotation = path_manager.get_annotation_json(sep_type, task_type)
        self.coco_ = COCO(annotation)
        # Sorted image ids define the dataset order.
        self.imageids_ = list(sorted(self.coco_.imgs.keys()))
        self.transform_ = transform
        self.image_dir_ = path_manager.get_image_directory(sep_type)

    def obtain_categories(self, cat_index_list):
        """
        Map COCO category ids to their category names.

        :param cat_index_list: list of category ids
        :return: list of category name strings
        """
        cats_dict_arr = self.coco_.loadCats(cat_index_list)
        return [entry['name'] for entry in cats_dict_arr]

    def dataset_summary(self):
        """Return a dict merging category and image statistics."""
        cat_info = self._category_info()
        img_info = self._image_info()
        return {**cat_info, **img_info}

    def _category_info(self):
        """Collect category / supercategory names and counts."""
        cat_name_list = set()
        super_cat_name_list = set()
        for v in self.coco_.cats.values():
            cat_name_list.add(v['name'])
            super_cat_name_list.add(v['supercategory'])
        return {
            'class': list(cat_name_list),
            'class_number': len(cat_name_list),
            'superclass': list(super_cat_name_list),
            'superclass_number': len(super_cat_name_list),
        }

    def _image_info(self):
        """Return the number of images in this split."""
        return {'image_number': len(self.coco_.imgs)}

    def get_segmentation_mask(self, index, b_instance=False):
        """
        Build a segmentation mask for the image at dataset position *index*.

        :param index: dataset position (not a COCO image id)
        :param b_instance: if False, pixel values are category ids;
            if True, pixel values are 1-based per-annotation instance ids
        :return: uint8 mask of shape (height, width)
        """
        img_id = self.imageids_[index]
        ann_ids = self.coco_.getAnnIds(imgIds=img_id)
        img_dict = self.coco_.imgs[img_id]
        img_height, img_width = img_dict['height'], img_dict['width']
        coco_annotation = self.coco_.loadAnns(ann_ids)
        mask = np.zeros((img_height, img_width), np.uint8)
        # Use a distinct loop variable: the original shadowed the *index*
        # parameter here.
        for ann_pos, ann in enumerate(coco_annotation):
            cat_id = int(ann['category_id'])
            if b_instance is False:
                mask = np.maximum(self.coco_.annToMask(ann) * cat_id, mask)
            else:
                mask = np.maximum(self.coco_.annToMask(ann) * (ann_pos + 1), mask)
        return mask

    def __getitem__(self, index):
        """
        Return (image_tensor, annotation_dict) for the image at *index*.

        The annotation dict holds boxes in COCO [xmin, ymin, w, h] format,
        plus areas, category ids and crowd flags.
        """
        coco = self.coco_
        img_id = self.imageids_[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        coco_annotation = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        # cv2 reads BGR; reorder channels to RGB
        # (https://note.nkmk.me/en/python-opencv-bgr-rgb-cvtcolor/).
        img = cv2.imread(str(self.image_dir_ / path))
        img = img[:, :, [2, 1, 0]]

        # Bounding boxes and category ids, in annotation order.
        boxes = []
        cats = []
        for ann in coco_annotation:
            xmin = ann['bbox'][0]
            ymin = ann['bbox'][1]
            width = ann['bbox'][2]
            height = ann['bbox'][3]
            boxes.append([xmin, ymin, width, height])
            cats.append(ann['category_id'])
        boxes = torch.as_tensor(boxes, dtype=torch.float32)

        crowd_list = [ann['iscrowd'] for ann in coco_annotation]

        # Tensorise the image id.
        img_id = torch.tensor([img_id])

        # Box areas (rectangular).
        areas = torch.as_tensor([ann['area'] for ann in coco_annotation],
                                dtype=torch.float32)

        cats = torch.as_tensor(cats, dtype=torch.int)

        my_annotation = {
            "box_list": boxes,
            "image_id": img_id,
            "area_list": areas,
            "category_id_list": cats,
            "crowd_list": crowd_list,
        }

        if self.transform_ is not None:
            img = self.transform_(img)
        else:
            img = torch.from_numpy(img)

        return img, my_annotation

    def __len__(self):
        # Bug fix: the original returned len(self.ids), an attribute that was
        # never set (the constructor stores self.imageids_), raising
        # AttributeError on every len() call.
        return len(self.imageids_)
if __name__ == "__main__":
    # Self-test: regenerate the small 'sample' annotation files for both splits.
    # NOTE(review): imports this very module by its package path — presumably
    # relies on being run from the project root; confirm.
    from my_lib.data.coco.coco_dataset import CocoDataset, CocoPathManager, CocoSampleAnnotationJSONGenerator
    from my_lib.visualization.image_vis import show_single_image  # imported but unused here
    coco_path_manager = CocoPathManager()
    sample_generator = CocoSampleAnnotationJSONGenerator(coco_path_manager, "val")
    sample_generator.generate_sample_json_file(sample_number=18)
    sample_generator = CocoSampleAnnotationJSONGenerator(coco_path_manager, "train")
    sample_generator.generate_sample_json_file(sample_number=18)
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.