blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ebfb829b1d0def60e4c43777cb8f3bf68601fa8 | 4c458854c0b5672a098b496d871a2b4f1d8e828d | /tools/py_bulit_in/getattr_module.py | 4c2d67c27b0d03499119524cbf097660dd38e9c8 | [] | no_license | claire1234995/code | ae918aebd0fb87f50d1ac0ee434e4976e8682b23 | 1bb9f5aaad1ac801e912cd13537de2ebfe9dcb1c | refs/heads/master | 2022-02-20T05:28:40.303611 | 2019-10-11T05:28:11 | 2019-10-11T05:28:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from importlib import import_module
defa = import_module('.'.join(["default"]))
# getattr(defa, 'info') is a function
attr = getattr(defa, 'info')
print(attr())
| [
"[email protected]"
] | |
4b3f4ce7d3466af21161b49b5de4bb3c3dae016e | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2Z/2Z-2Y_wat_20Abox/set_1ns_equi.py | caf34fdac8c7fd631e0581c3c13b6dd1db8f0e62 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L2Z/wat_20Abox/ti_one-step/2Z_2Y/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
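# Each value above is one lambda window of the thermodynamic-integration (TI) run;
# the loop below creates a fresh directory per window, copies in the template
# equilibration input and PBS script, substitutes the lambda value, and submits the job.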
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../2Z-2Y_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
178cb057717e1ae8d262ad395a5abd1a2506036e | 71ad2a6587cc7c0a7149465287b2659d81f406e7 | /morpheus_chair_pkg/scripts/simple_firmata.py | d282b902a57547d624cac95b58488394e34f9c8c | [] | no_license | ArifSohaib/morpheus_chair_arduino | 389a091ad00535992260ed8eeb6d897d33c08010 | be4e4892a87f09cc86c8832a12b2ccc06172756f | refs/heads/master | 2020-04-29T09:02:15.593714 | 2019-12-07T05:35:10 | 2019-12-07T05:35:10 | 176,008,958 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,531 | py | from pyfirmata import Arduino, util
import os
if os.name == 'nt':
from pynput import keyboard
import time
"""defining pins"""
#define ENB 5
#define IN1 7
#define IN2 8
#define IN3 9
#define IN4 11
#define ENA 6
ENB = 5
IN1 = 7
IN2 = 8
IN3 = 9
IN4 = 11
ENA = 6
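# These pins drive an L298N dual H-bridge motor board: ENA/ENB enable the two motor
# channels, while IN1/IN2 and IN3/IN4 set each channel's direction (see the truth
# tables quoted in the docstrings below). The exact pin numbers are wiring-specific
# assumptions carried over from the original Arduino sketch.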
def forward():
"""
ORIGINAL function
void forward(){
digitalWrite(ENA, HIGH); //enable L298n A channel
digitalWrite(ENB, HIGH); //enable L298n B channel
digitalWrite(IN1, HIGH); //set IN1 hight level
digitalWrite(IN2, LOW); //set IN2 low level
digitalWrite(IN3, LOW); //set IN3 low level
digitalWrite(IN4, HIGH); //set IN4 hight level
Serial.println("Forward"); //send message to serial monitor
}"""
board.digital[ENA].write(1)
board.digital[ENB].write(1)
board.digital[IN1].write(1)
board.digital[IN2].write(0)
board.digital[IN3].write(0)
board.digital[IN4].write(1)
def back():
"""
void back(){
digitalWrite(ENA, HIGH);
digitalWrite(ENB, HIGH);
digitalWrite(IN1, LOW);
digitalWrite(IN2, HIGH);
digitalWrite(IN3, HIGH);
digitalWrite(IN4, LOW);
Serial.println("Back");
}"""
board.digital[ENA].write(1)
board.digital[ENB].write(1)
board.digital[IN1].write(0)
board.digital[IN2].write(1)
board.digital[IN3].write(1)
board.digital[IN4].write(0)
def left():
"""
void left(){
digitalWrite(ENA, HIGH);
digitalWrite(ENB, HIGH);
digitalWrite(IN1, LOW);
digitalWrite(IN2, HIGH);
digitalWrite(IN3, LOW);
digitalWrite(IN4, HIGH);
Serial.println("Left");
}"""
board.digital[ENA].write(1)
board.digital[ENB].write(1)
board.digital[IN1].write(0)
board.digital[IN2].write(1)
board.digital[IN3].write(0)
board.digital[IN4].write(1)
def right():
"""
void right(){
digitalWrite(ENA, HIGH);
digitalWrite(ENB, HIGH);
digitalWrite(IN1, HIGH);
digitalWrite(IN2, LOW);
digitalWrite(IN3, HIGH);
digitalWrite(IN4, LOW);
Serial.println("Right");
}
"""
board.digital[ENA].write(1)
board.digital[ENB].write(1)
board.digital[IN1].write(1)
board.digital[IN2].write(0)
board.digital[IN3].write(1)
board.digital[IN4].write(0)
def stop():
board.digital[ENA].write(0)
board.digital[ENB].write(0)
board.digital[IN1].write(0)
board.digital[IN2].write(0)
board.digital[IN3].write(0)
board.digital[IN4].write(0)
def on_press(key):
try:
print('alphanumeric key {0} pressed'.format(key.char))
if key.char == 'w':
forward()
elif key.char == 's':
back()
elif key.char == 'a':
left()
elif key.char == 'd':
right()
elif key.char == 'x':
stop();
board.exit()
return False
except AttributeError:
try:
board.exit()
except:
print("board not connected")
print('special key {0} pressed'.format(
key))
def on_release(key):
print('{0} released'.format(key))
if key == keyboard.Key.esc:
# Stop listener
return False
#define globals
try:
board = Arduino("COM9")
except:
board = Arduino("/dev/ttyACM0")
iterator = util.Iterator(board)
iterator.start()
def main():
# connect(board)
# Collect events until released
with keyboard.Listener(on_press=on_press,on_release=on_release) as listener:listener.join()
# import keyboard # using module keyboard
# while True: # making a loop
# try: # used try so that if user pressed other than the given key error will not be shown
# if keyboard.is_pressed('q'): # if key 'q' is pressed
# print('You Pressed A Key!')
# try:
# board.exit()
# except:
# print("not connected")
# break # finishing the loop
# else:
# pass
# except:
# try:
# board.exit()
# except:
# print("not connected")
# break # if user pressed a key other than the given key the loop will break
def main_simple():
import time
forward()
time.sleep(10)
back()
time.sleep(10)
stop()
if __name__ == "__main__":
if os.name != 'nt':
main_simple()
else:
main() | [
"[email protected]"
] | |
90aab359ae9197e2d3629214164cb1cc90ca7031 | b86608b6de44642ed29cd88bba4acbbdd31a0b04 | /examples/bq_file_load_benchmark/tests/test_parquet_util.py | 55fff7d7e6872a3077acaea7ee3a1e2d5624fa6b | [
"Apache-2.0"
] | permissive | MCRen88/professional-services | a514a926dd23e3c4ac6dadb656faed22c3d91d5d | d7bc3b194159ffdb149c9507890bb1fbae7a8d88 | refs/heads/master | 2020-12-15T16:38:17.860940 | 2020-01-06T19:29:47 | 2020-01-06T19:29:47 | 235,181,173 | 1 | 0 | Apache-2.0 | 2020-01-20T19:26:15 | 2020-01-20T19:26:14 | null | UTF-8 | Python | false | false | 1,423 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pyarrow as pa
import unittest
from google.cloud import bigquery
from bq_file_load_benchmark.generic_benchmark_tools import parquet_util
class TestParquetUtil(unittest.TestCase):
"""Tests functionality of load_benchmark_tools.parquet_util.ParquetUtil.
Attributes:
parquet_util(load_benchmark_tools.ParquetUtil): parquet utility class to be
tested.
"""
def setUp(self):
"""Sets up resources for tests.
"""
bq_schema = [
bigquery.SchemaField('string1', 'STRING', 'REQUIRED'),
bigquery.SchemaField('numeric1', 'NUMERIC', 'REQUIRED')
]
self.parquet_util = parquet_util.ParquetUtil(
bq_schema=bq_schema
)
def test_get_parquet_translated_schema(self):
"""Tests ParquetUtil.get_pa_translated_schema().
Tests ParquetUtil's ability to translate a BigQuery schema to PyArrow
schema for parquet.
Raises:
AssertionError: if the translated schema does not match the expected schema.
"""
parquet_translated_schema = self.parquet_util.get_pa_translated_schema()
expected_pa_schema = pa.schema([
pa.field('string1', pa.string()),
pa.field('numeric1', pa.int64())
])
assert parquet_translated_schema == expected_pa_schema
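# A minimal usage sketch (illustrative only; the data and file path below are
# assumptions, not part of this test): the translated schema can be handed to
# pyarrow when materialising a parquet file, e.g.
#
#   import pyarrow as pa
#   import pyarrow.parquet as pq
#   table = pa.Table.from_pydict(
#       {'string1': ['a'], 'numeric1': [1]},
#       schema=parquet_translated_schema)
#   pq.write_table(table, '/tmp/example.parquet')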
| [
"[email protected]"
] | |
b5ce1f91b4d8d62e404127000eb05f3225a5afd7 | 04c343a4b6ba0cee3873a17833ac910e930e27ce | /goals/migrations/0053_add_field_theme_to_sector.py | 16cdfaaa0b865ae88c90451028af51503525f230 | [
"Unlicense"
] | permissive | tehamalab/dgs | f10f45440494aa3404da068cfef69ad2f7385033 | 46de3cdaced5e4afef46fa46c7a3303d53df0da0 | refs/heads/master | 2021-03-16T05:13:57.548503 | 2017-11-24T05:45:49 | 2017-11-24T05:46:08 | 93,390,835 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-28 05:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goals', '0052_add_field_caption_on_plan'),
]
operations = [
migrations.AddField(
model_name='sector',
name='themes',
field=models.ManyToManyField(related_name='sectors', to='goals.Theme', verbose_name='Themes'),
),
]
| [
"[email protected]"
] | |
125065bc258175c73d7cc32c947d987f6a4b70e0 | e5de11874e3d68ebd48f22d75d0c5b37ed058ae9 | /src/env/dm_control/dm_control/suite/common/settings.py | b6ace94933a01f6f632e88350d11796f7e6ec6e9 | [
"Apache-2.0"
] | permissive | xiaolonw/policy-adaptation-during-deployment | 4436e568439e3e7df48b7ed529304e9d11e979f0 | e3f98eef83608bb78a55a63405e17106e4dee814 | refs/heads/master | 2022-11-17T19:09:00.967055 | 2020-07-09T00:34:10 | 2020-07-09T00:34:10 | 279,119,345 | 1 | 0 | null | 2020-07-12T17:58:50 | 2020-07-12T17:58:50 | null | UTF-8 | Python | false | false | 5,146 | py | import os
import numpy as np
from dm_control.suite import common
from dm_control.utils import io as resources
import xmltodict
_SUITE_DIR = os.path.dirname(os.path.dirname(__file__))
_FILENAMES = [
"./common/materials.xml",
"./common/skybox.xml",
"./common/visual.xml",
]
def get_model_and_assets_from_setting_kwargs(model_fname, setting_kwargs=None):
""""Returns a tuple containing the model XML string and a dict of assets."""
assets = {filename: resources.GetResource(os.path.join(_SUITE_DIR, filename))
for filename in _FILENAMES}
if setting_kwargs is None:
return common.read_model(model_fname), assets
# Convert XML to dicts
model = xmltodict.parse(common.read_model(model_fname))
materials = xmltodict.parse(assets['./common/materials.xml'])
skybox = xmltodict.parse(assets['./common/skybox.xml'])
# Edit lighting
if 'light_pos' in setting_kwargs:
assert isinstance(setting_kwargs['light_pos'], (list, tuple, np.ndarray))
light_pos = f'{setting_kwargs["light_pos"][0]} {setting_kwargs["light_pos"][1]} {setting_kwargs["light_pos"][2]}'
if 'light' in model['mujoco']['worldbody']:
model['mujoco']['worldbody']['light']['@pos'] = light_pos
elif 'light' in model['mujoco']['worldbody']['body']:
model['mujoco']['worldbody']['body']['light']['@pos'] = light_pos
else:
raise NotImplementedError('model xml does not contain entity light')
# Edit camera
if 'cam_pos' in setting_kwargs:
assert isinstance(setting_kwargs['cam_pos'], (list, tuple, np.ndarray))
cam_pos = f'{setting_kwargs["cam_pos"][0]} {setting_kwargs["cam_pos"][1]} {setting_kwargs["cam_pos"][2]}'
if 'camera' in model['mujoco']['worldbody']:
model['mujoco']['worldbody']['camera'][0]['@pos'] = cam_pos
elif 'camera' in model['mujoco']['worldbody']['body']:
model['mujoco']['worldbody']['body']['camera'][0]['@pos'] = cam_pos
else:
raise NotImplementedError('model xml does not contain entity camera')
# Edit distractor
if 'distractor_pos' in setting_kwargs:
assert isinstance(setting_kwargs['distractor_pos'], (list, tuple, np.ndarray))
distractor_pos = f'{setting_kwargs["distractor_pos"][0]} {setting_kwargs["distractor_pos"][1]} {setting_kwargs["distractor_pos"][2]}'
assert model['mujoco']['worldbody']['body'][-1]['@name'] == 'distractor', 'distractor must be in worldbody'
model['mujoco']['worldbody']['body'][-1]['geom']['@pos'] = distractor_pos
# Edit grid floor
if 'grid_rgb1' in setting_kwargs:
assert isinstance(setting_kwargs['grid_rgb1'], (list, tuple, np.ndarray))
materials['mujoco']['asset']['texture']['@rgb1'] = \
f'{setting_kwargs["grid_rgb1"][0]} {setting_kwargs["grid_rgb1"][1]} {setting_kwargs["grid_rgb1"][2]}'
if 'grid_rgb2' in setting_kwargs:
assert isinstance(setting_kwargs['grid_rgb2'], (list, tuple, np.ndarray))
materials['mujoco']['asset']['texture']['@rgb2'] = \
f'{setting_kwargs["grid_rgb2"][0]} {setting_kwargs["grid_rgb2"][1]} {setting_kwargs["grid_rgb2"][2]}'
if 'grid_texrepeat' in setting_kwargs:
assert isinstance(setting_kwargs['grid_texrepeat'], (list, tuple, np.ndarray))
materials['mujoco']['asset']['material'][0]['@texrepeat'] = \
f'{setting_kwargs["grid_texrepeat"][0]} {setting_kwargs["grid_texrepeat"][1]}'
if 'grid_reflectance' in setting_kwargs:
materials['mujoco']['asset']['material'][0]['@reflectance'] = \
str(setting_kwargs["grid_reflectance"])
# Edit self
if 'self_rgb' in setting_kwargs:
assert isinstance(setting_kwargs['self_rgb'], (list, tuple, np.ndarray))
materials['mujoco']['asset']['material'][1]['@rgba'] = \
f'{setting_kwargs["self_rgb"][0]} {setting_kwargs["self_rgb"][1]} {setting_kwargs["self_rgb"][2]} 1'
# Edit skybox
if 'skybox_rgb' in setting_kwargs:
assert isinstance(setting_kwargs['skybox_rgb'], (list, tuple, np.ndarray))
skybox['mujoco']['asset']['texture']['@rgb1'] = \
f'{setting_kwargs["skybox_rgb"][0]} {setting_kwargs["skybox_rgb"][1]} {setting_kwargs["skybox_rgb"][2]}'
if 'skybox_rgb2' in setting_kwargs:
assert isinstance(setting_kwargs['skybox_rgb2'], (list, tuple, np.ndarray))
skybox['mujoco']['asset']['texture']['@rgb2'] = \
f'{setting_kwargs["skybox_rgb2"][0]} {setting_kwargs["skybox_rgb2"][1]} {setting_kwargs["skybox_rgb2"][2]}'
if 'skybox_markrgb' in setting_kwargs:
assert isinstance(setting_kwargs['skybox_markrgb'], (list, tuple, np.ndarray))
skybox['mujoco']['asset']['texture']['@markrgb'] = \
f'{setting_kwargs["skybox_markrgb"][0]} {setting_kwargs["skybox_markrgb"][1]} {setting_kwargs["skybox_markrgb"][2]}'
# Convert back to XML
model_xml = xmltodict.unparse(model)
assets['./common/materials.xml'] = xmltodict.unparse(materials)
assets['./common/skybox.xml'] = xmltodict.unparse(skybox)
return model_xml, assets
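# Illustrative call (the model filename and kwargs here are assumptions, not values
# defined in this module):
#
#   model_xml, assets = get_model_and_assets_from_setting_kwargs(
#       'cartpole.xml',
#       {'light_pos': [0.0, 0.0, 5.0], 'grid_rgb1': [0.1, 0.2, 0.3]})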
| [
"[email protected]"
] | |
d90cd378e2025596ceacb07d965c298a1359589d | 719da820d1aad1d352544badc022e0422f1f7588 | /tools/demo.py | e8a5d14474411fbf6d0447c7a38e38b2c4bd3789 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | yuhan210/py-faster-rcnn | b0886628ba7c0f01fd4ccbd6b35b14835dfbd922 | dbf36cc2a327d6d58b92ce4b973fdca45cf9d14e | refs/heads/master | 2021-01-10T00:57:05.152454 | 2016-03-14T16:38:11 | 2016-03-14T16:38:11 | 53,615,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.savefig('test.jpg')
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
#print prototxt, caffemodel
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
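# Typical invocation (assumed; run from the repo root after fetching the pretrained
# models with data/scripts/fetch_faster_rcnn_models.sh):
#
#   python tools/demo.py --net vgg16 --gpu 0
#   python tools/demo.py --net zf --cpu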
| [
"[email protected]"
] | |
c6e1e16d3f9cac75006e39850baa427f272dee11 | 05b3d499424e0ac49a1c7489e1455a48b02439de | /playhouse/dataset.py | 09d63fb2688a5af414cd2d8be4bce1f936cda952 | [
"MIT"
] | permissive | manipuladordedados/peewee | c5d6e0debd33e8163bfbe41e1107003734be0d7f | 82a71566b1f0d76430ac5efccb2bc09f491faedc | refs/heads/master | 2020-12-11T05:56:20.679084 | 2014-11-05T09:17:16 | 2014-11-05T09:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,375 | py | import csv
import datetime
from decimal import Decimal
import json
import operator
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import sys
from peewee import *
from playhouse.db_url import connect
from playhouse.migrate import migrate
from playhouse.migrate import SchemaMigrator
from playhouse.reflection import Introspector
if sys.version_info[0] == 3:
basestring = str
from functools import reduce
class DataSet(object):
def __init__(self, url):
self._url = url
parse_result = urlparse(url)
self._database_path = parse_result.path[1:]
# Connect to the database.
self._database = connect(url)
self._database.connect()
# Introspect the database and generate models.
self._introspector = Introspector.from_database(self._database)
self._models = self._introspector.generate_models()
self._migrator = SchemaMigrator.from_database(self._database)
class BaseModel(Model):
class Meta:
database = self._database
self._base_model = BaseModel
self._export_formats = self.get_export_formats()
def __repr__(self):
return '<DataSet: %s>' % self._database_path
def get_export_formats(self):
return {
'csv': CSVExporter,
'json': JSONExporter}
def __getitem__(self, table):
return Table(self, table, self._models.get(table))
@property
def tables(self):
return self._database.get_tables()
def __contains__(self, table):
return table in self.tables
def connect(self):
self._database.connect()
def close(self):
self._database.close()
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._database.is_closed():
self.close()
def query(self, sql, params=None, commit=True):
return self._database.execute_sql(sql, params, commit)
def transaction(self):
if self._database.transaction_depth() == 0:
return self._database.transaction()
else:
return self._database.savepoint()
def freeze(self, query, format='csv', filename=None, file_obj=None,
**kwargs):
if filename and file_obj:
raise ValueError('file is over-specified. Please use either '
'filename or file_obj, but not both.')
if not filename and not file_obj:
raise ValueError('A filename or file-like object must be '
'specified.')
if format not in self._export_formats:
valid_formats = ', '.join(sorted(self._export_formats.keys()))
raise ValueError('Unsupported format "%s". Use one of %s.' % (
format, valid_formats))
if filename:
file_obj = open(filename, 'w')
exporter = self._export_formats[format](query)
exporter.export(file_obj, **kwargs)
if filename:
file_obj.close()
class Table(object):
def __init__(self, dataset, name, model_class):
self.dataset = dataset
self.name = name
if model_class is None:
model_class = self._create_model()
model_class.create_table()
self.dataset._models[name] = model_class
self.model_class = model_class
def __repr__(self):
return '<Table: %s>' % self.name
def __len__(self):
return self.find().count()
def __iter__(self):
return iter(self.find().iterator())
def _create_model(self):
return type(str(self.name), (self.dataset._base_model,), {})
def create_index(self, columns, unique=False):
self.dataset._database.create_index(
self.model_class,
columns,
unique=unique)
def _guess_field_type(self, value):
if isinstance(value, basestring):
return TextField
if isinstance(value, (datetime.date, datetime.datetime)):
return DateTimeField
elif value is True or value is False:
return BooleanField
elif isinstance(value, int):
return IntegerField
elif isinstance(value, float):
return FloatField
elif isinstance(value, Decimal):
return DecimalField
return TextField
@property
def columns(self):
return self.model_class._meta.get_field_names()
def _migrate_new_columns(self, data):
new_keys = set(data) - set(self.model_class._meta.fields)
if new_keys:
operations = []
for key in new_keys:
field_class = self._guess_field_type(data[key])
field = field_class(null=True)
operations.append(
self.dataset._migrator.add_column(self.name, key, field))
field.add_to_class(self.model_class, key)
migrate(*operations)
def insert(self, **data):
self._migrate_new_columns(data)
return self.model_class.insert(**data).execute()
def _apply_where(self, query, filters, conjunction=None):
conjunction = conjunction or operator.and_
if filters:
expressions = [
(self.model_class._meta.fields[column] == value)
for column, value in filters.items()]
query = query.where(reduce(conjunction, expressions))
return query
def update(self, columns=None, conjunction=None, **data):
self._migrate_new_columns(data)
filters = {}
if columns:
for column in columns:
filters[column] = data.pop(column)
return self._apply_where(
self.model_class.update(**data),
filters,
conjunction).execute()
def _query(self, **query):
return self._apply_where(self.model_class.select(), query)
def find(self, **query):
return self._query(**query).dicts()
def find_one(self, **query):
try:
return self.find(**query).get()
except self.model_class.DoesNotExist:
return None
def all(self):
return self.find()
def delete(self, **query):
return self._apply_where(self.model_class.delete(), query).execute()
class Exporter(object):
def __init__(self, query):
self.query = query
def export(self, file_obj):
raise NotImplementedError
class JSONExporter(Exporter):
@staticmethod
def default(o):
if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
return o.isoformat()
elif isinstance(o, Decimal):
return str(o)
raise TypeError('Unable to serialize %r as JSON.' % o)
def export(self, file_obj, **kwargs):
json.dump(
list(self.query),
file_obj,
default=JSONExporter.default,
**kwargs)
class CSVExporter(Exporter):
def export(self, file_obj, header=True, **kwargs):
writer = csv.writer(file_obj, **kwargs)
if header and hasattr(self.query, '_select'):
writer.writerow([field.name for field in self.query._select])
for row in self.query.tuples():
writer.writerow(row)
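# A short usage sketch (the URL, table and column names are illustrative, not part
# of this module):
#
#   ds = DataSet('sqlite:///:memory:')
#   users = ds['users']                  # table is created on first access
#   users.insert(name='Huey', age=3)     # unknown columns are migrated in
#   huey = users.find_one(name='Huey')
#   ds.freeze(users.all(), format='json', filename='users.json')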
| [
"[email protected]"
] | |
7ef62aaf8b814aafa9e209c301de90e59413b19d | 84b81ad47af6a4f40c1f2fa7b513b9ede260d038 | /MyDiary_Backend/test_user.py | 93f3fd647bbf531a6f5a990eba6ee1d81235dd9f | [] | no_license | michael-basweti/michael-basweti.github.io | ebc219f69943f55779a888ae7b54cae14a5b09de | 34581d8560b2f7f60a0cf67d7e631e1ef9b89d7e | refs/heads/flask_api | 2022-12-24T21:57:02.396825 | 2018-10-26T10:19:08 | 2018-10-26T10:19:08 | 140,665,411 | 0 | 1 | null | 2022-07-29T22:33:35 | 2018-07-12T05:32:05 | JavaScript | UTF-8 | Python | false | false | 993 | py | """
nose tests for the api
"""
from nose.tools import assert_true
import requests
def test_get_all():
"""
test retrieving all diary entries
:return:
"""
response = requests.get('http://127.0.0.1:5000/mydiary/api/v1.0/entries/get')
assert_true(response.ok)
def test_post():
"""
test post
:return:
"""
response = requests.post('http://127.0.0.1:5000/mydiary/api/v1.0/entries/post')
assert_true(response.ok)
def test_get_one():
"""
test get one
:return:
"""
response = requests.get('http://127.0.0.1:5000/mydiary/api/v1.0/entries/get/1')
assert_true(response.ok)
def test_edit_one():
"""
test editing
:return:
"""
response = requests.put('http://127.0.0.1:5000/mydiary/api/v1.0/entries/edit/1')
assert_true(response.ok)
def test_delete_one():
"""
test delete
:return:
"""
response = requests.delete('http://127.0.0.1:5000/mydiary/api/v1.0/entries/delete/1')
assert_true(response.ok)
| [
"[email protected]"
] | |
26b00c725a10bf052e14ac1f9e8a0e04d851fb2b | e3ce9a14ba58eaf7a684f2b6088a6172fa08bf41 | /02 Estructuras de control/condicional_anidados.py | 9528be679e9b51cafd89aa1cd194fdf697c8c464 | [] | no_license | edwinhrojasm/python_basico_20203 | f2134170f9ffe7d42fad590f795a8201293771c7 | 54fa3da2d9df3684bd3c07c8b95118ad5f0b7684 | refs/heads/master | 2022-12-30T17:16:31.406874 | 2020-10-22T00:45:27 | 2020-10-22T00:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | x = int(input("Ingrese un valor entero: "))
print("Usted ingreso el: " + str(x))
if x % 4 == 0:
x /= 4
else:
if x % 2 == 0:
x /= 2
else:
if x % 3 == 0:
x /= 3
else:
x += 1
print("Ahora el valor es: " + str(x))
| [
"[email protected]"
] | |
77a42a6aed8161e58c41bcbd33af724bea02d78d | 79e45a6e4846927da432087aba845036b11c5622 | /PROD/bin/MarketData/Daily/EOGdailyOHLC_withvol.py | 8bce941f7a85afe3b14750dc940bee4bec6e7c77 | [] | no_license | mjserpico/Scarlett-Trading | cba2bcfaacf886b9d851d978683b4ce641c8f6ad | 9778717393dbb0818ee026356996d1806345a6c2 | refs/heads/master | 2020-03-21T21:39:51.108503 | 2019-05-09T02:06:26 | 2019-05-09T02:06:26 | 139,076,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,162 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 08 09:16:43 2017
@author: Michael
"""
import mysql.connector
from ib.opt import Connection
from ib.ext.Contract import Contract
import time
import logging
import datetime
import datalink #universal logins for environment
import math
Flag = 0
CCY1 = "E"
CCY2 = "OG"
Table = 'EOG'
yClose = 0
logging.basicConfig(filename='DailyOHLC' + str(datetime.date.today()) + '.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('DailyOHLC' + str(datetime.date.today()) + '.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.debug('Starting DailyOHLC')
def truncate(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
s = '{}'.format(f)
if 'e' in s or 'E' in s:
return '{0:.{1}f}'.format(f, n)
i, p, d = s.partition('.')
return '.'.join([i, (d+'0'*n)[:n]])
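# e.g. truncate(3.14159, 3) -> '3.141'; note the result is a string, not a float.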
def reply_handler(msg):
#print(msg.value)
logger.debug('In beginning of Reply Handler')
print("Reply:", msg)
test = msg.open
test2 = msg.high
test3 = msg.low
test4 = msg.close
test5 = msg.volume
logger.debug('test %s', test)
logger.debug('test5 %s', test5)
global Flag
logger.debug('Flag %s', Flag)
#test5 - msg.volume
logger.debug('In Reply Handler')
if float(test) != -1:
import time
logger.debug('Valid Price Found (OPEN NOT -1)')
#cnx = mysql.connector.connect(user='mjserpico', password='UrzE8B66',host="scar01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com", database='SCAR01')
#cnx = mysql.connector.connect(user='Scarlett01', password='scar01lett',host="serpdb01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com", database='SERPDB01')
cnx = mysql.connector.connect(user=datalink.DB_User, password=datalink.DB_Pass,host=datalink.DB_Host, database=datalink.DB_Path)
logger.debug('Connected to Database')
cur = cnx.cursor()
cur.execute("Insert Into "+ Table + """(Date, Open, High, Low, Close) values(%s,%s,%s,%s,%s)""",(time.strftime("%m/%d/%Y"),float(test),float(test2),float(test3),float(test4)))
cnx.commit()
logger.debug('Ran Insert Script')
today = datetime.date.today( )
dayofweek = datetime.datetime.today().weekday()
if dayofweek == 0: #if Today is Monday
yesterday = today - datetime.timedelta(days=3) #Get Friday
month = (str(0) + str(yesterday.month))
day = (str(0)+ str(yesterday.day))
yesterday2 = (month[-2:] +"/"+ day[-2:] +"/"+str(yesterday.year))
logger.debug('Yesterday2 was %s', str(yesterday2))
else:
yesterday = today - datetime.timedelta(days=1) #Take 1 Day back
month = (str(0) + str(yesterday.month))
day = (str(0)+ str(yesterday.day))
yesterday2 = (month[-2:] +"/"+ day[-2:] +"/"+str(yesterday.year))
logger.debug('Yesterday2 was %s', str(yesterday2))
#MovingAverage Calculation
#Step 1 Get earliest Date to calculate avg from
#reformat date to DB convention first
logger.debug('Today is still %s', today)
backdate = today - datetime.timedelta(days=13)
logger.debug('Date shifted back 13 is %s', backdate)
dayofweek = backdate.weekday()
month = (str(0) + str(backdate.month))
day = (str(0)+ str(backdate.day))
backdate2 = (month[-2:] +"/"+ day[-2:] +"/"+str(backdate.year))
logger.debug('First Date of Moving Average is %s', backdate2)
query = ("SELECT max(ID) from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID1 = ID
logger.debug('ID1 is %s', ID1)
query = ("SELECT (max(ID)-20) from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID2 = ID
logger.debug('ID1 is %s', ID1)
logger.debug('ID2 is %s', ID2)
query = ("SELECT (max(ID)-1) from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID3 = ID
logger.debug('ID3 is %s', ID3)
#Pull ATR Length From RiskParameter Table
query = ("Select RiskParametersValue from RiskParameters where RiskParametersName = 'ATRlength';")
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
atrlength = ID
logger.debug('ID4 is %s', atrlength)
#ID for ATR length start point
query = ("SELECT (max(ID)-" + str(atrlength[0]) + ") from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID4 = ID
logger.debug('ID4 is %s', ID4)
#Pull MovingAvg Length RiskParameter Table
query = ("Select RiskParametersValue from RiskParameters where RiskParametersName = 'MovAvgLength';")
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
movavglength = ID
logger.debug('ID is %s', atrlength)
#ID for MovAvg length start point
query = ("SELECT (max(ID)-" + str(movavglength[0]) + ") from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID5 = ID
logger.debug('ID5 is %s', ID5)
query = ("SELECT (max(ID)-30) from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID30 = ID
logger.debug('ID30 is %s', ID30)
query = ("SELECT (max(ID)-60) from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID60 = ID
logger.debug('ID60 is %s', ID60)
query = ("SELECT (max(ID)-90) from " + CCY1 + CCY2)
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID90 = ID
logger.debug('ID90 is %s', ID90)
query = ("SELECT Close from " + CCY1 + CCY2 + " where ID = " + str(ID3[0]) + ";")
cur.execute(query)
for (Close) in cur:
yClose = Close
logger.debug('yClose is %s', yClose[0])
query = ("SELECT Close from " + CCY1 + CCY2 + " where ID = " + str(ID1[0]) + ";")
cur.execute(query)
for (Close) in cur:
tClose = Close
logger.debug('tClose is %s', tClose[0])
#Interday Return
CloseReturn = float(tClose[0])
yCloseReturn = float(yClose[0])
logger.debug('yClose is %s', yClose[0])
logger.debug('Close is %s', tClose[0])
returns = round(((CloseReturn / yCloseReturn) - 1) * 100,2)
logger.debug('Return is %s', returns)
query = ("UPDATE " + CCY1 + CCY2 + " SET PercentReturn = " + str(returns) + " where ID = " + str(ID1[0]) +";")
logger.debug('Query is %s', query)
cur.execute(query)
cnx.commit()
# period Moving Average
query = ("SELECT round(Avg(Close),2) as Avg from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID5[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
cur.execute(query)
for (Avg) in cur:
BBMovAvg = Avg #Final Moving Average Value
logger.debug('MovAvg is %s', BBMovAvg)
##Puts Moving Average Value in hasPosition Table for Reference with intraday strategies
query = ("UPDATE hasPosition SET MovingAvgValue = " + str(BBMovAvg[0]) + " where CCY =\'" + CCY1 + CCY2 +"\';")
logger.debug('Query is %s', query)
cur.execute(query)
cnx.commit()
#True Range
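# (Wilder's True Range: max of high-low, |high - previous close|, |low - previous close|)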
TR1 = (test2-test3)
TR2 = abs(test2-float(yClose[0]))
TR3 = abs(test3-float(yClose[0]))
TR = truncate(max(TR1,TR2,TR3),4)
print(TR)
print(TR1)
print(TR2)
print(TR3)
query = ("UPDATE "+ Table +" SET TrueRange = " + str(TR) + " where ID =\'" + str(ID1[0]) +"\';")
logger.debug('Query is %s', query)
print(query)
cur.execute(query)
cnx.commit()
#ATR Daily
query = ("SELECT round(Avg(TrueRange),2) as Avg from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID4[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
print(query)
cur.execute(query)
for (Avg) in cur:
ATRAvg = Avg #Final Moving Average Value
logger.debug('ATR is %s', ATRAvg)
##Puts ATR in hasPosition Table for Reference with intraday strategies
query = ("UPDATE hasPosition SET ATRValue = " + str(ATRAvg[0]) + " where CCY =\'" + CCY1 + CCY2 +"\';")
logger.debug('Query is %s', query)
cur.execute(query)
print(query)
cnx.commit()
#Calculate 30D Vol
query = ("SELECT round(stddev(PercentReturn),2) as vol30 from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID30[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
cur.execute(query)
for (vol30) in cur:
thirtyd = truncate((vol30[0] * math.sqrt(252)),2) #Final Moving Average Value
logger.debug('30d is %s', thirtyd)
query = ("UPDATE "+ Table +" SET thirtyvol = " + str(thirtyd) + " where ID =\'" + str(ID1[0]) +"\';")
logger.debug('Query is %s', query)
print(query)
cur.execute(query)
cnx.commit()
#Calculate 60D Vol
query = ("SELECT round(stddev(PercentReturn),2) as vol60 from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID60[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
cur.execute(query)
for (vol60) in cur:
sixtyd = truncate((vol60[0] * math.sqrt(252)),2) #Final Moving Average Value
logger.debug('sixtyd is %s', sixtyd)
query = ("UPDATE "+ Table +" SET sixtyvol = " + str(sixtyd) + " where ID =\'" + str(ID1[0]) +"\';")
logger.debug('Query is %s', query)
print(query)
cur.execute(query)
cnx.commit()
#Calculate 90D Vol
query = ("SELECT round(stddev(PercentReturn),2) as vol90 from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID90[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
cur.execute(query)
for (vol90) in cur:
ninetyd = truncate((vol90[0] * math.sqrt(252)),2) #Final Moving Average Value
logger.debug('ninetyd is %s', ninetyd)
query = ("UPDATE "+ Table +" SET ninetyvol = " + str(ninetyd) + " where ID =\'" + str(ID1[0]) +"\';")
logger.debug('Query is %s', query)
print(query)
cur.execute(query)
cnx.commit()
Flag = 1
logger.debug('Flag set to %s', Flag)
print(Flag)
return(Flag)
while Flag == 0:
logger.debug('Flag set to %s', Flag)
conn = Connection.create(port=4002, clientId=999)
conn.connect()
logger.debug('Connecting to Server')
time.sleep(1)
conn.register(reply_handler,'HistoricalData') #By registering "HistoricalData" --the Method name only --we can eliminate all the open order garbage
logger.debug('Registered HistoricalData Reply Handler')
time.sleep(1)
qqq = Contract()
qqq.m_symbol = Table
qqq.m_secType = 'STK'
qqq.m_exchange = 'SMART:ISLAND'
qqq.m_currency = 'USD'
logger.debug('Requesting historical data')
conn.reqHistoricalData(1, qqq, '', '1 D', '1 day', 'TRADES', 0, 1)
logger.debug('Returned from Reply Handler')
time.sleep(1) #give IB time to send us messages
logger.debug('Disconnecting from Server')
conn.disconnect()
logger.debug('Finished Daily OHLC') | [
"[email protected]"
] | |
8e729bdeca6812df6854b6e2437387c139da97d3 | 223fde0acac1b6100277e8ad2a7cb0233b4fbce7 | /src/pyMission/multipoint_derivatives.py | 172ace25b7fa21169998aa55a84d802e626152d2 | [] | no_license | hwangjt/pyMission-1 | 26cf5646bf79560a7d52d2db56ecde0b535bdc85 | 316c738afde8b29225586a6a13eea0b97aaa894d | refs/heads/master | 2020-12-25T10:08:39.327912 | 2015-04-02T20:42:20 | 2015-04-02T20:42:20 | 30,985,983 | 0 | 0 | null | 2015-02-26T19:28:24 | 2015-02-18T20:45:25 | OpenEdge ABL | UTF-8 | Python | false | false | 5,825 | py | ''' Analysis with multiple mission segments in parallel '''
import time
import numpy as np
from openmdao.main.api import set_as_top, Driver, Assembly
from openmdao.main.mpiwrap import MPI
from openmdao.main.test.simpledriver import SimpleDriver
from openmdao.lib.casehandlers.api import BSONCaseRecorder
from pyoptsparse_driver.pyoptsparse_driver import pyOptSparseDriver
from pyMission.segment import MissionSegment
# Same discretization for each segment for now.
num_elem = 250
num_cp = 50
model = set_as_top(Assembly())
#------------------------
# Mission Segment 1
#------------------------
x_range = 9000.0 # nautical miles
# define bounds for the flight path angle
gamma_lb = np.tan(-35.0 * (np.pi/180.0))/1e-1
gamma_ub = np.tan(35.0 * (np.pi/180.0))/1e-1
takeoff_speed = 83.3
landing_speed = 72.2
altitude = 10 * np.sin(np.pi * np.linspace(0,1,num_elem+1))
start = time.time()
x_range *= 1.852
x_init = x_range * 1e3 * (1-np.cos(np.linspace(0, 1, num_cp)*np.pi))/2/1e6
M_init = np.ones(num_cp)*0.82
h_init = 10 * np.sin(np.pi * x_init / (x_range/1e3))
model.add('seg1', MissionSegment(num_elem=num_elem, num_cp=num_cp,
x_pts=x_init, surr_file='crm_surr'))
# Initial value of the parameter
model.seg1.h_pt = h_init
model.seg1.M_pt = M_init
model.seg1.set_init_h_pt(altitude)
# Calculate velocity from the Mach we have specified.
model.seg1.SysSpeed.v_specified = False
# Initial design parameters
model.seg1.S = 427.8/1e2
model.seg1.ac_w = 210000*9.81/1e6
model.seg1.thrust_sl = 1020000.0/1e6
model.seg1.SFCSL = 8.951*9.81
model.seg1.AR = 8.68
model.seg1.oswald = 0.8
# Flag for making sure we run serial if we do an mpirun
model.seg1.driver.system_type = 'serial'
model.seg1.coupled_solver.system_type = 'serial'
#------------------------
# Mission Segment 2
#------------------------
x_range = 7000.0 # nautical miles
# define bounds for the flight path angle
gamma_lb = np.tan(-35.0 * (np.pi/180.0))/1e-1
gamma_ub = np.tan(35.0 * (np.pi/180.0))/1e-1
takeoff_speed = 83.3
landing_speed = 72.2
altitude = 10 * np.sin(np.pi * np.linspace(0,1,num_elem+1))
start = time.time()
x_range *= 1.852
x_init = x_range * 1e3 * (1-np.cos(np.linspace(0, 1, num_cp)*np.pi))/2/1e6
M_init = np.ones(num_cp)*0.82
h_init = 10 * np.sin(np.pi * x_init / (x_range/1e3))
model.add('seg2', MissionSegment(num_elem=num_elem, num_cp=num_cp,
x_pts=x_init, surr_file='crm_surr'))
# Initial value of the parameter
model.seg2.h_pt = h_init
model.seg2.M_pt = M_init
model.seg2.set_init_h_pt(altitude)
# Calculate velocity from the Mach we have specified.
model.seg2.SysSpeed.v_specified = False
# Initial design parameters
model.seg2.S = 427.8/1e2
model.seg2.ac_w = 210000*9.81/1e6
model.seg2.thrust_sl = 1020000.0/1e6
model.seg2.SFCSL = 8.951*9.81
model.seg2.AR = 8.68
model.seg2.oswald = 0.8
# Flag for making sure we run serial if we do an mpirun
model.seg2.driver.system_type = 'serial'
model.seg2.coupled_solver.system_type = 'serial'
#------------------------
# Mission Segment 3
#------------------------
x_range = 5000.0 # nautical miles
# define bounds for the flight path angle
gamma_lb = np.tan(-35.0 * (np.pi/180.0))/1e-1
gamma_ub = np.tan(35.0 * (np.pi/180.0))/1e-1
takeoff_speed = 83.3
landing_speed = 72.2
altitude = 10 * np.sin(np.pi * np.linspace(0,1,num_elem+1))
start = time.time()
x_range *= 1.852
x_init = x_range * 1e3 * (1-np.cos(np.linspace(0, 1, num_cp)*np.pi))/2/1e6
M_init = np.ones(num_cp)*0.82
h_init = 10 * np.sin(np.pi * x_init / (x_range/1e3))
model.add('seg3', MissionSegment(num_elem=num_elem, num_cp=num_cp,
x_pts=x_init, surr_file='crm_surr'))
# Initial value of the parameter
model.seg3.h_pt = h_init
model.seg3.M_pt = M_init
model.seg3.set_init_h_pt(altitude)
# Calculate velocity from the Mach we have specified.
model.seg3.SysSpeed.v_specified = False
# Initial design parameters
model.seg3.S = 427.8/1e2
model.seg3.ac_w = 210000*9.81/1e6
model.seg3.thrust_sl = 1020000.0/1e6
model.seg3.SFCSL = 8.951*9.81
model.seg3.AR = 8.68
model.seg3.oswald = 0.8
# Flag for making sure we run serial if we do an mpirun
model.seg3.driver.system_type = 'serial'
model.seg3.coupled_solver.system_type = 'serial'
#----------------------
# Prepare to Run
#----------------------
model.driver.workflow.add(['seg1', 'seg2', 'seg3'])
#model._setup()
#from openmdao.util.dotgraph import plot_system_tree
#plot_system_tree(model._system)
model.replace('driver', SimpleDriver())
model.driver.add_objective('seg1.fuelburn + seg2.fuelburn + seg3.fuelburn')
model.driver.add_constraint('seg1.h[0] = 0.0')
model.driver.add_constraint('seg2.h[0] = 0.0')
model.driver.add_constraint('seg3.h[0] = 0.0')
model.driver.add_constraint('seg1.h[-1] = 0.0')
model.driver.add_constraint('seg2.h[-1] = 0.0')
model.driver.add_constraint('seg3.h[-1] = 0.0')
model.driver.add_constraint('seg1.Tmin < 0.0')
model.driver.add_constraint('seg2.Tmin < 0.0')
model.driver.add_constraint('seg3.Tmin < 0.0')
model.driver.add_constraint('seg1.Tmax < 0.0')
model.driver.add_constraint('seg2.Tmax < 0.0')
model.driver.add_constraint('seg3.Tmax < 0.0')
model.driver.add_parameter('seg1.h_pt', low=0.0, high=14.1)
model.driver.add_parameter('seg2.h_pt', low=0.0, high=14.1)
model.driver.add_parameter('seg3.h_pt', low=0.0, high=14.1)
model.driver.gradient_options.iprint = 1
model.driver.gradient_options.lin_solver = 'linear_gs'
model.driver.gradient_options.maxiter = 1
#model.driver.gradient_options.lin_solver = 'petsc_ksp'
start = time.time()
model.run()
J = model.driver.workflow.calc_gradient(return_format='dict')
print "."
if MPI:
J = model.driver.workflow._system.get_combined_J(J)
if MPI.COMM_WORLD.rank == 0:
print J
else:
print "J", J
print 'Simulation TIME:', time.time() - start
| [
"[email protected]"
] | |
3d8dc115afb63ed44f0989c80921541c2a63bb00 | 2aed68d1ee14eb3fc344fe1e0db99b20f0c9a166 | /xnr_0313/xnr/timed_python_files/community/community_find_weibo.py | ff05052ca2aaca67b9456c264b651a3863fdd17c | [] | no_license | zhhhzhang/xnr1 | a8ab151d99e74124eae2ec15c61281a32cb9ce8d | bfa621916c9a787bcdff4573a06d12056e25c556 | refs/heads/master | 2020-03-19T04:56:22.330912 | 2018-05-30T12:00:12 | 2018-05-30T12:00:12 | 135,883,486 | 0 | 1 | null | 2018-06-03T07:35:36 | 2018-06-03T07:35:35 | null | UTF-8 | Python | false | false | 11,090 | py | # -*- coding: utf-8 -*-
import os
import json
import time
import sys
from elasticsearch import Elasticsearch
import networkx as nx
from community_shuaijian_gailv import find_community
import json,os,time,community
sys.path.append('../../')
from global_utils import es_flow_text,retweet_index_name_pre,retweet_index_type,\
comment_index_name_pre,comment_index_type,\
weibo_bci_history_index_name,weibo_bci_history_index_type,\
weibo_sensitive_history_index_name,weibo_sensitive_history_index_type
from global_config import S_TYPE,R_BEGIN_TIME,S_DATE
from time_utils import ts2datetime,datetime2ts
from parameter import DAY
sys.path.append('../../timed_python_files/community/')
from weibo_publicfunc import get_compelete_wbxnr
from weibo_create_target_user import create_xnr_targetuser
r_beigin_ts = datetime2ts(R_BEGIN_TIME)
########## Shared module-level variables
PATH = ''
FILE_NAME = 'graph_edges1.txt'
user_es = es_flow_text
def get_db_num(timestamp):
date = ts2datetime(timestamp)
date_ts = datetime2ts(date)
db_number = ((date_ts - r_beigin_ts) / (DAY*7)) % 2 + 1
#run_type
if S_TYPE == 'test':
db_number = 1
return db_number
now_ts = time.time()
db_number = get_db_num(now_ts)
retweet_index = retweet_index_name_pre + str(db_number)
retweet_type = retweet_index_type
comment_index = comment_index_name_pre + str(db_number)
comment_type = comment_index_type
influence_index = weibo_bci_history_index_name
influence_type = weibo_bci_history_index_type #bci_week_ave
sensitive_index = weibo_sensitive_history_index_name
sensitive_type = weibo_sensitive_history_index_type #sensitive_week_ave
def get_users(xnr_user_no,nodes=None):
if not nodes:
print 'get xnr es...'
# result = xnr_es.search(index=save_index,doc_type=save_type,body={'query':{'match_all':{}},'size':999999})
# result = result['hits']['hits']
# uids = [i['_source']['uid'] for i in result]
uids = create_xnr_targetuser(xnr_user_no)
else:
print 'have input nodes...'
uids = nodes
retweet_result = user_es.mget(index=retweet_index, doc_type=retweet_type,body={'ids':uids}, _source=True)['docs']
comment_result = user_es.mget(index=comment_index, doc_type=comment_type,body={'ids':uids}, _source=True)['docs']
G = nx.Graph()
for i in retweet_result:
print 'i:',i
if not i['found']:
continue
uid_retweet = json.loads(i['_source']['uid_retweet'])
max_count = max([int(n) for n in uid_retweet.values()])
G.add_weighted_edges_from([(i['_source']['uid'],j,float(uid_retweet[j])/max_count) for j in uid_retweet.keys() if j != i['_source']['uid'] and j and i['_source']['uid']])
for i in comment_result:
print 'comment_i:',i
if not i['found']:
continue
uid_comment = json.loads(i['_source']['uid_comment'])
max_count = max([int(n) for n in uid_comment.values()])
G.add_weighted_edges_from([(i['_source']['uid'],j,float(uid_comment[j])/max_count) for j in uid_comment.keys() if j != i['_source']['uid'] and j and i['_source']['uid']])
return G
def find_from_uid_list(xnr_user_no,nodes=None,path=PATH,file_name=FILE_NAME,com_type='copra',G=None):
# Get the user set
print 'get users...',type(G)
if not G:
G = get_users(xnr_user_no,nodes)
else:
G = G.subgraph(nodes)
node_clus = nx.clustering(G) # roughly 500k nodes
print 'number of users:',len(node_clus)
nodes = [i for i in node_clus if node_clus[i]>0] # roughly 70k nodes
print 'node clustering > 0:',len(nodes)
allG = G
print 'allg',allG.number_of_nodes()
# Keep only users whose clustering coefficient is greater than 0
G = G.subgraph(nodes)
try:
G.remove_node('')
except:
pass
# G.remove_nodes_from(list(set(node_clus)-set(nodes))) # roughly 800k edges
# G.remove_node('')
print 'number of edges:',G.number_of_edges(),' number of nodes:',G.number_of_nodes()
degree_dict = nx.degree_centrality(G)
print 'find coms using ',com_type
start = time.time()
# Choose the community detection method
if com_type in ['oslom','slpa']:
# Write the edges to a file for the external tool
f = open(path+file_name,'w')
count = 0
for s,r in G.edges():
if s and r:
f.write(s+' '+r+' '+str(G[s][r]['weight'])+'\n')
count += 1
if count % 100 == 0:
print count
f.close()
print('total nodes count:',len(nodes),' total edges count:',count)
if com_type == 'oslom':
coms = oslom_coms(path,file_name)
else:
coms = slpa_coms(path,file_name)
coms_list = coms.values()
else:
# Pass the edges directly
file_path = './weibo_data/' + ts2datetime(int(time.time())) + '_' + str(int(time.time()))
coms_list = find_community(degree_dict,G,file_path)
print 'find community time:',time.time()-start
print 'post process...'
coms_list = post_process(allG,coms_list)
return G,allG,coms_list
def post_process(G,coms_list):
# Post-process communities: drop those with fewer than 3 members; re-split those larger than 10000
new_coms = []
count = 0
for v in coms_list:
if len(v) < 3:
continue
elif len(v) > 10000:
sub_g, sub_allg, sub_coms = find_from_uid_list(None, v, G=G)
for sub_v in sub_coms:
if len(sub_v)>=3:
new_coms.append(sub_v)
else:
new_coms.append(v)
print 'len coms:',len(new_coms)
return new_coms
def group_evaluate(xnr_user_no,nodes,all_influence,all_sensitive,G=None):
result = {}
result['xnr_user_no'] = xnr_user_no
result['nodes'] = nodes
result['num'] = len(nodes)
if G:
sub_g = G.subgraph(nodes)
else:
sub_g = get_users(xnr_user_no,nodes)
result['density'] = nx.density(sub_g)
result['cluster'] = nx.average_clustering(sub_g)
result['transitivity'] = nx.transitivity(sub_g)
# for i in user_es.mget(index=sensitive_index, doc_type=sensitive_type,body={'ids':nodes}, fields=['sensitive_week_ave'],_source=False)['docs']:
# print i#['fields']['sensitive_week_ave']
influence_result = [float(i['fields']['bci_week_ave'][0]) if i['found'] else 0 for i in user_es.mget(index=influence_index, doc_type=influence_type,body={'ids':nodes}, fields=['bci_week_ave'],_source=False)['docs']]
sensitive_result = [float(i['fields']['sensitive_week_ave'][0]) if i['found'] else 0 for i in user_es.mget(index=sensitive_index, doc_type=sensitive_type,body={'ids':nodes}, fields=['sensitive_week_ave'],_source=False)['docs']]
result['max_influence'] = max(influence_result)/float(all_influence)
result['mean_influence'] = (sum(influence_result)/len(influence_result))/float(all_influence)
result['max_sensitive'] = max(sensitive_result)/float(all_sensitive)
result['mean_sensitive'] = (sum(sensitive_result)/len(sensitive_result))/float(all_sensitive)
return result
def get_evaluate_max(index_name,index_type,field):
query_body = {
'query':{
'match_all':{}
},
'size':1,
'sort':[{field: {'order': 'desc'}}]
}
try:
result = user_es.search(index=index_name, doc_type=index_type, body=query_body)['hits']['hits']
except Exception, e:
raise e
max_evaluate = result[0]['_source'][field]
return max_evaluate
def oslom_coms(path,name): #1222.24431992秒
t1 = time.time()
print os.getcwd()
os.system('cp '+path+name+' OSLOM2/OSLOM2/')
os.system('OSLOM2/OSLOM2/./oslom_undir -f OSLOM2/OSLOM2/'+name+' -hr 0 -w')
print time.time()-t1
with open('OSLOM2/OSLOM2/'+name+'_oslo_files/tp') as f:
#coms = collections.defaultdict(list)
coms = {}
i = 0
j = 0
lines = f.readlines()
for line in lines:
j += 1
if j%2 == 0:
coms[i] = line.split()
i += 1
#print 'coms',coms
return coms
def slpa_coms(path,name):
t1 = time.time()
os.chdir('/home/jiangln/own_weibo/code/GANXiS_v3.0.2')
os.system('java -jar GANXiSw.jar -i ../'+path+name)#+' -Sym 1'
print name,time.time()-t1
with open('output/SLPAw_'+name.split('.')[0]+'_run1_r0.4_v3_T100.icpm') as f:
# os.system('java -jar GANXiSw.jar -i 107_m_6.txt -Sym 1')
# print time.time()-t1
# with open('output/SLPAw_107_m_6_run1_r0.4_v3_T100.icpm') as f:
#coms = collections.defaultdict(list)
coms = {}
i = 0
lines = f.readlines()
for line in lines:
coms[i] = line.split()
i += 1
#print 'coms:::',coms
return coms
def ExtendQ(G,coms_list):
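# Extended modularity (EQ) for overlapping communities, roughly
#   EQ = (1/s) * sum_c sum_{i,j in c, i != j} (A_ij - r_i*r_j/s) / (O_i * O_j)
# where O_i is the number of communities node i belongs to, r_i is the weighted
# degree of node i, and s is the total weighted degree (the variable s below).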
# Count how many community labels each node belongs to
o = {}
r = {}
s = 0
for i in coms_list:
for j in i:
try:
o[j] += 1
except:
o[j] = 1
final_g = G.subgraph(o.keys())
for i in final_g.adjacency():
#i: ('a', {'c': {'weight': 2}, 'b': {'weight': 1}, 'd': {'weight': 1}})
r[i[0]] = sum([j['weight'] for j in i[1].values()])
s += r[i[0]]
EQ = 0.0
for com in coms_list:
for i in com:
for j in com:
if i == j:
continue
try:
EQ += (final_g.adj[i][j]['weight']-(r[i]*r[j])/s)/float(o[i]*o[j])
except:
continue
EQ = EQ/s
return EQ
#组织社区生成
def create_weibo_community():
# xnr_user_no_list = get_compelete_wbxnr()
xnr_user_no_list = ['WXNR0004']
for xnr_user_no in xnr_user_no_list:
# The given nodes may be dropped because of network structure or ordering
s = time.time()
# Get the detected communities
G,allG,coms_list = find_from_uid_list(xnr_user_no)
print 'group evaluate...'
all_influence = get_evaluate_max(influence_index,influence_type,'bci_week_ave')
all_sensitive = get_evaluate_max(sensitive_index,sensitive_type,'sensitive_week_ave')
print 'allG nodes:',allG.number_of_nodes()
print 'G nodes:',G.number_of_nodes()
file_path = './weibo_data/' + xnr_user_no + '_' + ts2datetime(int(time.time())) + '_' +'save_com.json'
print 'file_path:',file_path
f = open(file_path,'w')
for k,v in enumerate(coms_list):
# Compute evaluation metrics for the community
f.write(json.dumps(group_evaluate(xnr_user_no,v,all_influence,all_sensitive,G))+'\n')
print 'total time:',time.time()-s
print 'eq:',ExtendQ(allG,coms_list)
if __name__ == '__main__':
create_weibo_community()
| [
"[email protected]"
] | |
d6ed7c947efd9d5100b44e9a063b25dac284ccc0 | b68c2043016baec884e8fb7edcda53243a9807b8 | /src/storage/storage.py | fd44d652b0d77394ca916177b3f979d69662dff8 | [] | no_license | INP-Group/SystemJ | 7a1dcf4867591c663e96be0c41089151cc31887d | c48ada6386653402ed1b6a8895ae223846873158 | refs/heads/master | 2020-04-05T23:20:11.972621 | 2015-07-19T03:34:03 | 2015-07-19T03:34:03 | 39,321,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | # -*- encoding: utf-8 -*-
from project.settings import POSTGRESQL_DB, \
POSTGRESQL_HOST, POSTGRESQL_PASSWORD, POSTGRESQL_TABLE, \
POSTGRESQL_USER, ZEROMQ_HOST, ZEROMQ_PORT
from src.storage.berkeley import BerkeleyStorage
from src.storage.postgresql import PostgresqlStorage
from src.storage.zeromqserver import ZeroMQServer
class Storage(object):
def __init__(self):
self.sql_storage = PostgresqlStorage(database=POSTGRESQL_DB,
user=POSTGRESQL_USER,
password=POSTGRESQL_PASSWORD,
tablename=POSTGRESQL_TABLE,
host=POSTGRESQL_HOST)
self.berkeley_db = BerkeleyStorage(sql_storage=self.sql_storage)
self.zeromq = ZeroMQServer(host=ZEROMQ_HOST,
port=ZEROMQ_PORT,
berkeley_db=self.berkeley_db)
def start(self):
self.zeromq.start()
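# Minimal usage sketch (all connection settings come from project.settings):
#
#   if __name__ == '__main__':
#       Storage().start()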
| [
"[email protected]"
] | |
3731cf807cb614a2393e21e6bdc30eece5399cf1 | 690cdb4acc7becf3c18955e3c2732ec5b0735b2c | /python-demos/concurrent_parallel_demos/cpu_bound_parallel.py | b0e9e2b7975ba3b0740bc754929bc91af41d7692 | [
"MIT"
] | permissive | t4d-classes/advanced-python_04192021 | 30ba51cad9bc6af96fae2b9e2998fdb60244d6ba | a30cd924d918bf41c0775a1235eef849746a5f3d | refs/heads/master | 2023-04-08T03:22:01.145375 | 2021-04-23T22:35:18 | 2021-04-23T22:35:18 | 357,016,064 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | """ cpu bound parallel demo """
from collections.abc import Generator
import itertools
import time
import multiprocessing as mp
def fibonacci() -> Generator[int, None, None]:
""" generate an infinite fibonacci sequence """
num_1 = 0
num_2 = 1
yield num_1
yield num_2
while True:
next_num = num_1 + num_2
yield next_num
num_1 = num_2
num_2 = next_num
def calc_fib_total(p_results: list[int]) -> None:
""" calc fib total and add to list """
total = 0
for num in itertools.islice(fibonacci(), 0, 500000):
total += num
p_results.append(total)
if __name__ == "__main__":
start_time = time.time()
with mp.Manager() as manager:
results: list[int] = manager.list()
processes: list[mp.Process] = []
for _ in range(8):
a_process = mp.Process(target=calc_fib_total, args=(results,))
a_process.start()
processes.append(a_process)
for a_process in processes:
a_process.join()
time_elapsed = time.time() - start_time
print(len(results))
print(time_elapsed)
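        # The work in calc_fib_total is CPU-bound, so separate processes are used
        # instead of threads: CPython's GIL keeps threads from running Python
        # bytecode in parallel, while each mp.Process runs in its own interpreter.
        # The Manager().list() proxy is what lets the child processes append
        # results that the parent can read back after join().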
| [
"[email protected]"
] | |
c7050b95df428a9e2e0aba7c94567211af4cf38f | bb6ce2f4fc53dc9d2fc7a701cd4683b23ecf30b2 | /tests/py/test_browsing.py | 4897ee674d4f844978c35362aa7bff14e17d8d0c | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | liberapay/liberapay.com | 2640b9f830efaadd17009e3aed72eadd19c0f94e | a02eea631f89b17ac8179bdd37e5fa89ac54ede8 | refs/heads/master | 2023-08-31T10:44:04.235477 | 2023-08-27T07:29:47 | 2023-08-27T07:29:47 | 36,075,352 | 1,586 | 282 | null | 2023-09-03T09:23:26 | 2015-05-22T14:03:52 | Python | UTF-8 | Python | false | false | 4,611 | py | import os
import re
from pando import Response
import pytest
from liberapay.billing.payday import Payday
from liberapay.testing import EUR, Harness
from liberapay.utils import find_files
overescaping_re = re.compile(r'&amp;(#[0-9]+|#x[0-9a-f]+|[a-z0-9]+);')
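# The pattern looks for an entity whose leading '&' is itself escaped (for example
# '&amp;eacute;' in the raw response body), i.e. text that was HTML-escaped twice.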
class BrowseTestHarness(Harness):
@classmethod
def setUpClass(cls):
super().setUpClass()
i = len(cls.client.www_root)
def f(spt):
if spt[spt.rfind('/')+1:].startswith('index.'):
return spt[i:spt.rfind('/')+1]
return spt[i:-4]
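        # f() maps a simplate path to the URL it serves: 'index.*' files collapse
        # to their parent directory and any other file simply loses its '.spt'
        # extension.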
urls = {}
for url in sorted(map(f, find_files(cls.client.www_root, '*.spt'))):
url = url.replace('/%username/membership/', '/team/membership/') \
.replace('/team/membership/%action', '/team/membership/join') \
.replace('/%username/news/%action', '/%username/news/subscribe') \
.replace('/for/%name/', '/for/wonderland/') \
.replace('/for/wonderland/%action', '/for/wonderland/leave') \
.replace('/%platform', '/github') \
.replace('/%user_name/', '/liberapay/') \
.replace('/%redirect_to', '/giving') \
.replace('/%back_to', '/') \
.replace('/%provider', '/stripe') \
.replace('/%payment_id', '/') \
.replace('/%payin_id', '/') \
.replace('/payday/%id', '/payday/') \
.replace('/%type', '/receiving.js')
urls[url.replace('/%username/', '/david/')] = None
urls[url.replace('/%username/', '/team/')] = None
cls.urls = list(urls)
def browse_setup(self):
self.david = self.make_participant('david')
self.team = self.make_participant('team', kind='group')
c = self.david.create_community('Wonderland')
self.david.upsert_community_membership(True, c.id)
self.team.add_member(self.david)
self.org = self.make_participant('org', kind='organization')
self.invoice_id = self.db.one("""
INSERT INTO invoices
(sender, addressee, nature, amount, description, details, documents, status)
VALUES (%s, %s, 'expense', ('28.04','EUR'), 'badges and stickers', null, '{}'::jsonb, 'new')
RETURNING id
""", (self.david.id, self.org.id))
Payday.start().run()
def browse(self, **kw):
for url in self.urls:
if url.endswith('/%exchange_id') or '/receipts/' in url:
continue
url = url.replace('/team/invoices/%invoice_id', '/org/invoices/%s' % self.invoice_id)
url = url.replace('/%invoice_id', '/%s' % self.invoice_id)
assert '/%' not in url
try:
r = self.client.GET(url, **kw)
except Response as e:
if e.code == 404 or e.code >= 500:
raise
r = e
assert r.code != 404
assert r.code < 500
assert not overescaping_re.search(r.text)
class TestBrowsing(BrowseTestHarness):
def test_anon_can_browse_in_french(self):
self.browse_setup()
self.browse(HTTP_ACCEPT_LANGUAGE=b'fr')
def test_new_participant_can_browse(self):
self.browse_setup()
self.browse(auth_as=self.david)
def test_active_participant_can_browse(self):
self.browse_setup()
self.add_payment_account(self.david, 'stripe')
bob = self.make_participant('bob')
self.add_payment_account(bob, 'paypal')
bob.set_tip_to(self.david, EUR('1.00'))
bob_card = self.upsert_route(bob, 'stripe-card')
self.make_payin_and_transfer(bob_card, self.david, EUR('2.00'))
self.david.set_tip_to(bob, EUR('0.50'))
david_paypal = self.upsert_route(self.david, 'paypal')
self.make_payin_and_transfer(david_paypal, bob, EUR('20.00'))
self.browse(auth_as=self.david)
def test_admin_can_browse(self):
self.browse_setup()
admin = self.make_participant('admin', privileges=1)
self.browse(auth_as=admin)
@pytest.mark.skipif(
os.environ.get('LIBERAPAY_I18N_TEST') != 'yes',
reason="this is an expensive test, we don't want to run it every time",
)
class TestTranslations(BrowseTestHarness):
def test_all_pages_in_all_supported_langs(self):
self.browse_setup()
for _, l in self.client.website.lang_list:
self.browse(HTTP_ACCEPT_LANGUAGE=l.tag.encode('ascii'))
| [
"[email protected]"
] | |
506efc67add5dd69127142ed921f92fd83266cd6 | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/rip/router/router.py | 67c75801f0a30a3cc4d2221e5d627163ebf81cfc | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 10,795 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Router(Base):
"""The Router class encapsulates a user managed router node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Router property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'router'
def __init__(self, parent):
super(Router, self).__init__(parent)
@property
def RouteRange(self):
"""An instance of the RouteRange class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip.router.routerange.routerange.RouteRange)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip.router.routerange.routerange import RouteRange
return RouteRange(self)
@property
def AuthorizationPassword(self):
"""If enableAuthorization is set, this is the 16-character password to be used. Only simple password authentication is supported.
Returns:
str
"""
return self._get_attribute('authorizationPassword')
@AuthorizationPassword.setter
def AuthorizationPassword(self, value):
self._set_attribute('authorizationPassword', value)
@property
def EnableAuthorization(self):
"""Indicates whether authorization is included in update messages.
Returns:
bool
"""
return self._get_attribute('enableAuthorization')
@EnableAuthorization.setter
def EnableAuthorization(self, value):
self._set_attribute('enableAuthorization', value)
@property
def Enabled(self):
"""Enables or disables the simulated router.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def InterfaceId(self):
"""The ID associated with the simulated interface.
Returns:
str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)
"""
return self._get_attribute('interfaceId')
@InterfaceId.setter
def InterfaceId(self, value):
self._set_attribute('interfaceId', value)
@property
def ReceiveType(self):
"""Filters the RIP version of messages this router will receive.
Returns:
str(receiveVersion1|receiveVersion2|receiveVersion1And2)
"""
return self._get_attribute('receiveType')
@ReceiveType.setter
def ReceiveType(self, value):
self._set_attribute('receiveType', value)
@property
def ResponseMode(self):
"""Controls the manner in which received routes are repeated back to their source. The modes are split horizon, no split horizon, and split horizon with poison reverse.
Returns:
str(default|splitHorizon|poisonReverse|splitHorizonSpaceSaver|silent)
"""
return self._get_attribute('responseMode')
@ResponseMode.setter
def ResponseMode(self, value):
self._set_attribute('responseMode', value)
@property
def SendType(self):
"""The method for sending RIP packets.
Returns:
str(multicast|broadcastV1|broadcastV2)
"""
return self._get_attribute('sendType')
@SendType.setter
def SendType(self, value):
self._set_attribute('sendType', value)
@property
def TrafficGroupId(self):
"""The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
Returns:
str(None|/api/v1/sessions/1/ixnetwork/traffic?deepchild=trafficGroup)
"""
return self._get_attribute('trafficGroupId')
@TrafficGroupId.setter
def TrafficGroupId(self, value):
self._set_attribute('trafficGroupId', value)
@property
def UpdateInterval(self):
"""The time, in seconds, between transmitted update messages.
Returns:
number
"""
return self._get_attribute('updateInterval')
@UpdateInterval.setter
def UpdateInterval(self, value):
self._set_attribute('updateInterval', value)
@property
def UpdateIntervalOffset(self):
"""A random percentage of the time value, expressed in seconds, is added to or subtracted from the update interval to stagger the transmission of messages.
Returns:
number
"""
return self._get_attribute('updateIntervalOffset')
@UpdateIntervalOffset.setter
def UpdateIntervalOffset(self, value):
self._set_attribute('updateIntervalOffset', value)
def add(self, AuthorizationPassword=None, EnableAuthorization=None, Enabled=None, InterfaceId=None, ReceiveType=None, ResponseMode=None, SendType=None, TrafficGroupId=None, UpdateInterval=None, UpdateIntervalOffset=None):
"""Adds a new router node on the server and retrieves it in this instance.
Args:
AuthorizationPassword (str): If enableAuthorization is set, this is the 16-character password to be used. Only simple password authentication is supported.
EnableAuthorization (bool): Indicates whether authorization is included in update messages.
Enabled (bool): Enables or disables the simulated router.
InterfaceId (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)): The ID associated with the simulated interface.
ReceiveType (str(receiveVersion1|receiveVersion2|receiveVersion1And2)): Filters the RIP version of messages this router will receive.
ResponseMode (str(default|splitHorizon|poisonReverse|splitHorizonSpaceSaver|silent)): Controls the manner in which received routes are repeated back to their source. The modes are split horizon, no split horizon, and split horizon with poison reverse.
SendType (str(multicast|broadcastV1|broadcastV2)): The method for sending RIP packets.
TrafficGroupId (str(None|/api/v1/sessions/1/ixnetwork/traffic?deepchild=trafficGroup)): The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
UpdateInterval (number): The time, in seconds, between transmitted update messages.
UpdateIntervalOffset (number): A random percentage of the time value, expressed in seconds, is added to or subtracted from the update interval to stagger the transmission of messages.
Returns:
self: This instance with all currently retrieved router data using find and the newly added router data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the router data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, AuthorizationPassword=None, EnableAuthorization=None, Enabled=None, InterfaceId=None, ReceiveType=None, ResponseMode=None, SendType=None, TrafficGroupId=None, UpdateInterval=None, UpdateIntervalOffset=None):
"""Finds and retrieves router data from the server.
All named parameters support regex and can be used to selectively retrieve router data from the server.
By default the find method takes no parameters and will retrieve all router data from the server.
Args:
AuthorizationPassword (str): If enableAuthorization is set, this is the 16-character password to be used. Only simple password authentication is supported.
EnableAuthorization (bool): Indicates whether authorization is included in update messages.
Enabled (bool): Enables or disables the simulated router.
InterfaceId (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)): The ID associated with the simulated interface.
ReceiveType (str(receiveVersion1|receiveVersion2|receiveVersion1And2)): Filters the RIP version of messages this router will receive.
ResponseMode (str(default|splitHorizon|poisonReverse|splitHorizonSpaceSaver|silent)): Controls the manner in which received routes are repeated back to their source. The modes are split horizon, no split horizon, and split horizon with poison reverse.
SendType (str(multicast|broadcastV1|broadcastV2)): The method for sending RIP packets.
TrafficGroupId (str(None|/api/v1/sessions/1/ixnetwork/traffic?deepchild=trafficGroup)): The name of the group to which this port is assigned, for the purpose of creating traffic streams among source/destination members of the group.
UpdateInterval (number): The time, in seconds, between transmitted update messages.
UpdateIntervalOffset (number): A random percentage of the time value, expressed in seconds, is added to or subtracted from the update interval to stagger the transmission of messages.
Returns:
self: This instance with matching router data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of router data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the router data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
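    # Usage sketch (hypothetical, not part of the generated file): with `rip` an
    # already retrieved parent object, the helpers defined above would be used as
    #
    #     router = rip.Router.add(Enabled=True, UpdateInterval=30)
    #     routers = rip.Router.find(Enabled=True)
    #
    # The variable `rip` and the chosen property values are assumptions made for
    # illustration only.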
| [
"[email protected]"
] | |
8d1ba021d13c75f5e8887ecec97357f511760b85 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/PrivateSamples/SVJ_2017_mZprime-1100_mDark-20_rinv-0p3_alpha-peak_n-2000_cff.py | d4d743d32e51d3a0fd33d110eea3d4204c870ddc | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 7,432 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-1.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-2.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-3.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-4.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-5.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-6.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-7.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-8.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-9.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-10.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-11.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-12.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-13.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-14.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-15.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-16.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-17.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-18.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-19.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-20.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-21.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-22.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-23.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-24.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-25.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-26.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-27.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-28.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-29.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-30.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-31.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-32.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-33.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-34.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-35.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-36.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-37.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-38.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-39.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-40.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-41.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-42.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-43.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-44.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-45.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-46.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-47.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-48.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-49.root',
'/store/user/lpcsusyhad/SVJ2017/ProductionV3/2017/MINIAOD/step4_MINIAOD_mZprime-1100_mDark-20_rinv-0.3_alpha-peak_n-2000_part-50.root',
] )
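# Note: this fragment only defines the input collections (maxEvents, readFiles,
# secFiles, source); attaching them to a cms.Process is left to whichever top-level
# configuration imports this _cff file. That usage is an assumption, not something
# stated in the file itself.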
| [
"[email protected]"
] | |
f2b2b4d9515fcb5e791cf75aec0382ee54e71dfc | 2d4b9ef6aa8c3e39999206cbfd1d1bb60e170077 | /cup/version.py | ef9dfc6395747ccf22e0131a2940f38f9781ecab | [
"MIT",
"Apache-2.0"
] | permissive | IsaacjlWu/CUP | 786ff784c7a22854d083e9cd041b605a1934072e | 5c985cd33ee7dc6f1f052a491d1c7b8915670942 | refs/heads/master | 2023-05-11T23:31:51.975880 | 2018-07-16T08:54:59 | 2018-07-16T08:54:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | #!/usr/bin/python
# -*- coding: utf-8 -*
# #############################################################################
#
# Copyright (c) Baidu.com, Inc. All Rights Reserved
#
# #############################################################################
"""
:author:
Guannan Ma maguannan @mythmgn
"""
VERSION = '1.6.1'
AUTHOR = 'nfs-qa'
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
| [
"[email protected]"
] | |
d58a13c2b21ea0a06e363f887cb465d9d2d70886 | 9680c27718346be69cf7695dba674e7a0ec662ca | /game-Python/Python Challenge-Math Game.py | 9bfe4c62b34c305859a22614baca17bad957cb14 | [] | no_license | Md-Monirul-Islam/Python-code | 5a2cdbe7cd3dae94aa63298b5b0ef7e0e31cd298 | df98f37dd9d21784a65c8bb0e46d47a646259110 | refs/heads/main | 2023-01-19T05:15:04.963904 | 2020-11-19T06:10:09 | 2020-11-19T06:10:09 | 314,145,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | import random
import operator
score = 0
def random_problem():
operators = {
"+": operator.add,
"-": operator.sub,
"*":operator.mul,
"/":operator.truediv,
}
num_1 = random.randint(1,10)
num_2 = random.randint(1,10)
operation = random.choice(list(operators.keys()))
answer = operators.get(operation)(num_1,num_2)
print(f"What is {num_1} {operation} {num_2}?")
return answer
def ask_question():
answer = random_problem()
guess = float(input())
return guess==answer
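# Note: because "/" maps to true division, the exact answer can be a repeating
# decimal (e.g. 4/3), so the exact float comparison above can reject reasonable
# answers. Rounding both sides, e.g. round(guess, 2) == round(answer, 2), would be
# one possible refinement (not part of the original game).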
def game():
print("How well do you math?\n")
score = 0
for i in range(5):
        if ask_question():
score += 1
print("Correct")
else:
print("Incorrect")
print(f"Your score is {score}")
game() | [
"[email protected]"
] | |
459b4624310b53ddf1066b5c175112767f16b74e | 60f7d711cb3f743f148ca4be4c507244a61d823d | /gaphor/diagram/classes/__init__.py | 2c89e61845c0a50239c9e661f4a18d4de9e98b58 | [
"Apache-2.0"
] | permissive | paulopperman/gaphor | 84ffd8c18ac4f015668fbd44662cbb3ae43b9016 | 6986c4f3469720a1618a9e8526cb6f826aea626a | refs/heads/master | 2020-05-24T21:57:54.252307 | 2019-05-16T22:51:50 | 2019-05-16T22:51:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from gaphor.diagram.classes.association import AssociationItem
from gaphor.diagram.classes.dependency import DependencyItem
from gaphor.diagram.classes.generalization import GeneralizationItem
from gaphor.diagram.classes.implementation import ImplementationItem
from gaphor.diagram.classes.interface import InterfaceItem
from gaphor.diagram.classes.klass import ClassItem
from gaphor.diagram.classes.package import PackageItem
def _load():
from gaphor.diagram.classes import (
classconnect,
interfaceconnect,
classespropertypages,
)
_load()
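# _load() imports the connect/property-page modules purely for their import-time
# side effects, presumably registering adapters for the items imported above; the
# actual registration mechanism lives in those modules, not here.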
| [
"[email protected]"
] | |
74c35250f7819ac52063403afc93980734d0a8ca | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/781a8eb20c5b32f8e378353cde4daa51/snippet.py | 1042a5ba0a6f94ea25c9ea17708bc9b21544c48c | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,545 | py | # [filter size, stride, padding]
#Assume the two dimensions are the same
#Each kernel requires the following parameters:
# - k_i: kernel size
# - s_i: stride
# - p_i: padding (if padding is uneven, right padding will be higher than left padding; "SAME" option in tensorflow)
#
#Each layer i requires the following parameters to be fully represented:
# - n_i: number of features (data layer has n_1 = imagesize )
# - j_i: distance (projected to image pixel distance) between center of two adjacent features
# - r_i: receptive field of a feature in layer i
# - start_i: position of the first feature's receptive field in layer i (indices start from 0; a negative value means the center falls into the padding)
import math
convnet = [[11,4,0],[3,2,0],[5,1,2],[3,2,0],[3,1,1],[3,1,1],[3,1,1],[3,2,0],[6,1,0], [1, 1, 0]]
layer_names = ['conv1','pool1','conv2','pool2','conv3','conv4','conv5','pool5','fc6-conv', 'fc7-conv']
imsize = 227
def outFromIn(conv, layerIn):
n_in = layerIn[0]
j_in = layerIn[1]
r_in = layerIn[2]
start_in = layerIn[3]
k = conv[0]
s = conv[1]
p = conv[2]
n_out = math.floor((n_in - k + 2*p)/s) + 1
actualP = (n_out-1)*s - n_in + k
pR = math.ceil(actualP/2)
pL = math.floor(actualP/2)
j_out = j_in * s
r_out = r_in + (k - 1)*j_in
start_out = start_in + ((k-1)/2 - pL)*j_in
return n_out, j_out, r_out, start_out
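# Worked example: with imsize = 227 and conv1 = [11, 4, 0], outFromIn returns
# n_out = (227 - 11)/4 + 1 = 55, j_out = 4, r_out = 1 + (11 - 1)*1 = 11 and
# start_out = 0.5 + (10/2 - 0)*1 = 5.5, i.e. 55 features whose 11-pixel receptive
# fields are spaced 4 pixels apart, the first one centred 5.5 pixels into the image.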
def printLayer(layer, layer_name):
print(layer_name + ":")
print("\t n features: %s \n \t jump: %s \n \t receptive size: %s \t start: %s " % (layer[0], layer[1], layer[2], layer[3]))
layerInfos = []
if __name__ == '__main__':
#first layer is the data layer (image) with n_0 = image size; j_0 = 1; r_0 = 1; and start_0 = 0.5
print ("-------Net summary------")
currentLayer = [imsize, 1, 1, 0.5]
printLayer(currentLayer, "input image")
for i in range(len(convnet)):
currentLayer = outFromIn(convnet[i], currentLayer)
layerInfos.append(currentLayer)
printLayer(currentLayer, layer_names[i])
print ("------------------------")
layer_name = raw_input ("Layer name where the feature in: ")
layer_idx = layer_names.index(layer_name)
idx_x = int(raw_input ("index of the feature in x dimension (from 0)"))
idx_y = int(raw_input ("index of the feature in y dimension (from 0)"))
n = layerInfos[layer_idx][0]
j = layerInfos[layer_idx][1]
r = layerInfos[layer_idx][2]
start = layerInfos[layer_idx][3]
assert(idx_x < n)
assert(idx_y < n)
print ("receptive field: (%s, %s)" % (r, r))
print ("center: (%s, %s)" % (start+idx_x*j, start+idx_y*j))
| [
"[email protected]"
] | |
975ef2a3f509bb2905399cf97654bef2b7ff076f | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fabric/acdropexcesshist1d.py | 0ab471b7eb43f5a3c7a4993b0f156a96b13d4623 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 31,834 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AcDropExcessHist1d(Mo):
"""
A class that represents historical statistics for drop packets in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.fabric.AcDropExcessHist1d", "drop packets")
counter = CounterMeta("excessPktPercentage", CounterCategory.GAUGE, "percentage", "excess percentage")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "excessPktPercentageMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "excessPktPercentageMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "excessPktPercentageAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "excessPktPercentageSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "excessPktPercentageThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "excessPktPercentageTr"
meta._counters.append(counter)
counter = CounterMeta("dropPktPercentage", CounterCategory.GAUGE, "percentage", "drop percentage")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropPktPercentageMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropPktPercentageMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropPktPercentageAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropPktPercentageSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropPktPercentageThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropPktPercentageTr"
meta._counters.append(counter)
counter = CounterMeta("excessPkt", CounterCategory.COUNTER, "packets", "excess packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "excessPktCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "excessPktPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "excessPktMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "excessPktMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "excessPktAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "excessPktSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "excessPktThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "excessPktTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "excessPktRate"
meta._counters.append(counter)
counter = CounterMeta("dropPkt", CounterCategory.COUNTER, "packets", "dropped packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "dropPktCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "dropPktPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropPktMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropPktMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropPktAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropPktSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropPktThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropPktTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "dropPktRate"
meta._counters.append(counter)
meta.moClassName = "fabricAcDropExcessHist1d"
meta.rnFormat = "HDfabricAcDropExcess1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical drop packets stats in 1 day"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.fabric.Path")
meta.parentClasses.add("cobra.model.fabric.Trail")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.fabric.AcDropExcessHist")
meta.rnPrefixes = [
('HDfabricAcDropExcess1d-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "dropPktAvg", "dropPktAvg", 9129, PropCategory.IMPLICIT_AVG)
prop.label = "dropped packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktAvg", prop)
prop = PropMeta("str", "dropPktCum", "dropPktCum", 9125, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "dropped packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktCum", prop)
prop = PropMeta("str", "dropPktMax", "dropPktMax", 9128, PropCategory.IMPLICIT_MAX)
prop.label = "dropped packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktMax", prop)
prop = PropMeta("str", "dropPktMin", "dropPktMin", 9127, PropCategory.IMPLICIT_MIN)
prop.label = "dropped packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktMin", prop)
prop = PropMeta("str", "dropPktPer", "dropPktPer", 9126, PropCategory.IMPLICIT_PERIODIC)
prop.label = "dropped packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktPer", prop)
prop = PropMeta("str", "dropPktPercentageAvg", "dropPktPercentageAvg", 9151, PropCategory.IMPLICIT_AVG)
prop.label = "drop percentage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktPercentageAvg", prop)
prop = PropMeta("str", "dropPktPercentageMax", "dropPktPercentageMax", 9150, PropCategory.IMPLICIT_MAX)
prop.label = "drop percentage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktPercentageMax", prop)
prop = PropMeta("str", "dropPktPercentageMin", "dropPktPercentageMin", 9149, PropCategory.IMPLICIT_MIN)
prop.label = "drop percentage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktPercentageMin", prop)
prop = PropMeta("str", "dropPktPercentageSpct", "dropPktPercentageSpct", 9152, PropCategory.IMPLICIT_SUSPECT)
prop.label = "drop percentage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktPercentageSpct", prop)
prop = PropMeta("str", "dropPktPercentageThr", "dropPktPercentageThr", 9153, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "drop percentage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropPktPercentageThr", prop)
prop = PropMeta("str", "dropPktPercentageTr", "dropPktPercentageTr", 9154, PropCategory.IMPLICIT_TREND)
prop.label = "drop percentage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktPercentageTr", prop)
prop = PropMeta("str", "dropPktRate", "dropPktRate", 9133, PropCategory.IMPLICIT_RATE)
prop.label = "dropped packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktRate", prop)
prop = PropMeta("str", "dropPktSpct", "dropPktSpct", 9130, PropCategory.IMPLICIT_SUSPECT)
prop.label = "dropped packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktSpct", prop)
prop = PropMeta("str", "dropPktThr", "dropPktThr", 9131, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "dropped packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropPktThr", prop)
prop = PropMeta("str", "dropPktTr", "dropPktTr", 9132, PropCategory.IMPLICIT_TREND)
prop.label = "dropped packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPktTr", prop)
prop = PropMeta("str", "excessPktAvg", "excessPktAvg", 9177, PropCategory.IMPLICIT_AVG)
prop.label = "excess packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktAvg", prop)
prop = PropMeta("str", "excessPktCum", "excessPktCum", 9173, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "excess packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktCum", prop)
prop = PropMeta("str", "excessPktMax", "excessPktMax", 9176, PropCategory.IMPLICIT_MAX)
prop.label = "excess packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktMax", prop)
prop = PropMeta("str", "excessPktMin", "excessPktMin", 9175, PropCategory.IMPLICIT_MIN)
prop.label = "excess packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktMin", prop)
prop = PropMeta("str", "excessPktPer", "excessPktPer", 9174, PropCategory.IMPLICIT_PERIODIC)
prop.label = "excess packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktPer", prop)
prop = PropMeta("str", "excessPktPercentageAvg", "excessPktPercentageAvg", 9199, PropCategory.IMPLICIT_AVG)
prop.label = "excess percentage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktPercentageAvg", prop)
prop = PropMeta("str", "excessPktPercentageMax", "excessPktPercentageMax", 9198, PropCategory.IMPLICIT_MAX)
prop.label = "excess percentage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktPercentageMax", prop)
prop = PropMeta("str", "excessPktPercentageMin", "excessPktPercentageMin", 9197, PropCategory.IMPLICIT_MIN)
prop.label = "excess percentage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktPercentageMin", prop)
prop = PropMeta("str", "excessPktPercentageSpct", "excessPktPercentageSpct", 9200, PropCategory.IMPLICIT_SUSPECT)
prop.label = "excess percentage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktPercentageSpct", prop)
prop = PropMeta("str", "excessPktPercentageThr", "excessPktPercentageThr", 9201, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "excess percentage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("excessPktPercentageThr", prop)
prop = PropMeta("str", "excessPktPercentageTr", "excessPktPercentageTr", 9202, PropCategory.IMPLICIT_TREND)
prop.label = "excess percentage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktPercentageTr", prop)
prop = PropMeta("str", "excessPktRate", "excessPktRate", 9181, PropCategory.IMPLICIT_RATE)
prop.label = "excess packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktRate", prop)
prop = PropMeta("str", "excessPktSpct", "excessPktSpct", 9178, PropCategory.IMPLICIT_SUSPECT)
prop.label = "excess packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktSpct", prop)
prop = PropMeta("str", "excessPktThr", "excessPktThr", 9179, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "excess packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("excessPktThr", prop)
prop = PropMeta("str", "excessPktTr", "excessPktTr", 9180, PropCategory.IMPLICIT_TREND)
prop.label = "excess packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("excessPktTr", prop)
prop = PropMeta("str", "index", "index", 6419, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
789d91f20f5e4d64ac0bfbc79cc982f2bf8ddde4 | 20cef5de28d025d4d37eb86ba2b1f832d52c089d | /src/gallium/drivers/zink/zink_extensions.py | 6d01052c39f122aec5bcd098d634e276e7171d12 | [] | no_license | martinmullins/mesa-emscripten | 73da0a64901b7664468f951ef09fb9a462134660 | b4225e327b67fd7eef411cc046c1c0fecb3900de | refs/heads/main | 2023-03-27T03:38:19.037337 | 2021-03-29T19:17:22 | 2021-03-29T19:17:22 | 352,753,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,925 | py | # Copyright © 2020 Hoe Hao Cheng
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Version:
device_version : (1,0,0)
struct_version : (1,0)
def __init__(self, version, struct=()):
self.device_version = version
if not struct:
self.struct_version = (version[0], version[1])
else:
self.struct_version = struct
# e.g. "VK_MAKE_VERSION(1,2,0)"
def version(self):
return ("VK_MAKE_VERSION("
+ str(self.device_version[0])
+ ","
+ str(self.device_version[1])
+ ","
+ str(self.device_version[2])
+ ")")
# e.g. "10"
def struct(self):
return (str(self.struct_version[0])+str(self.struct_version[1]))
# the sType of the extension's struct
# e.g. VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT
# for VK_EXT_transform_feedback and struct="FEATURES"
def stype(self, struct: str):
return ("VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_"
+ str(self.struct_version[0]) + "_" + str(self.struct_version[1])
+ '_' + struct)
class Extension:
name : str = None
alias : str = None
is_required : bool = False
enable_conds : [str] = None
# these are specific to zink_device_info.py:
has_properties : bool = False
has_features : bool = False
guard : bool = False
# these are specific to zink_instance.py:
core_since : Version = None
instance_funcs : [str] = None
def __init__(self, name, alias="", required=False, properties=False,
features=False, conditions=None, guard=False, core_since=None,
functions=None):
self.name = name
self.alias = alias
self.is_required = required
self.has_properties = properties
self.has_features = features
self.enable_conds = conditions
self.guard = guard
self.core_since = core_since
self.instance_funcs = functions
if alias == "" and (properties == True or features == True):
raise RuntimeError("alias must be available when properties and/or features are used")
# e.g.: "VK_EXT_robustness2" -> "robustness2"
def pure_name(self):
return '_'.join(self.name.split('_')[2:])
# e.g.: "VK_EXT_robustness2" -> "EXT_robustness2"
def name_with_vendor(self):
return self.name[3:]
# e.g.: "VK_EXT_robustness2" -> "Robustness2"
def name_in_camel_case(self):
return "".join([x.title() for x in self.name.split('_')[2:]])
# e.g.: "VK_EXT_robustness2" -> "VK_EXT_ROBUSTNESS2_EXTENSION_NAME"
# do note that inconsistencies exist, i.e. we have
# VK_EXT_ROBUSTNESS_2_EXTENSION_NAME defined in the headers, but then
# we also have VK_KHR_MAINTENANCE1_EXTENSION_NAME
def extension_name(self):
return self.name.upper() + "_EXTENSION_NAME"
# generate a C string literal for the extension
def extension_name_literal(self):
return '"' + self.name + '"'
# get the field in zink_device_info that refers to the extension's
# feature/properties struct
# e.g. rb2_<suffix> for VK_EXT_robustness2
def field(self, suffix: str):
return self.alias + '_' + suffix
# the sType of the extension's struct
# e.g. VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT
# for VK_EXT_transform_feedback and struct="FEATURES"
def stype(self, struct: str):
return ("VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_"
+ self.pure_name().upper()
+ '_' + struct + '_'
+ self.vendor())
# e.g. EXT in VK_EXT_robustness2
def vendor(self):
return self.name.split('_')[1]
# Type aliases
Layer = Extension
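# Illustrative usage sketch (assumption -- the real extension lists live in
# zink_device_info.py / zink_instance.py, not in this file):
#   EXTENSIONS = [
#       Extension("VK_KHR_maintenance1", required=True),
#       Extension("VK_EXT_robustness2", alias="rb2", features=True),
#       Extension("VK_EXT_transform_feedback", alias="tf", properties=True, features=True),
#   ]
#   EXTENSIONS[2].stype("FEATURES")  # -> "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT"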
| [
"[email protected]"
] | |
c4dffc3e3d2d97e40aea2ee1a8c985b1198d34c9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04005/s016124663.py | dbda349dc94067bc2bed8bd607675a19ba22e8d3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | A,B,C = map(int, input().split())
if A*B*C%2==0:
print(0)
exit()
mylist=[A*B,B*C,A*C]
print(min(mylist)) | [
"[email protected]"
] | |
cf2b1104c97b1464770fe039124140845851493c | 8b40a2959b8d2a2faca09f017e529bb6e02c0030 | /backend/manga/asgi.py | 29cb53e8774243095ab48245d7c364e42a77d799 | [
"MIT"
] | permissive | linea-it/manga | 80d0982ec83590abcec10a4d09510425d39c289d | 26add95475345d6c7f34465848d4d33a7a6b63b7 | refs/heads/master | 2023-09-05T20:05:50.471757 | 2023-09-04T15:45:18 | 2023-09-04T15:45:18 | 212,671,416 | 0 | 0 | MIT | 2023-09-12T19:31:22 | 2019-10-03T20:15:19 | Jupyter Notebook | UTF-8 | Python | false | false | 403 | py | """
ASGI config for manga project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'manga.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
5be29987b306b8975af2a9ab57afca0a1aa21f8a | 3b99e2863db4bcd3707e6f13641ddd9156289bc6 | /tilemap/part 13/settings.py | e9b72d78360aa52426696357aa16353739911721 | [
"MIT"
] | permissive | m-julian/pygame_tutorials | 500a5be3b4fad86fad577a7ea5493ac09ca41168 | be57d865de4ac0c18148e1785443c05445159779 | refs/heads/master | 2022-07-18T00:15:01.335459 | 2020-05-12T15:39:45 | 2020-05-12T15:39:45 | 263,055,700 | 0 | 0 | MIT | 2020-05-11T13:48:49 | 2020-05-11T13:48:48 | null | UTF-8 | Python | false | false | 1,016 | py | import pygame as pg
vec = pg.math.Vector2
# define some colors (R, G, B)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (106, 55, 5)
CYAN = (0, 255, 255)
# game settings
WIDTH = 1024 # 16 * 64 or 32 * 32 or 64 * 16
HEIGHT = 768 # 16 * 48 or 32 * 24 or 64 * 12
FPS = 60
TITLE = "Tilemap Demo"
BGCOLOR = BROWN
TILESIZE = 64
GRIDWIDTH = WIDTH / TILESIZE
GRIDHEIGHT = HEIGHT / TILESIZE
WALL_IMG = 'tileGreen_39.png'
# Player settings
PLAYER_HEALTH = 100
PLAYER_SPEED = 280
PLAYER_ROT_SPEED = 200
PLAYER_IMG = 'manBlue_gun.png'
PLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)
BARREL_OFFSET = vec(30, 10)
# Gun settings
BULLET_IMG = 'bullet.png'
BULLET_SPEED = 500
BULLET_LIFETIME = 1000
BULLET_RATE = 150
KICKBACK = 200
GUN_SPREAD = 5
BULLET_DAMAGE = 10
# Mob settings
MOB_IMG = 'zombie1_hold.png'
MOB_SPEED = 150
MOB_HIT_RECT = pg.Rect(0, 0, 30, 30)
MOB_HEALTH = 100
MOB_DAMAGE = 10
MOB_KNOCKBACK = 20
| [
"[email protected]"
] | |
232a9fa2ac5e25b2aad3b20924a51dcc73d3f9b1 | 349f39b27a7c3157a1f3db65f35b96bcdb2f5919 | /03/xx/07-sklearn/16-deep-learning/cnn.py | 12a3bc7c6707c150b18a6aa9e0a89c549f22e8bb | [] | no_license | microgenios/cod | 5f870c9cefbb80d18690909baa4c9d8b9be463c2 | 0805609cc780244c640963dc4c70052e3df57b4e | refs/heads/master | 2022-12-08T20:10:11.742940 | 2020-02-29T10:37:10 | 2020-02-29T10:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | #!/usr/bin/python
import argparse
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def build_arg_parser():
parser = argparse.ArgumentParser(description="Build a CNN classifier using MNIST data")
parser.add_argument("--input-dir", dest="input_dir", type=str, default="./mnist_data", help="Directory for storing data")
return parser
def get_weights(shape):
data = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(data)
def get_biases(shape):
data = tf.constant(0.1, shape=shape)
return tf.Variable(data)
def create_layer(shape):
W = get_weights(shape) # Get the weights and biases
b = get_biases([shape[-1]])
return W, b
def convolution_2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
def max_pooling(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
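# Shape bookkeeping for the graph built below: each 2x2 max-pool with stride 2
# halves the spatial resolution, so the 28x28 input becomes 14x14 after the first
# pool and 7x7 after the second -- which is why the fully connected layer is sized
# 7 * 7 * 64.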
if __name__ == "__main__":
args = build_arg_parser().parse_args()
mnist = input_data.read_data_sets(args.input_dir, one_hot=True) # Get the MNIST data
x = tf.placeholder(tf.float32, [None, 784]) # The images are 28x28, so create the input layer with 784 neurons (28x28=784)
x_image = tf.reshape(x, [-1, 28, 28, 1]) # Reshape 'x' into a 4D tensor
W_conv1, b_conv1 = create_layer([5, 5, 1, 32]) # Define the first convolutional layer
h_conv1 = tf.nn.relu(convolution_2d(x_image, W_conv1) + b_conv1) # Convolve the image with the weight tensor, add the bias, and then apply the ReLU function
h_pool1 = max_pooling(h_conv1) # Apply the max pooling operator
W_conv2, b_conv2 = create_layer([5, 5, 32, 64]) # Define the second convolutional layer
h_conv2 = tf.nn.relu(convolution_2d(h_pool1, W_conv2) + b_conv2) # Convolve the output of the previous layer with the weight tensor, add the bias, and then apply the ReLU function
h_pool2 = max_pooling(h_conv2) # Apply the max pooling operator
W_fc1, b_fc1 = create_layer([7 * 7 * 64, 1024]) # Define the fully connected layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) # Reshape the output of the previous layer
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Multiply the output of the previous layer by the weight tensor, add the bias, and then apply the ReLU function
keep_prob = tf.placeholder(tf.float32) # Define the dropout layer using a probability placeholder for all the neurons
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2, b_fc2 = create_layer([1024, 10]) # Define the readout layer (output layer)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_loss = tf.placeholder(tf.float32, [None, 10]) # Define the entropy loss and the optimizer
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_loss))
optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)
predicted = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_loss, 1)) # Define the accuracy computation
accuracy = tf.reduce_mean(tf.cast(predicted, tf.float32))
sess = tf.InteractiveSession() # Create and run a session
init = tf.initialize_all_variables()
sess.run(init)
num_iterations = 21000 # Start training
batch_size = 75
print("\nTraining the model....")
for i in range(num_iterations):
batch = mnist.train.next_batch(batch_size) # Get the next batch of images
if i % 50 == 0: # Print progress
cur_accuracy = accuracy.eval(feed_dict={x: batch[0], y_loss: batch[1], keep_prob: 1.0})
print("Iteration", i, ", Accuracy =", cur_accuracy)
optimizer.run(feed_dict={x: batch[0], y_loss: batch[1], keep_prob: 0.5}) # Train on the current batch
print("Test accuracy =", accuracy.eval(feed_dict={x: mnist.test.images, y_loss: mnist.test.labels, keep_prob: 1.0})) # Compute accuracy using test data
| [
"[email protected]"
] | |
ef444176e36f44038492a44d71ac8e6aca7a16c7 | d1c352676563b2decacfad19120001959b043f05 | /superset/migrations/versions/c5756bec8b47_time_grain_sqla.py | 13eb8c9b65479c0103f6fddc7aee44249354651c | [
"CC-BY-4.0",
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | permissive | Affirm/incubator-superset | c9a09a10289b4ebf8a09284a483bca93725a4b51 | 421183d3f46c48215e88e9d7d285f2dc6c7ccfe6 | refs/heads/master | 2023-07-06T11:34:38.538178 | 2019-05-22T23:39:01 | 2019-05-22T23:39:01 | 128,005,001 | 1 | 3 | Apache-2.0 | 2023-03-20T19:49:14 | 2018-04-04T04:02:42 | JavaScript | UTF-8 | Python | false | false | 2,190 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Time grain SQLA
Revision ID: c5756bec8b47
Revises: e502db2af7be
Create Date: 2018-06-04 11:12:59.878742
"""
# revision identifiers, used by Alembic.
revision = 'c5756bec8b47'
down_revision = 'e502db2af7be'
from alembic import op
import json
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text
from superset import db
Base = declarative_base()
class Slice(Base):
__tablename__ = 'slices'
id = Column(Integer, primary_key=True)
params = Column(Text)
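# upgrade() rewrites every slice's JSON params, replacing the legacy
# 'Time Column' sentinel for time_grain_sqla with None; downgrade() restores it.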
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
for slc in session.query(Slice).all():
try:
params = json.loads(slc.params)
if params.get('time_grain_sqla') == 'Time Column':
params['time_grain_sqla'] = None
slc.params = json.dumps(params, sort_keys=True)
except Exception:
pass
session.commit()
session.close()
def downgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
for slc in session.query(Slice).all():
try:
params = json.loads(slc.params)
if params.get('time_grain_sqla') is None:
params['time_grain_sqla'] = 'Time Column'
slc.params = json.dumps(params, sort_keys=True)
except Exception:
pass
session.commit()
session.close()
| [
"[email protected]"
] | |
5fd0656ad77ea794a18064b517e5d366cbc9653d | d6ac41c1b17deb7d870a1658b0a613a7ee38e907 | /ex02/2_4.py | 9c77b1f7fbfbcdfb9f102387759955b680037861 | [] | no_license | sarangkhim/sk_python | 9e76a14ad6180420ecbe6f17009f7ac4450ef5b1 | cc3d8f600e343d6f8d967c684cab04ad8e0011d1 | refs/heads/master | 2020-03-10T07:17:31.147720 | 2018-04-12T15:15:51 | 2018-04-12T15:15:51 | 129,259,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | count = 1
count = count + 1
count = 1
print(count)
| [
"[email protected]"
] | |
898ac4865128e40a00c0e6ebf4e83bf0a1cdff2e | 43fd8b12dc1b6a2fc7cf4d9b8a80d3f1ae0fac66 | /Visualization/11. 3D绘图函数-Points3d().py | b763a01b925d2dfae930f78b64abb6ea2a8b557b | [] | no_license | gxiang666/python_file | e707f829b2c35e6126bea79e299333faabe76b19 | 2ee0f52d53892d193dc83c10564f7326e0bad0da | refs/heads/master | 2022-12-07T04:16:29.166707 | 2019-10-25T02:59:26 | 2019-10-25T02:59:26 | 139,252,161 | 1 | 0 | null | 2022-11-22T02:38:40 | 2018-06-30T13:35:14 | Python | UTF-8 | Python | false | false | 305 | py | import numpy as np
from mayavi import mlab
# Build the data
t = np.linspace(0, 4 * np.pi, 20) # 20 evenly spaced values between 0 and 4*pi
x = np.sin(2 * t)
y = np.cos(t)
z = np.cos(2 * t)
s = 2 + np.sin(t)
# Visualize the data
points = mlab.points3d(x, y, z, s, colormap="Greens", scale_factor=.25)
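# The fourth argument s is a per-point scalar: it drives both the glyph colour
# (through the colormap) and the glyph size, which scale_factor then multiplies.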
mlab.show()
| [
"[email protected]"
] | |
a48d2e017f789758b775bae34f5ef4309987a757 | 458b1133df5b38a017f3a690a624a54f0f43fda7 | /PaperExperiments/XHExp076/parameters.py | 6f436fa18bf3398fcd04dd8ad45ea24a61fd102c | [
"MIT"
] | permissive | stefan-c-kremer/TE_World2 | 9c7eca30ee6200d371183c5ba32b3345a4cc04ee | 8e1fae218af8a1eabae776deecac62192c22e0ca | refs/heads/master | 2020-12-18T14:31:00.639003 | 2020-02-04T15:55:49 | 2020-02-04T15:55:49 | 235,413,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py |
# parameters.py
"""
Exp 76 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '300', 'TE_excision_rate': '0.5', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.01', 'TE_death_rate': '0.005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
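# Rough sketch of the sampling behaviour described above (illustrative only --
# the real implementations come from TEUtil):
#   Flat()                  # every insertion position in [0, 1) is equally likely
#   Triangle( 0.0, 1.0 )    # positions near 0.0 are most likely, near 1.0 least likely
#   Triangle( 1.0, 0.0 )    # same shape, mirrored towards the end of the genome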
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.005;
TE_excision_rate = 0.5; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );
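# The arguments pair up as (probability, progeny count): 0.15 -> 0 progeny,
# 0.55 -> 1 progeny, 0.30 -> 2 progeny (assumed reading of ProbabilityTable's
# alternating probability/value arguments; the probabilities sum to 1.0).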
Initial_genes = 5000;
Append_gene = True; # True: when the initialization routine tries to place
# a gene inside another gene, it instead appends it
# at the end of the original gene (use this with small
# amounts of Junk_BP).
# False: when the initialization routine tries to place
# a gene inside another gene, try to place it somewhere
# else again (don't use this option with small amounts
# of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.01,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.01
);
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.01,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.01
);
Carrying_capacity = 300;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
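# Worked example: with Carrying_capacity = 300, an individual holding 0.2% of the
# population's total fitness (propfit = 0.002) survives with probability
# min(300 * 0.002, 0.95) = 0.6, while anything above propfit ~ 0.00317 is capped at 0.95.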
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
# if saved = string, then we open that file and resume a simulation
| [
"[email protected]"
] | |
df03d7cf506c83750e564807e2c690429a10ca18 | e0b750751b22e2d3d93c72f0947b3dd3d173ce54 | /runtests.py | b59af0a0dfe8c95546d7382b46d18af15e8bdf10 | [
"MIT"
] | permissive | CryptAxe/pinax-images | 8deac776c534a2d3ab2d66df364ab654fb02b8df | d041993671cdec6ed3011f6d7ffa499591e7cc9a | refs/heads/master | 2021-01-17T22:27:44.849972 | 2016-02-22T14:38:13 | 2016-02-22T14:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.images",
"pinax.images.tests"
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.images.tests.urls",
SECRET_KEY="notasecret",
)
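# runtests() below configures Django with these settings, then runs the
# "pinax.images.tests" suite with DiscoverRunner, falling back to the old
# DjangoTestSuiteRunner (and the "tests" label) on Django versions without it.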
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ["pinax.images.tests"]
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
test_args = ["tests"]
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == "__main__":
runtests(*sys.argv[1:])
| [
"[email protected]"
] | |
eba9ca8e639338e02363bdde79d0a78a05881186 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-6860.py | 9d6702f59fb7fd8b3f78e657ca25242a0b170f23 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,752 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
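# sieve() keeps each element it reaches and deletes every later element divisible
# by it, so a vector built with vrange(2, n) is reduced to the primes below n.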
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
ecacc3a564ffcaf31f442d38ff3d813c6c585b53 | 73d8089381b92b1965e2ac2a5f7c80d9293af2f5 | /main.py | 6a0a75f799a93f53747575c949df0934a27cee92 | [] | no_license | Fufuhu/python_movie_transcoder | b92e420bc0433676863f57c154cc2a7f3d939384 | ddce7f230c52ff532a74b1e1f5b0a5d16fa7bbaf | refs/heads/master | 2021-01-20T04:50:15.511316 | 2017-04-29T14:40:44 | 2017-04-29T14:40:44 | 89,738,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | import sys
from python_movie_transformer.ffmpeg.image_file import ImageFile
from python_movie_transformer.ffmpeg.options import ImageFileOptions
from python_movie_transformer.ffmpeg.manipulater import FFmpegManipulater
in_file = sys.argv[1]
out_file = sys.argv[2]
out_file_options = ImageFileOptions()
out_file_options.set_scale(width=640)
in_file_image = ImageFile(file_name=in_file)
out_file_image = ImageFile(file_name=out_file, options=out_file_options)
manipulater = FFmpegManipulater(input_file = in_file_image, output_file=out_file_image)
manipulater.manipulate()
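# Example invocation (assumed -- the script simply reads argv):
#   python main.py input.mp4 output.mp4
# which transcodes input.mp4 into output.mp4 scaled to a width of 640 pixels.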
| [
"[email protected]"
] | |
55bc19097227723308c5974a0b3429268e833458 | 03bf031efc1f171f0bb3cf8a565d7199ff073f96 | /apps/splash/utils.py | 55a7485240634ae9178da8eacef57efa57c85d31 | [
"MIT"
] | permissive | emilps/onlineweb4 | a213175678ac76b1fbede9b0897c538c435a97e2 | 6f4aca2a4522698366ecdc6ab63c807ce5df2a96 | refs/heads/develop | 2020-03-30T01:11:46.941170 | 2019-05-10T19:49:21 | 2019-05-10T19:49:21 | 150,564,330 | 0 | 0 | MIT | 2019-05-10T19:49:22 | 2018-09-27T09:43:32 | Python | UTF-8 | Python | false | false | 699 | py | import icalendar
from django.utils import timezone
from apps.events.utils import Calendar
from apps.splash.models import SplashEvent
class SplashCalendar(Calendar):
def add_event(self, event):
cal_event = icalendar.Event()
cal_event.add('dtstart', event.start_time)
cal_event.add('dtend', event.end_time)
cal_event.add('summary', event.title)
cal_event.add('description', event.content)
cal_event.add('uid', 'splash-' + str(event.id) + '@online.ntnu.no')
self.cal.add_component(cal_event)
def events(self):
self.add_events(SplashEvent.objects.filter(start_time__year=timezone.now().year))
self.filename = 'events'
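# events() collects every SplashEvent starting in the current year into the
# underlying calendar; serving the iCal response is assumed to be handled by the
# parent Calendar class from apps.events.utils.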
| [
"[email protected]"
] | |
bbb5385859b86e8e0d16e6fb2b6b59981333724f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_binaries.py | 57ef5dd6379ebd43cd7d9aa73601d47f274634a6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#class header
class _BINARIES():
def __init__(self,):
self.name = "BINARIES"
self.definitions = ['binary']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['binary']
| [
"[email protected]"
] | |
c39122b83132e528602a47b61c9c2da7e5a93bbb | 302442c32bacca6cde69184d3f2d7529361e4f3c | /servidores/servidor_7-nao-comitados/code2py/migra_cass_1.py | 483f945ca021c1c77c6c6c22f65b373840ff8c82 | [] | no_license | fucknoob/WebSemantic | 580b85563072b1c9cc1fc8755f4b09dda5a14b03 | f2b4584a994e00e76caccce167eb04ea61afa3e0 | refs/heads/master | 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,202 | py |
import pycassa
from pycassa.pool import ConnectionPool
from pycassa import index
from pycassa.columnfamily import ColumnFamily
pool1 = ConnectionPool('MINDNET', ['localhost:9160'],timeout=1000000)
pool2 = ConnectionPool('MINDNET', ['213.136.81.102:9160'],timeout=1000000)
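# migr(tab1, tab2, tb) drains column family tab1 into tab2: each outer pass range-scans
# up to 500,000 rows, re-inserts them into the destination pool, removes them from the
# source, and repeats until the source scan comes back empty; tb is only a label for
# the progress printouts.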
def migr(tab1,tab2,tb):
#r1=tab1.get_range()
#tab2.truncate()
ind=0
while True:
cach=[]
r1=tab1.get_range()
for ky,col in r1:
cach.append([ky,col])
if len(cach) %1000==0:
print 'collect(',tb,'):',len(cach)
if len(cach) >= 500000:
break
if len(cach) == 0: break
b1 = tab2.batch(55000)
b2 = tab1.batch(55000)
indc=0
for ky,col in cach:
tab2.insert(ky,col)
tab1.remove(ky)
indc+=1
if indc % 50000==0:
b1.send()
b2.send()
b1 = tab2.batch(55000)
b2 = tab1.batch(55000)
print tb,'->',ind
b1.send()
b2.send()
print tb,'->',ind
web_cache10_1 = pycassa.ColumnFamily(pool1, 'web_cache10')
web_cache10_2 = pycassa.ColumnFamily(pool2, 'web_cache10')
migr(web_cache10_1,web_cache10_2,'web_cache10')
fz_store_sufix_1 = pycassa.ColumnFamily(pool1, 'fz_store_sufix')
fz_store_sufix_2 = pycassa.ColumnFamily(pool2, 'fz_store_sufix')
#migr(fz_store_sufix_1,fz_store_sufix_2,'fz_store_sufix')
SEMANTIC_RELACTIONS_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_RELACTIONS')
SEMANTIC_RELACTIONS_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS')
#migr(SEMANTIC_RELACTIONS_1,SEMANTIC_RELACTIONS_2,'semantic_relactions')
SEMANTIC_OBJECT_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT')
SEMANTIC_OBJECT_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT')
#migr(SEMANTIC_OBJECT_1,SEMANTIC_OBJECT_2,'semantic_object')
#
fz_store_defs_1 = pycassa.ColumnFamily(pool1, 'fz_store_defs')
fz_store_defs_2 = pycassa.ColumnFamily(pool2, 'fz_store_defs')
#migr(fz_store_defs_1,fz_store_defs_2,'fz_store_defs')
SEMANTIC_RELACTIONS3_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_RELACTIONS3')
SEMANTIC_RELACTIONS3_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3')
#migr(SEMANTIC_RELACTIONS3_1,SEMANTIC_RELACTIONS3_2,'semantic_relactions3')
knowledge_manager_1 = pycassa.ColumnFamily(pool1, 'knowledge_manager')
knowledge_manager_2 = pycassa.ColumnFamily(pool2, 'knowledge_manager')
#migr(knowledge_manager_1,knowledge_manager_2,'knowledge_manager')
SEMANTIC_OBJECT3_1_4_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT3_1_4')
SEMANTIC_OBJECT3_1_4_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3_1_4')
#migr(SEMANTIC_OBJECT3_1_4_1,SEMANTIC_OBJECT3_1_4_2,'semantic_object3_1_4')
web_cache3_1 = pycassa.ColumnFamily(pool1, 'web_cache3')
web_cache3_2 = pycassa.ColumnFamily(pool2, 'web_cache3')
#migr(web_cache3_1,web_cache3_2,'web_cache3')
fcb_users1_1 = pycassa.ColumnFamily(pool1, 'fcb_users1')
fcb_users1_2 = pycassa.ColumnFamily(pool2, 'fcb_users1')
#migr(fcb_users1_1,fcb_users1_2,'fcb_users1')
fz_store_refer_1 = pycassa.ColumnFamily(pool1, 'fz_store_refer')
fz_store_refer_2 = pycassa.ColumnFamily(pool2, 'fz_store_refer')
#migr(fz_store_refer_1,fz_store_refer_2,'fz_store_refer')
DATA_BEHAVIOUR_CODE_PY_1 = pycassa.ColumnFamily(pool1, 'DATA_BEHAVIOUR_CODE_PY')
DATA_BEHAVIOUR_CODE_PY_2 = pycassa.ColumnFamily(pool2, 'DATA_BEHAVIOUR_CODE_PY')
#migr(DATA_BEHAVIOUR_CODE_PY_1,DATA_BEHAVIOUR_CODE_PY_2,'data_behaviour_code_py')
SEMANTIC_OBJECT_DT_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT_DT')
SEMANTIC_OBJECT_DT_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT')
#migr(SEMANTIC_OBJECT_DT_1,SEMANTIC_OBJECT_DT_2,'semantic_object_dt')
SEMANTIC_OBJECT3_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT3')
SEMANTIC_OBJECT3_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3')
#migr(SEMANTIC_OBJECT3_1,SEMANTIC_OBJECT3_2,'semantic_object3')
to_posting_1 = pycassa.ColumnFamily(pool1, 'to_posting')
to_posting_2 = pycassa.ColumnFamily(pool2, 'to_posting')
#migr(to_posting_1,to_posting_2,'to_posting')
SEMANTIC_RELACTIONS3_1_4_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_RELACTIONS3_1_4')
SEMANTIC_RELACTIONS3_1_4_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3_1_4')
#migr(SEMANTIC_RELACTIONS3_1_4_1,SEMANTIC_RELACTIONS3_1_4_2,'semantic_relactions3_1_4')
SEMANTIC_OBJECT_DT3_1_4_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT_DT3_1_4')
SEMANTIC_OBJECT_DT3_1_4_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3_1_4')
#migr(SEMANTIC_OBJECT_DT3_1_4_1,SEMANTIC_OBJECT_DT3_1_4_2,'semantic_object_dt3_1_4')
fuzzy_store_1 = pycassa.ColumnFamily(pool1, 'fuzzy_store')
fuzzy_store_2 = pycassa.ColumnFamily(pool2, 'fuzzy_store')
#migr(fuzzy_store_1,fuzzy_store_2,'fuzzy_store')
cache_products_1 = pycassa.ColumnFamily(pool1, 'cache_products')
cache_products_2 = pycassa.ColumnFamily(pool2, 'cache_products')
#migr(cache_products_1,cache_products_2,'cache_products')
cache_links_1 = pycassa.ColumnFamily(pool1, 'cache_links')
cache_links_2 = pycassa.ColumnFamily(pool2, 'cache_links')
#migr(cache_links_1,cache_links_2,'cache_links')
DATA_BEHAVIOUR_PY_1 = pycassa.ColumnFamily(pool1, 'DATA_BEHAVIOUR_PY')
DATA_BEHAVIOUR_PY_2 = pycassa.ColumnFamily(pool2, 'DATA_BEHAVIOUR_PY')
#migr(DATA_BEHAVIOUR_PY_1,DATA_BEHAVIOUR_PY_2,'data_behaviour_py')
SEMANTIC_OBJECT_DT3_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT_DT3')
SEMANTIC_OBJECT_DT3_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3')
#migr(SEMANTIC_OBJECT_DT3_1,SEMANTIC_OBJECT_DT3_2,'semantic_object_dt3')
to_posting2_1 = pycassa.ColumnFamily(pool1, 'to_posting2')
to_posting2_2 = pycassa.ColumnFamily(pool2, 'to_posting2')
#migr(to_posting2_1,to_posting2_2,'to_posting2')
fz_store_pref_1 = pycassa.ColumnFamily(pool1, 'fz_store_pref')
fz_store_pref_2 = pycassa.ColumnFamily(pool2, 'fz_store_pref')
#migr(fz_store_pref_1,fz_store_pref_2,'fz_store_pref')
web_cache1_1 = pycassa.ColumnFamily(pool1, 'web_cache1')
web_cache1_2 = pycassa.ColumnFamily(pool2, 'web_cache1')
#migr(web_cache1_1,web_cache1_2,'web_cache1')
fz_arround_points_1 = pycassa.ColumnFamily(pool1, 'fz_arround_points')
fz_arround_points_2 = pycassa.ColumnFamily(pool2, 'fz_arround_points')
#migr(fz_arround_points_1,fz_arround_points_2,'fz_arround_points')
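# Minimal sketch (assuming the same keyspace layout exists on both pools) of how
# one of the commented-out calls above can be wrapped for a single column family,
# queueing mutations on an explicit batch mutator:
def migrate_one(cf_name):
    src = pycassa.ColumnFamily(pool1, cf_name)
    dst = pycassa.ColumnFamily(pool2, cf_name)
    batch = dst.batch(queue_size=5000)  # flushes automatically every 5000 mutations
    for key, columns in src.get_range():
        batch.insert(key, columns)
    batch.send()  # flush whatever is still queued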
| [
"[email protected]"
] | |
41184d6766f09a040692ad47e6ee2ba8729e2760 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02842/s678047490.py | 06867a1e60f0fc09c8a80429d6de00e18537b2ab | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | N = int(input())
n = int(N/1.08)
while int(n*1.08) <= N:
if int(n*1.08) == N:
print(n)
break
else:
n += 1
else:
print(":(") | [
"[email protected]"
] | |
f73b1d1ae70592ed57aa30e7311dcf68cb0ae8bf | 691c70d88aa242ef97c2b5587de210e94854148a | /omega_miya/plugins/bilibili_dynamic_monitor/monitor.py | 0a86cf1b574a40b34b60614fd103dd63d21f19ce | [
"Python-2.0",
"MIT"
] | permissive | yekang-wu/omega-miya | bf632e28c788f06c74b61056142de23da9201282 | 53a6683fccb0618e306abe9e103cec78445f3796 | refs/heads/master | 2023-08-27T13:54:56.756272 | 2021-10-07T11:31:23 | 2021-10-07T11:31:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,673 | py | import asyncio
import random
from typing import List
from nonebot import logger, require, get_bots, get_driver
from nonebot.adapters.cqhttp import MessageSegment
from nonebot.adapters.cqhttp.bot import Bot
from omega_miya.database import DBSubscription, DBDynamic
from omega_miya.utils.bilibili_utils import BiliUser, BiliDynamic, BiliRequestUtils
from omega_miya.utils.omega_plugin_utils import MsgSender
from .config import Config
__global_config = get_driver().config
plugin_config = Config(**__global_config.dict())
ENABLE_DYNAMIC_CHECK_POOL_MODE = plugin_config.enable_dynamic_check_pool_mode
# uid queue consumed by the check-pool mode
checking_pool = []
# scheduler that runs the periodic dynamic-check jobs
scheduler = require("nonebot_plugin_apscheduler").scheduler
# scheduled job that refreshes the cached uploader (UP) names in the database
@scheduler.scheduled_job(
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
hour='3',
minute='3',
second='22',
# start_date=None,
# end_date=None,
# timezone=None,
id='dynamic_db_upgrade',
coalesce=True,
misfire_grace_time=60
)
async def dynamic_db_upgrade():
logger.debug('dynamic_db_upgrade: started upgrade subscription info')
sub_res = await DBSubscription.list_sub_by_type(sub_type=2)
for sub_id in sub_res.result:
sub = DBSubscription(sub_type=2, sub_id=sub_id)
user_info_result = await BiliUser(user_id=int(sub_id)).get_info()
if user_info_result.error:
logger.error(f'dynamic_db_upgrade: 更新用户信息失败, uid: {sub_id}, error: {user_info_result.info}')
continue
up_name = user_info_result.result.name
_res = await sub.add(up_name=up_name, live_info='B站动态')
if not _res.success():
logger.error(f'dynamic_db_upgrade: 更新用户信息失败, uid: {sub_id}, error: {_res.info}')
continue
logger.debug('dynamic_db_upgrade: upgrade subscription info completed')
# helper: turn a list of picture URLs into image message segments
async def pic_to_seg(pic_list: list) -> str:
    # download each picture and wrap it as an image segment
pic_segs = []
for pic_url in pic_list:
pic_result = await BiliRequestUtils.pic_to_file(url=pic_url)
if pic_result.error:
logger.warning(f'BiliDynamic get base64pic failed, error: {pic_result.info}, pic url: {pic_url}')
pic_segs.append(str(MessageSegment.image(pic_result.result)))
pic_seg = '\n'.join(pic_segs)
return pic_seg
# check a single user's dynamics for updates
async def dynamic_checker(user_id: int, bots: List[Bot]):
    # fetch the user's dynamic history (type and content)
user_dynamic_result = await BiliUser(user_id=user_id).get_dynamic_history()
    if user_dynamic_result.error:
        logger.error(f'bilibili_dynamic_monitor: 获取用户 {user_id} 动态失败, error: {user_dynamic_result.info}')
        # bail out early; there is nothing to parse when the fetch failed
        return
    # parse the fetched dynamics
dynamics_data = []
for data in user_dynamic_result.result:
data_parse_result = BiliDynamic.data_parser(dynamic_data=data)
if data_parse_result.error:
logger.error(f'bilibili_dynamic_monitor: 解析新动态时发生了错误, error: {data_parse_result.info}')
continue
dynamics_data.append(data_parse_result)
    # dynamic ids already stored for this user
exist_dynamic_result = await DBDynamic.list_dynamic_by_uid(uid=user_id)
if exist_dynamic_result.error:
logger.error(f'bilibili_dynamic_monitor: 获取用户 {user_id} 已有动态失败, error: {exist_dynamic_result.info}')
return
user_dynamic_list = [int(x) for x in exist_dynamic_result.result]
new_dynamic_data = [data for data in dynamics_data if data.result.dynamic_id not in user_dynamic_list]
subscription = DBSubscription(sub_type=2, sub_id=str(user_id))
for data in new_dynamic_data:
dynamic_info = data.result
dynamic_card = dynamic_info.data
dynamic_id = dynamic_info.dynamic_id
user_name = dynamic_info.user_name
desc = dynamic_info.desc
url = dynamic_info.url
content = dynamic_card.content
title = dynamic_card.title
description = dynamic_card.description
        # type 1: reposted dynamic
if dynamic_info.type == 1:
            # a repost also needs the original dynamic's info
orig_dy_info_result = await BiliDynamic(dynamic_id=dynamic_info.orig_dy_id).get_info()
if orig_dy_info_result.success():
orig_dy_data_result = BiliDynamic.data_parser(dynamic_data=orig_dy_info_result.result)
if orig_dy_data_result.success():
                    # original dynamic of type 2, 8 or 4200: carries pictures
if orig_dy_data_result.result.type in [2, 8, 4200]:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=orig_dy_data_result.result.data.pictures)
orig_user = orig_dy_data_result.result.user_name
orig_contant = orig_dy_data_result.result.data.content
if not orig_contant:
orig_contant = orig_dy_data_result.result.data.title
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}\n{'=' * 16}\n" \
f"@{orig_user}: {orig_contant}\n{pic_seg}"
                    # original dynamic of type 32 or 512: bangumi (anime)
elif orig_dy_data_result.result.type in [32, 512]:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=orig_dy_data_result.result.data.pictures)
orig_user = orig_dy_data_result.result.user_name
orig_title = orig_dy_data_result.result.data.title
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}\n{'=' * 16}\n" \
f"@{orig_user}: {orig_title}\n{pic_seg}"
                    # any other original dynamic type: no pictures
else:
orig_user = orig_dy_data_result.result.user_name
orig_contant = orig_dy_data_result.result.data.content
if not orig_contant:
orig_contant = orig_dy_data_result.result.data.title
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}\n{'=' * 16}\n" \
f"@{orig_user}: {orig_contant}"
else:
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}\n{'=' * 16}\n@Unknown: 获取原动态失败"
else:
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}\n{'=' * 16}\n@Unknown: 获取原动态失败"
        # type 2: original dynamic with pictures
elif dynamic_info.type == 2:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=dynamic_info.data.pictures)
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}\n{pic_seg}"
        # type 4: original dynamic without pictures
elif dynamic_info.type == 4:
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}"
        # type 8: video
elif dynamic_info.type == 8:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=dynamic_info.data.pictures)
if content:
msg = f"{user_name}{desc}!\n\n《{title}》\n\n“{content}”\n{url}\n{pic_seg}"
else:
msg = f"{user_name}{desc}!\n\n《{title}》\n\n{description}\n{url}\n{pic_seg}"
        # type 16: short video
elif dynamic_info.type == 16:
msg = f"{user_name}{desc}!\n\n“{content}”\n{url}"
        # types 32/512: bangumi
elif dynamic_info.type in [32, 512]:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=dynamic_info.data.pictures)
msg = f"{user_name}{desc}!\n\n《{title}》\n\n{content}\n{url}\n{pic_seg}"
        # type 64: article
elif dynamic_info.type == 64:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=dynamic_info.data.pictures)
msg = f"{user_name}{desc}!\n\n《{title}》\n\n{content}\n{url}\n{pic_seg}"
        # type 256: audio
elif dynamic_info.type == 256:
# 处理图片序列
pic_seg = await pic_to_seg(pic_list=dynamic_info.data.pictures)
msg = f"{user_name}{desc}!\n\n《{title}》\n\n{content}\n{url}\n{pic_seg}"
        # type 2048: Bilibili activity/event
elif dynamic_info.type == 2048:
if description:
msg = f"{user_name}{desc}!\n\n【{title} - {description}】\n\n“{content}”\n{url}"
else:
msg = f"{user_name}{desc}!\n\n【{title}】\n“{content}”\n\n{url}"
else:
logger.warning(f"未知的动态类型: {type}, id: {dynamic_id}")
continue
        # push the message to all subscribed groups and friends
for _bot in bots:
msg_sender = MsgSender(bot=_bot, log_flag='BiliDynamicNotice')
await msg_sender.safe_broadcast_groups_subscription(subscription=subscription, message=msg)
await msg_sender.safe_broadcast_friends_subscription(subscription=subscription, message=msg)
        # persist the new dynamic to the database
dynamic = DBDynamic(uid=user_id, dynamic_id=dynamic_id)
_res = await dynamic.add(dynamic_type=dynamic_info.type, content=content)
if _res.success():
logger.info(f"向数据库写入动态信息: {dynamic_id} 成功")
else:
logger.error(f"向数据库写入动态信息: {dynamic_id} 失败, error: {_res.info}")
# back-fill dynamics in the database when a user is first subscribed
async def init_user_dynamic(user_id: int):
    # pause the scheduled jobs so a periodic check does not run mid-refresh
scheduler.pause()
await dynamic_checker(user_id=user_id, bots=[])
scheduler.resume()
logger.info(f'Init new subscription user {user_id} dynamic completed.')
# main entry point of the periodic dynamic check
async def bilibili_dynamic_monitor():
logger.debug(f"bilibili_dynamic_monitor: checking started")
    # currently connected bots
bots = [bot for bot_id, bot in get_bots().items()]
    # all dynamic subscriptions in the subscription table
sub_res = await DBSubscription.list_sub_by_type(sub_type=2)
check_sub = [int(x) for x in sub_res.result]
if not check_sub:
logger.debug(f'bilibili_dynamic_monitor: no dynamic subscription, ignore.')
return
    # check-pool mode enabled
if ENABLE_DYNAMIC_CHECK_POOL_MODE:
global checking_pool
        # an empty pool means the previous round finished; refill it for a new round
if not checking_pool:
checking_pool.extend(check_sub)
        # how many uids are still waiting in the pool
waiting_num = len(checking_pool)
        # by default 3 concurrent checks per round, one round every 20s
logger.debug(f'bili dynamic pool mode debug info, B_checking_pool: {checking_pool}')
if waiting_num >= 3:
            # sample the uids to check this round
now_checking = random.sample(checking_pool, k=3)
            # drop the sampled uids from the pool
checking_pool = [x for x in checking_pool if x not in now_checking]
else:
now_checking = checking_pool.copy()
checking_pool.clear()
logger.debug(f'bili dynamic pool mode debug info, A_checking_pool: {checking_pool}')
logger.debug(f'bili dynamic pool mode debug info, now_checking: {now_checking}')
        # check the sampled users asynchronously
tasks = []
for uid in now_checking:
tasks.append(dynamic_checker(user_id=uid, bots=bots))
try:
await asyncio.gather(*tasks)
logger.debug(f"bilibili_dynamic_monitor: pool mode enable, checking completed, "
f"checked: {', '.join([str(x) for x in now_checking])}.")
except Exception as e:
logger.error(f'bilibili_dynamic_monitor: pool mode enable, error occurred in checking: {repr(e)}')
    # check-pool mode disabled
else:
        # check every subscribed user asynchronously
tasks = []
for uid in check_sub:
tasks.append(dynamic_checker(user_id=uid, bots=bots))
try:
await asyncio.gather(*tasks)
logger.debug(f"bilibili_dynamic_monitor: pool mode disable, checking completed, "
f"checked: {', '.join([str(x) for x in check_sub])}.")
except Exception as e:
logger.error(f'bilibili_dynamic_monitor: pool mode disable, error occurred in checking {repr(e)}')
# register the check job with an interval that depends on the pool-mode setting
if ENABLE_DYNAMIC_CHECK_POOL_MODE:
    # check pool enabled
scheduler.add_job(
bilibili_dynamic_monitor,
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
# hour='9-23',
# minute='*/3',
second='*/20',
# start_date=None,
# end_date=None,
# timezone=None,
id='bilibili_dynamic_monitor_pool_enable',
coalesce=True,
misfire_grace_time=20
)
else:
    # check pool disabled
scheduler.add_job(
bilibili_dynamic_monitor,
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
# hour=None,
minute='*/3',
# second='*/30',
# start_date=None,
# end_date=None,
# timezone=None,
id='bilibili_dynamic_monitor_pool_disable',
coalesce=True,
misfire_grace_time=30
)
__all__ = [
'scheduler',
'init_user_dynamic'
]
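# Minimal self-contained sketch (not used by the plugin) of the rotating
# check-pool idea implemented above: each call hands back the next small batch
# of uids and refills the pool once a round has finished.
def _check_pool_sketch(pool: list, all_uids: list, batch_size: int = 3) -> list:
    if not pool:
        pool.extend(all_uids)  # start a new round
    batch = pool[:batch_size]
    del pool[:batch_size]
    return batch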
| [
"[email protected]"
] | |
13a1236df74a65225769b76df44b5338409bbc7a | 51af810c46774818dbc1c59712164080a3724055 | /miniflow/ops.py | 98d537fddf8e026755c4d69c60e40742361b0f13 | [
"Apache-2.0"
] | permissive | BenJamesbabala/miniflow | bf626f53d887370c5f12bec4892fe8891e62ebc1 | cd92726fc63c2ae9cb52e02a8e9771081a7253a7 | refs/heads/master | 2020-12-02T21:09:34.852835 | 2017-07-04T16:23:39 | 2017-07-04T16:23:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,807 | py | # Copyright 2017 The Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains all the basic operations."""
import logging
import math
import os
import sys
import graph
# Enable swig by environment variable
if os.environ.has_key("ENABLE_SWIG_OP"):
logging.info("Enable swig operations by environment variable")
sys.path.append("../")
import swig.op
class Op(object):
"""The basic class for all operation."""
def __init__(self):
pass
def forward(self):
raise NotImplementedError
def grad(self):
raise NotImplementedError
def __add__(self, other):
return AddOp(self, other)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return MinusOp(self, other)
def __rsub__(self, other):
return MinusOp(other, self)
def __mul__(self, other):
return MultipleOp(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
return DivideOp(self, other)
def __rdiv__(self, other):
return DivideOp(other, self)
def __pow__(self, power, modulo=None):
return PowerOp(self, power)
class PlaceholderOp(Op):
"""The placeholer operation which value is set when Session.run()"""
def __init__(self, dtype=None, shape=None, name="Placeholder"):
# TODO: Use dtype and shape
self.dtype = dtype
self.shape = shape
self.name = name
# The value is None util Session.run() with feed_dict parameter
self.value = None
# TODO: Support other graph instance
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def forward(self):
return self.value
def grad(self, partial_derivative_opname=None):
return 0
class ConstantOp(Op):
"""The constant operation which contains one initialized value."""
def __init__(self, value, name="Constant"):
self.value = value
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
# TODO: Not allow to set the value
def get_value(self):
return self.value
def forward(self):
return self.value
def grad(self, partial_derivative_opname=None):
return 0
class VariableOp(Op):
"""
The variable operation which contains one variable. The variable may be
trainable or not-trainable. This is used to define the machine learning
models.
"""
def __init__(self, value, is_trainable=True, name="Variable"):
self.value = value
self.is_trainable = is_trainable
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
if self.is_trainable:
self.graph.add_to_trainable_variables_collection(self.name, self)
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
def forward(self):
return self.value
def grad(self, partial_derivative_opname=None):
if partial_derivative_opname is None:
grad = 1
else:
if self.name == partial_derivative_opname:
# Specify to compute this derivative
grad = 1
else:
# Specify to compute other derivative
grad = 0
return grad
def test_VariableOp():
x = 10
variable = VariableOp(x)
print("X: {}, forward: {}, grad: {}".format(
x, variable.forward(), variable.grad()))
class PowerOp(Op):
def __init__(self, input, power, name="Power"):
if not isinstance(input, Op):
self.op = ConstantOp(input)
else:
self.op = input
self.power = power
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = pow(self.op.forward(), self.power)
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self.op, PlaceholderOp) or isinstance(self.op, ConstantOp):
# op is the constant
grad = 0
elif isinstance(self.op, VariableOp):
# op is the variable
grad = self.power * pow(self.op.forward(), self.power - 1)
else:
# op is other complex operation and use chain rule
grad = self.power * pow(self.op.forward(), self.power - 1
) * self.op.grad(partial_derivative_opname)
return grad
class SquareOp(PowerOp):
def __init__(self, input, name="Square"):
super(SquareOp, self).__init__(input, 2, name)
class SquareOpOld(Op):
def __init__(self, input, name="Square"):
if not isinstance(input, Op):
self.op = ConstantOp(input)
else:
self.op = input
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
if os.environ.has_key("ENABLE_SWIG_OP"):
result = swig.op.square(self.op.forward())
else:
result = pow(self.op.forward(), 2)
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self.op, PlaceholderOp) or isinstance(self.op, ConstantOp):
# op is the constant
grad = 0
elif isinstance(self.op, VariableOp):
# op is the variable
if os.environ.has_key("ENABLE_SWIG_OP"):
grad = swig.op.multiple(2, self.op.forward())
else:
grad = 2 * self.op.forward()
else:
# op is other complex operation and use chain rule
grad = 2 * self.op.forward() * self.op.grad(partial_derivative_opname)
return grad
class CubicOp(PowerOp):
def __init__(self, input, name="Cubic"):
super(CubicOp, self).__init__(input, 3, name)
class CubicOpOld(Op):
def __init__(self, input, name="Cubic"):
if not isinstance(input, Op):
self.op = ConstantOp(input)
else:
self.op = input
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
if os.environ.has_key("ENABLE_SWIG_OP"):
result = swig.op.cubic(self.op.forward())
else:
result = math.pow(self.op.forward(), 3)
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self.op, PlaceholderOp) or isinstance(self.op, ConstantOp):
# op is the constant
grad = 0
elif isinstance(self.op, VariableOp):
# op is the variable
if os.environ.has_key("ENABLE_SWIG_OP"):
grad = swig.op.multiple(3, swig.op.square(self.op.forward()))
else:
grad = 3 * math.pow(self.op.forward(), 2)
else:
# op is other complex operation
grad = 3 * math.pow(self.op.forward(),
2) * self.op.grad(partial_derivative_opname)
return grad
def test_CubicOp():
x = 10
variable = CubicOp(x)
print("X: {}, forward: {}, grad: {}".format(
x, variable.forward(), variable.grad()))
class SigmoidOp(Op):
def __init__(self, value, name="Sigmoid"):
pass
class AddOp(Op):
"""
The addition operation which has only two inputs. The input can be
    primitive, ConstantOp, PlaceholderOp, VariableOp or other ops.
"""
def __init__(self, input1, input2, name="Add"):
if not isinstance(input1, Op):
self.op1 = ConstantOp(input1)
else:
self.op1 = input1
if not isinstance(input2, Op):
self.op2 = ConstantOp(input2)
else:
self.op2 = input2
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = self.op1.forward() + self.op2.forward()
return result
def grad(self, partial_derivative_opname=None):
result = self.op1.grad(partial_derivative_opname) + self.op2.grad(
partial_derivative_opname)
return result
class MinusOp(Op):
"""
The minus operation.
"""
def __init__(self, input1, input2, name="Minus"):
if not isinstance(input1, Op):
self.op1 = ConstantOp(input1)
else:
self.op1 = input1
if not isinstance(input2, Op):
self.op2 = ConstantOp(input2)
else:
self.op2 = input2
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = self.op1.forward() - self.op2.forward()
return result
def grad(self, partial_derivative_opname=None):
result = self.op1.grad(partial_derivative_opname) - self.op2.grad(
partial_derivative_opname)
return result
class AddNOp(Op):
def __init__(self, *inputs):
# TODO: Support user defined name in the parameter
self.name = "AddN"
self.ops = []
for input in inputs:
if not isinstance(input, Op):
input = ConstantOp(input)
self.ops.append(input)
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = 0
for op in self.ops:
result += op.forward()
return result
def grad(self, partial_derivative_opname=None):
result = 0
for op in self.ops:
result += op.grad(partial_derivative_opname)
return result
# TODO: Can not support operations like "x * x", only "x * 3"
class MultipleOp(Op):
def __init__(self, input1, input2, name="Multiple"):
if not isinstance(input1, Op):
self.op1 = ConstantOp(input1)
else:
self.op1 = input1
if not isinstance(input2, Op):
self.op2 = ConstantOp(input2)
else:
self.op2 = input2
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = self.op1.forward() * self.op2.forward()
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self.op1, PlaceholderOp) or isinstance(self.op1, ConstantOp):
# op1 is the coefficient of this formula
op1_grad = self.op1.forward()
if isinstance(self.op2, PlaceholderOp) or isinstance(
self.op2, ConstantOp):
# two elements are both constant values
op2_grad = 0
else:
# op2 may has VariableOp
op2_grad = self.op2.grad(partial_derivative_opname)
elif isinstance(self.op2, PlaceholderOp) or isinstance(
self.op2, ConstantOp):
# op2 is the coefficient of this formula
op2_grad = self.op2.forward()
# op1 may has VariableOp
op1_grad = self.op1.grad(partial_derivative_opname)
else:
# op1 and op2 may has VariableOp
logging.error(
"Not support complex formula which has multiple VariableOp")
raise NotImplementedError
result = op1_grad * op2_grad
return result
class MultipleNOp(Op):
"""The multiple operation for n inputs."""
def __init__(self, *inputs):
self.name = "MultipleN"
self.ops = []
for input in inputs:
if not isinstance(input, Op):
input = ConstantOp(input)
self.ops.append(input)
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = 1
for op in self.ops:
result *= op.forward()
return result
def grad(self, partial_derivative_opname=None):
# TODO: Check the type of op to compute gradients
result = 1
for op in self.ops:
result *= op.grad(partial_derivative_opname)
return result
class DivideOp(Op):
def __init__(self, input1, input2, name="Divide"):
if not isinstance(input1, Op):
self.op1 = ConstantOp(input1)
else:
self.op1 = input1
if not isinstance(input2, Op):
self.op2 = ConstantOp(input2)
else:
self.op2 = input2
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = self.op1.forward() / self.op2.forward()
return result
def grad(self, partial_derivative_opname=None):
result = self.op1.grad(partial_derivative_opname) / self.op2.grad(
partial_derivative_opname)
return result
class UpdateVariableOp(Op):
def __init__(self, variableOp, value, name="UpdateVariableOp"):
self.variableOp = variableOp
self.value = value
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
self.variableOp.set_value(self.value)
return self.value
# TODO: Add grad() if needed
class UpdateVariableNOp(Op):
def __init__(self, variableop_value_map, name="UpdateVariableNOp"):
self.variableop_value_map = variableop_value_map
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
# TODO: Need to test the loop
        for variableOp, value in self.variableop_value_map.items():
variableOp.set_value(value)
return self.variableop_value_map
def test_SquareOp():
w = VariableOp(10)
b = VariableOp(20)
x = PlaceholderOp(float)
x.set_value(2.0)
y = PlaceholderOp(float)
y.set_value(3.0)
loss = SquareOp(y - (w * x + b))
print("w: {}, forward: {}, grad: {}".format(w.get_value(),
loss.forward(),
loss.grad(w.name))) # 148.0
print("b: {}, forward: {}, grad: {}".format(b.get_value(),
loss.forward(),
loss.grad(b.name))) # 74.0
def global_variables_initializer():
pass
def local_variables_initializer():
pass
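# Small usage sketch (not part of the original module): wire a linear model into
# a squared loss and take one hand-written gradient-descent step. Values and
# names below are arbitrary.
def demo_manual_gradient_step(learning_rate=0.01):
    w = VariableOp(1.0, name="w")
    b = VariableOp(0.0, name="b")
    x = PlaceholderOp(name="x")
    y = PlaceholderOp(name="y")
    x.set_value(2.0)
    y.set_value(3.0)
    loss = SquareOp(y - (w * x + b))
    # d(loss)/dw and d(loss)/db via the chain rule implemented in grad()
    w.set_value(w.get_value() - learning_rate * loss.grad(w.name))
    b.set_value(b.get_value() - learning_rate * loss.grad(b.name))
    return loss.forward()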
| [
"[email protected]"
] | |
54dcbee3deeff946bab62193875ecf7f2be7928b | eddb3dfb5e1a0a3e58254f285c3700b45dce76d9 | /mountaintools/mlprocessors/registry.py | 959fb39fb2b78e11dd4457f64337724e13bb1063 | [
"Apache-2.0"
] | permissive | tjd2002/spikeforest2 | f2281a8d3103b3fbdd85829c176819a5e6d310d0 | 2e393564b858b2995aa2ccccd9bd73065681b5de | refs/heads/master | 2020-04-25T07:55:19.997810 | 2019-02-26T01:19:23 | 2019-02-26T01:19:23 | 172,628,686 | 0 | 0 | Apache-2.0 | 2019-02-26T03:11:27 | 2019-02-26T03:11:26 | null | UTF-8 | Python | false | false | 4,238 | py | #!/usr/bin/env python
import json
import os
import traceback
import argparse
from .execute import execute
import types
class ProcessorRegistry:
def __init__(self, processors=[], namespace=None):
self.processors = processors
self.namespace = namespace
if namespace:
for proc in self.processors:
proc.NAME = "{}.{}".format(namespace, proc.NAME)
proc.NAMESPACE = namespace
def spec(self):
s = {}
s['processors'] = [cls.spec() for cls in self.processors]
return s
    def find(self, **kwargs):
        # Return the first registered processor whose attributes match every
        # given keyword (e.g. find(NAME='...')); None if nothing matches.
        for P in self.processors:
            if all(hasattr(P, key) and getattr(P, key) == kwargs[key]
                   for key in kwargs):
                return P
        return None
def get_processor_by_name(self, name):
return self.find(NAME=name)
def test(self, args, **kwargs):
procname = args[0]
proc = self.find(NAME=procname)
if not proc:
raise KeyError("Unable to find processor %s" % procname)
if not hasattr(proc, 'test') or not callable(proc.test):
raise AttributeError("No test function defined for %s" % proc.NAME)
print("----------------------------------------------")
print("Testing", proc.NAME)
try:
result = proc.test()
print("SUCCESS" if result else "FAILURE")
except Exception as e:
print("FAILURE:", e)
if kwargs.get('trace', False):
traceback.print_exc()
finally:
print("----------------------------------------------")
def process(self, args):
parser = argparse.ArgumentParser(prog=args[0])
subparsers = parser.add_subparsers(dest='command', help='main help')
parser_spec = subparsers.add_parser(
'spec', help='Print processor specs')
parser_spec.add_argument('processor', nargs='?')
parser_test = subparsers.add_parser('test', help='Run processor tests')
parser_test.add_argument('processor')
parser_test.add_argument('args', nargs=argparse.REMAINDER)
for proc in self.processors:
proc.invoke_parser(subparsers)
opts = parser.parse_args(args[1:])
opcode = opts.command
if not opcode:
parser.print_usage()
return
if opcode == 'spec':
if opts.processor:
try:
proc = self.get_processor_by_name(opts.processor)
print(json.dumps(proc.spec(), sort_keys=True, indent=4))
except:
print("Processor {} not found".format(opts.processor))
return
print(json.dumps(self.spec(), sort_keys=True, indent=4))
return
if opcode == 'test':
try:
self.test([opts.processor]+opts.args, trace=os.getenv('TRACEBACK',
False) not in ['0', 0, 'False', 'F', False])
except KeyError as e:
# taking __str__ from Base to prevent adding quotes to KeyError
print(BaseException.__str__(e))
except Exception as e:
print(e)
finally:
return
if opcode in [x.NAME for x in self.processors]:
try:
self.invoke(self.get_processor_by_name(opcode), args[2:])
except:
import sys
sys.exit(-1)
else:
print("Processor {} not found".format(opcode))
def invoke(self, proc, args):
return proc.invoke(args)
def register(self, proc):
if self.namespace and not proc.NAMESPACE:
proc.NAME = "{}.{}".format(self.namespace, proc.NAME)
proc.NAMESPACE = self.namespace
self.processors.append(proc)
def register_processor(registry):
def decor(cls):
cls = mlprocessor(cls)
registry.register(cls)
return cls
return decor
def mlprocessor(cls):
cls.execute = types.MethodType(execute, cls)
return cls
registry = ProcessorRegistry()
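# Small usage sketch (assumed workflow, not part of the original module): register
# a processor on a throwaway registry and look it up by its namespaced name.
def _registry_usage_demo():
    demo_registry = ProcessorRegistry(processors=[], namespace='demo')
    @register_processor(demo_registry)
    class EchoProcessor:
        NAME = 'echo'
        NAMESPACE = None
        @staticmethod
        def spec():
            return {'name': EchoProcessor.NAME}
    return demo_registry.get_processor_by_name('demo.echo')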
| [
"[email protected]"
] | |
9df227dc90f6ac68fe20815bb42614b23252771e | 42cc27460f455808e251148cdbf672b04d468156 | /maya/rbRrSubmit/rbRrSubmit.py | 4d5208459d1a393a0bacb22e8a8eb8a5bdc18192 | [] | no_license | cgguo/rugbybugs | dc5f76a48bb0315ae336e8192cdad5d13b087f65 | df66aa2e3a8e38a34224627a7222d6854aa1597f | refs/heads/master | 2020-12-25T05:03:11.905437 | 2015-05-26T18:48:46 | 2015-05-26T18:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,597 | py |
#rbRrSubmit Module
#------------------------------------------------------------------
'''
Description:
Saves and submits the current scene to the rrSubmitter
'''
'''
ToDo:
'''
#Imports
#------------------------------------------------------------------
import os, sys
import maya.cmds as cmds
import pymel.core as pm
#RbRrSubmit class
#------------------------------------------------------------------
class RbRrSubmit():
#Constructor / Main Procedure
def __init__(self):
#Instance Vars
#------------------------------------------------------------------
self.verbose = True
#Methods
#------------------------------------------------------------------
#saveAndSubmitToRr
@staticmethod
def saveAndSubmitToRr(*args, **kwargs):
try:
# Get the current scene name.
curName, curExt = os.path.splitext(cmds.file(query=True, sceneName=True))
# Determine the file type.
if(curExt == ".ma"): curType = "mayaAscii"
if(curExt == ".mb"): curType = "mayaBinary"
#save file
cmds.file(f=True, type=curType, save=True)
#Check if animation in Render settings is on, otherwise print warning
if(pm.getAttr('defaultRenderGlobals.animation') == 0):
print('No Animation specified in Renderglobals. RRSubmitter will not open file to get settings')
#print to output window
sys.__stdout__.write('No Animation specified in Renderglobals. RRSubmitter will not open file to get settings \n')
#get rrSubmiterDir
rrSubmiterDir = os.environ['RR_Root']
#get scenePath
scenePath = cmds.file(q = True, sceneName = True)
#Check if scene path true, if so start submit
if (scenePath):
if ((sys.platform.lower() == "win32") or (sys.platform.lower() == "win64")):
os.system(rrSubmiterDir+"\\win__rrSubmitter.bat \""+scenePath+"\"")
elif (sys.platform.lower() == "darwin"):
os.system(rrSubmiterDir+"/bin/mac/rrSubmitter.app/Contents/MacOS/rrSubmitter \""+scenePath+"\"")
else:
os.system(rrSubmiterDir+"/lx__rrSubmitter.sh \""+scenePath+"\"")
print('Successfully submited scene to RRSubmitter')
except:
print('Error submitting scene to RRSubmitter')
#Shared Methods
#------------------------------------------------------------------
#Execute TMP
#------------------------------------------------------------------
'''
from rugbyBugs.maya.rbRrSubmit import rbRrSubmit
reload(rbRrSubmit)
rbRrSubmit.RbRrSubmit.saveAndSubmitToRr()
RbRrSubmitInstance = rbRrSubmit.RbRrSubmit()
RbRrSubmitInstance.saveAndSubmitToRr()
'''
| [
"[email protected]"
] | |
2fd741f3bbfaaaae1940e8b8e48323a38f156a35 | aa0083936eff7afc66fdf62cb7f632e5b3f26d20 | /Implementation/Boj 10816.py | be340f087f29a435538965a4916c5b3dd5927778 | [] | no_license | b8goal/Boj | ab31a8e1a414125bb4a0eb243db7dce2dda1ed4a | b7e395191eda01427a6db8a886a5ce3c49b03abf | refs/heads/master | 2022-02-03T13:15:26.904488 | 2021-12-30T11:58:07 | 2021-12-30T11:58:07 | 161,286,778 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | a,b=0,1
for i in range(int(input())):
a,b=a+b,a
print(a) | [
"[email protected]"
] | |
e128911fbf66839c06d7fca59b87cd5b97dee95f | cc096d321ab5c6abf54fdcea67f10e77cd02dfde | /flex-backend/pypy/translator/c/gc.py | b4dab2aed4db908b150f72af0a3e773bbbde0a49 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | limweb/flex-pypy | 310bd8fcd6a9ddc01c0b14a92f0298d0ae3aabd2 | 05aeeda183babdac80f9c10fca41e3fb1a272ccb | refs/heads/master | 2021-01-19T22:10:56.654997 | 2008-03-19T23:51:59 | 2008-03-19T23:51:59 | 32,463,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,221 | py | import sys
from pypy.translator.c.support import cdecl
from pypy.translator.c.node import ContainerNode
from pypy.rpython.lltypesystem.lltype import \
typeOf, Ptr, ContainerType, RttiStruct, \
RuntimeTypeInfo, getRuntimeTypeInfo, top_container
from pypy.rpython.memory.gctransform import \
refcounting, boehm, framework, stacklessframework
from pypy.rpython.lltypesystem import lltype, llmemory
class BasicGcPolicy(object):
requires_stackless = False
def __init__(self, db, thread_enabled=False):
self.db = db
self.thread_enabled = thread_enabled
def common_gcheader_definition(self, defnode):
return []
def common_gcheader_initdata(self, defnode):
return []
def struct_gcheader_definition(self, defnode):
return self.common_gcheader_definition(defnode)
def struct_gcheader_initdata(self, defnode):
return self.common_gcheader_initdata(defnode)
def array_gcheader_definition(self, defnode):
return self.common_gcheader_definition(defnode)
def array_gcheader_initdata(self, defnode):
return self.common_gcheader_initdata(defnode)
def struct_after_definition(self, defnode):
return []
def gc_libraries(self):
return []
def pre_pre_gc_code(self): # code that goes before include g_prerequisite.h
return []
def pre_gc_code(self):
return ['typedef void *GC_hidden_pointer;']
def gc_startup_code(self):
return []
def struct_setup(self, structdefnode, rtti):
return None
def array_setup(self, arraydefnode):
return None
def rtti_type(self):
return ''
def OP_GC_PUSH_ALIVE_PYOBJ(self, funcgen, op):
expr = funcgen.expr(op.args[0])
if expr == 'NULL':
return ''
return 'Py_XINCREF(%s);' % expr
def OP_GC_POP_ALIVE_PYOBJ(self, funcgen, op):
expr = funcgen.expr(op.args[0])
return 'Py_XDECREF(%s);' % expr
class RefcountingInfo:
static_deallocator = None
from pypy.rlib.objectmodel import CDefinedIntSymbolic
class RefcountingGcPolicy(BasicGcPolicy):
transformerclass = refcounting.RefcountingGCTransformer
def common_gcheader_definition(self, defnode):
if defnode.db.gctransformer is not None:
HDR = defnode.db.gctransformer.HDR
return [(name, HDR._flds[name]) for name in HDR._names]
else:
return []
def common_gcheader_initdata(self, defnode):
if defnode.db.gctransformer is not None:
gct = defnode.db.gctransformer
hdr = gct.gcheaderbuilder.header_of_object(top_container(defnode.obj))
HDR = gct.HDR
return [getattr(hdr, fldname) for fldname in HDR._names]
else:
return []
# for structs
def struct_setup(self, structdefnode, rtti):
if rtti is not None:
transformer = structdefnode.db.gctransformer
fptr = transformer.static_deallocation_funcptr_for_type(
structdefnode.STRUCT)
structdefnode.gcinfo = RefcountingInfo()
structdefnode.gcinfo.static_deallocator = structdefnode.db.get(fptr)
# for arrays
def array_setup(self, arraydefnode):
pass
# for rtti node
def rtti_type(self):
return 'void (@)(void *)' # void dealloc_xx(struct xx *)
def rtti_node_factory(self):
return RefcountingRuntimeTypeInfo_OpaqueNode
# zero malloc impl
def OP_GC_CALL_RTTI_DESTRUCTOR(self, funcgen, op):
args = [funcgen.expr(v) for v in op.args]
line = '%s(%s);' % (args[0], ', '.join(args[1:]))
return line
def OP_GC_FREE(self, funcgen, op):
args = [funcgen.expr(v) for v in op.args]
return 'OP_FREE(%s);' % (args[0], )
def OP_GC_FETCH_EXCEPTION(self, funcgen, op):
result = funcgen.expr(op.result)
return ('%s = RPyFetchExceptionValue();\n'
'RPyClearException();') % (result, )
def OP_GC_RESTORE_EXCEPTION(self, funcgen, op):
argh = funcgen.expr(op.args[0])
return 'if (%s != NULL) RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(%s), %s);' % (argh, argh, argh)
def OP_GC__COLLECT(self, funcgen, op):
return ''
class RefcountingRuntimeTypeInfo_OpaqueNode(ContainerNode):
nodekind = 'refcnt rtti'
globalcontainer = True
typename = 'void (@)(void *)'
def __init__(self, db, T, obj):
assert T == RuntimeTypeInfo
assert isinstance(obj.about, RttiStruct)
self.db = db
self.T = T
self.obj = obj
defnode = db.gettypedefnode(obj.about)
self.implementationtypename = 'void (@)(void *)'
self.name = defnode.gcinfo.static_deallocator
self.ptrname = '((void (*)(void *)) %s)' % (self.name,)
def enum_dependencies(self):
return []
def implementation(self):
return []
class BoehmInfo:
finalizer = None
# for MoreExactBoehmGcPolicy
malloc_exact = False
class BoehmGcPolicy(BasicGcPolicy):
transformerclass = boehm.BoehmGCTransformer
def array_setup(self, arraydefnode):
pass
def struct_setup(self, structdefnode, rtti):
pass
def rtti_type(self):
return BoehmGcRuntimeTypeInfo_OpaqueNode.typename
def rtti_node_factory(self):
return BoehmGcRuntimeTypeInfo_OpaqueNode
def gc_libraries(self):
if sys.platform == 'win32':
return ['gc_pypy']
return ['gc']
def pre_pre_gc_code(self):
if sys.platform == "linux2":
yield "#define _REENTRANT 1"
yield "#define GC_LINUX_THREADS 1"
if sys.platform != "win32":
# GC_REDIRECT_TO_LOCAL is not supported on Win32 by gc6.8
yield "#define GC_REDIRECT_TO_LOCAL 1"
yield "#define GC_I_HIDE_POINTERS 1"
yield '#include <gc/gc.h>'
yield '#define USING_BOEHM_GC'
def pre_gc_code(self):
return []
def gc_startup_code(self):
if sys.platform == 'win32':
pass # yield 'assert(GC_all_interior_pointers == 0);'
else:
yield 'GC_all_interior_pointers = 0;'
yield 'GC_init();'
def OP_GC_FETCH_EXCEPTION(self, funcgen, op):
result = funcgen.expr(op.result)
return ('%s = RPyFetchExceptionValue();\n'
'RPyClearException();') % (result, )
def OP_GC_RESTORE_EXCEPTION(self, funcgen, op):
argh = funcgen.expr(op.args[0])
return 'if (%s != NULL) RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(%s), %s);' % (argh, argh, argh)
def OP_GC__COLLECT(self, funcgen, op):
return 'GC_gcollect(); GC_invoke_finalizers();'
class BoehmGcRuntimeTypeInfo_OpaqueNode(ContainerNode):
nodekind = 'boehm rtti'
globalcontainer = True
typename = 'char @'
def __init__(self, db, T, obj):
assert T == RuntimeTypeInfo
assert isinstance(obj.about, RttiStruct)
self.db = db
self.T = T
self.obj = obj
defnode = db.gettypedefnode(obj.about)
self.implementationtypename = self.typename
self.name = self.db.namespace.uniquename('g_rtti_v_'+ defnode.barename)
self.ptrname = '(&%s)' % (self.name,)
def enum_dependencies(self):
return []
def implementation(self):
yield 'char %s /* uninitialized */;' % self.name
class FrameworkGcRuntimeTypeInfo_OpaqueNode(BoehmGcRuntimeTypeInfo_OpaqueNode):
nodekind = 'framework rtti'
class MoreExactBoehmGcPolicy(BoehmGcPolicy):
""" policy to experiment with giving some layout information to boehm. Use
new class to prevent breakage. """
def __init__(self, db, thread_enabled=False):
super(MoreExactBoehmGcPolicy, self).__init__(db, thread_enabled)
self.exactly_typed_structs = {}
def get_descr_name(self, defnode):
# XXX somewhat illegal way of introducing a name
return '%s__gc_descr__' % (defnode.name, )
def pre_pre_gc_code(self):
for line in super(MoreExactBoehmGcPolicy, self).pre_pre_gc_code():
yield line
yield "#include <gc/gc_typed.h>"
def struct_setup(self, structdefnode, rtti):
T = structdefnode.STRUCT
if T._is_atomic():
malloc_exact = False
else:
if T._is_varsize():
malloc_exact = T._flds[T._arrayfld]._is_atomic()
else:
malloc_exact = True
if malloc_exact:
if structdefnode.gcinfo is None:
structdefnode.gcinfo = BoehmInfo()
structdefnode.gcinfo.malloc_exact = True
self.exactly_typed_structs[structdefnode.STRUCT] = structdefnode
def struct_after_definition(self, defnode):
if defnode.gcinfo and defnode.gcinfo.malloc_exact:
yield 'GC_descr %s;' % (self.get_descr_name(defnode), )
def gc_startup_code(self):
for line in super(MoreExactBoehmGcPolicy, self).gc_startup_code():
yield line
for TYPE, defnode in self.exactly_typed_structs.iteritems():
T = defnode.gettype().replace("@", "")
yield "{"
yield "GC_word T_bitmap[GC_BITMAP_SIZE(%s)] = {0};" % (T, )
for field in TYPE._flds:
if getattr(TYPE, field) == lltype.Void:
continue
yield "GC_set_bit(T_bitmap, GC_WORD_OFFSET(%s, %s));" % (
T, defnode.c_struct_field_name(field))
yield "%s = GC_make_descriptor(T_bitmap, GC_WORD_LEN(%s));" % (
self.get_descr_name(defnode), T)
yield "}"
# to get an idea how it looks like with no refcount/gc at all
class NoneGcPolicy(BoehmGcPolicy):
gc_libraries = RefcountingGcPolicy.gc_libraries.im_func
gc_startup_code = RefcountingGcPolicy.gc_startup_code.im_func
def pre_pre_gc_code(self):
yield '#define USING_NO_GC'
class FrameworkGcPolicy(BasicGcPolicy):
transformerclass = framework.FrameworkGCTransformer
def struct_setup(self, structdefnode, rtti):
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
destrptr = rtti._obj.destructor_funcptr
# make sure this is seen by the database early, i.e. before
# finish_helpers() on the gctransformer
self.db.get(destrptr)
# the following, on the other hand, will only discover ll_finalizer
# helpers. The get() sees and records a delayed pointer. It is
# still important to see it so that it can be followed as soon as
# the mixlevelannotator resolves it.
gctransf = self.db.gctransformer
fptr = gctransf.finalizer_funcptr_for_type(structdefnode.STRUCT)
self.db.get(fptr)
def array_setup(self, arraydefnode):
pass
def rtti_type(self):
return FrameworkGcRuntimeTypeInfo_OpaqueNode.typename
def rtti_node_factory(self):
return FrameworkGcRuntimeTypeInfo_OpaqueNode
def pre_pre_gc_code(self):
yield '#define USING_FRAMEWORK_GC'
def gc_startup_code(self):
fnptr = self.db.gctransformer.frameworkgc_setup_ptr.value
yield '%s();' % (self.db.get(fnptr),)
def OP_GC_RELOAD_POSSIBLY_MOVED(self, funcgen, op):
args = [funcgen.expr(v) for v in op.args]
# XXX this more or less assumes mark-and-sweep gc
return ''
# proper return value for moving GCs:
# %s = %s; /* for moving GCs */' % (args[1], args[0])
def common_gcheader_definition(self, defnode):
return defnode.db.gctransformer.gc_fields()
def common_gcheader_initdata(self, defnode):
o = top_container(defnode.obj)
return defnode.db.gctransformer.gc_field_values_for(o)
class StacklessFrameworkGcPolicy(FrameworkGcPolicy):
transformerclass = stacklessframework.StacklessFrameworkGCTransformer
requires_stackless = True
name_to_gcpolicy = {
'boehm': BoehmGcPolicy,
'exact_boehm': MoreExactBoehmGcPolicy,
'ref': RefcountingGcPolicy,
'none': NoneGcPolicy,
'framework': FrameworkGcPolicy,
'stacklessgc': StacklessFrameworkGcPolicy,
}
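# Minimal sketch (not part of the translator itself) of how a gc option string
# could be resolved against the table above:
def _lookup_gc_policy(name):
    try:
        return name_to_gcpolicy[name]
    except KeyError:
        raise ValueError("unknown gc policy %r, expected one of: %s"
                         % (name, ', '.join(sorted(name_to_gcpolicy))))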
| [
"lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d"
] | lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d |
eb3ad9c31d3ebfe873ad9cae4a6722addf5dd306 | 946111147d7e3c2e9bc75f41e2c1fccaa365ae2d | /EdgeDetector.py | cfa70d7d305fe923db485f288522b479b83e557c | [
"MIT"
] | permissive | Sid2697/Image-Processing | 3c779c2be82e430f2f207ef2fc9d134dc0400196 | d25628d9b90e238b1df0881ec55359c41692ebbb | refs/heads/master | 2021-04-29T17:45:35.239800 | 2018-02-15T20:23:34 | 2018-02-15T20:23:34 | 121,676,811 | 2 | 0 | MIT | 2018-02-15T20:23:35 | 2018-02-15T20:06:34 | Python | UTF-8 | Python | false | false | 284 | py | import cv2
import numpy as np
cap=cv2.VideoCapture(0)
while True:
_,frame = cap.read()
Black=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
edges=cv2.Canny(frame,100,130)
cv2.imshow('Edges',edges)
if cv2.waitKey(2) & 0xFF == ord(' '):
break
cv2.destroyAllWindows()
cap.release()
| [
"[email protected]"
] | |
f965f55a5bd74cc12296683f04052d1b179291c4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_jostles.py | e3ce9b04d14de25bb110e8ff39d95f207397bd36 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
# class header
class _JOSTLES():
    def __init__(self,):
        self.name = "JOSTLES"
        # assumption: 'jostle' was an undefined name in the original; the base
        # word is stored here as a list instead
        self.definitions = ['jostle']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['jostle']
| [
"[email protected]"
] | |
61f5914638bc300b27e0e1bbddc6ba9a4dfdcc4a | 967de4753954f8a7988446c9edc5fbb14e3013a5 | /conf/wsgi.py | b98642582f06de1bf7b21bac19ed12f6c73a3ef8 | [
"MIT"
] | permissive | uktrade/directory-ui-verification | 20e199c9f4da180d82328a26f306f382736f10e1 | e95b0e51c23ac2b79c8fab8b40cbc30808e3ea47 | refs/heads/master | 2020-03-18T11:34:07.562385 | 2018-06-18T11:13:53 | 2018-06-18T11:13:53 | 134,679,321 | 0 | 0 | MIT | 2018-06-18T11:13:54 | 2018-05-24T07:39:17 | Python | UTF-8 | Python | false | false | 494 | py | """
WSGI config for directory-verification project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| [
"[email protected]"
] | |
a2b3aac32895d4ae785523f896686c3d758b2889 | 9f9f4280a02f451776ea08365a3f119448025c25 | /plans/hsppw/qcut_hsp-l_005_pwcc_linear_hs.py | 77e7cf7c8225054e48851cd9a3b74fcb28338848 | [
"BSD-2-Clause"
] | permissive | dbis-uibk/hit-prediction-code | 6b7effb2313d2499f49b2b14dd95ae7545299291 | c95be2cdedfcd5d5c27d0186f4c801d9be475389 | refs/heads/master | 2023-02-04T16:07:24.118915 | 2022-09-22T12:49:50 | 2022-09-22T12:49:50 | 226,829,436 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py | """Plan using all features."""
import os.path
from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
from hit_prediction_code.dataloaders import QcutLoaderWrapper
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'
number_of_classes = 5
dataloader = ClassLoaderWrapper(
wrapped_loader=QcutLoaderWrapper(
wrapped_loader=EssentiaLoader(
dataset_path=os.path.join(
PATH_PREFIX,
'hsp-l_acousticbrainz.parquet',
),
features=[
*common.all_no_year_list(),
],
label='yang_hit_score',
nan_value=0,
data_modifier=lambda df: compute_hit_score_on_df(
df,
pc_column='lastfm_playcount',
lc_column='lastfm_listener_count',
hit_score_column='yang_hit_score',
),
),
number_of_bins=number_of_classes,
),
labels=list(range(number_of_classes)),
)
pipeline = Pipeline([
('scale', MinMaxScaler()),
('model',
PairwiseOrdinalModel(
wrapped_model=LinearRegression(),
pairs_factor=3.,
threshold_type='average',
pair_strategy='random',
pair_encoding='concat',
threshold_sample_training=False,
)),
])
evaluator = CvEpochEvaluator(
cv=evaluations.cv(),
scoring=evaluations.metrics.ordinal_classifier_scoring(),
scoring_step_size=1,
)
result_handlers = [
print_results_as_json,
]
| [
"[email protected]"
] | |
849c7944c6f42de4793f349e57f2d1419d86a881 | 7361493342853a2bd9a3225eb71819c3cfd39985 | /python-numpy-to-cnn/Momentum.py | 7989051e0e6a566f7ee4e8fd25fac6a1921d6eef | [] | no_license | brightparagon/learn-machine-learning | 234df2c1298f9d0a34b0db010d9f870f97f1b867 | 3e6fe095d416317b97827615dbb7aa538261d117 | refs/heads/master | 2021-05-01T10:45:55.661458 | 2018-05-26T15:54:45 | 2018-05-26T15:54:45 | 121,107,145 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | import numpy as np
class Momentum:
def __init__(self, lr=0.01, momentum=0.9):
self.lr = lr
self.momentum = momentum
self.v = None
def update(self, params, grads):
if self.v is None:
self.v = {}
for key, val in params.items():
self.v[key] = np.zeros_like(val)
        for key in params.keys():
self.v[key] = self.momentum*self.v[key] - self.lr*grads[key]
params[key] += self.v[key]
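# Tiny usage sketch with made-up parameter and gradient arrays (shapes are
# arbitrary, not taken from the original repository):
if __name__ == '__main__':
    params = {'W': np.array([[1.0, 2.0]]), 'b': np.array([0.5])}
    grads = {'W': np.array([[0.1, -0.2]]), 'b': np.array([0.05])}
    optimizer = Momentum(lr=0.1, momentum=0.9)
    optimizer.update(params, grads)  # parameters are updated in place
    print(params)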
| [
"[email protected]"
] | |
e91e8d0a0d428aa8bb747635a2c022adadbd95bc | 9ac405635f3ac9332e02d0c7803df757417b7fee | /cotizaciones/migrations/0076_cotizacionpagoproyectadoacuerdopago_motivo.py | d72a7ebf5cd297d779fd188093e3930c7e98020e | [] | no_license | odecsarrollo/07_intranet_proyectos | 80af5de8da5faeb40807dd7df3a4f55f432ff4c0 | 524aeebb140bda9b1bf7a09b60e54a02f56fec9f | refs/heads/master | 2023-01-08T04:59:57.617626 | 2020-09-25T18:01:09 | 2020-09-25T18:01:09 | 187,250,667 | 0 | 0 | null | 2022-12-30T09:36:37 | 2019-05-17T16:41:35 | JavaScript | UTF-8 | Python | false | false | 475 | py | # Generated by Django 2.2.6 on 2020-07-29 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cotizaciones', '0075_auto_20200729_1345'),
]
operations = [
migrations.AddField(
model_name='cotizacionpagoproyectadoacuerdopago',
name='motivo',
field=models.CharField(default='Falloooo', max_length=100),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
def10769c95cfd6a081ef74f1f9c3574746eb34b | a4830a0189c325c35c9021479a5958ec870a2e8b | /routing/signals.py | c84cfbc1a8c76acc691ca275dec3a812c4b8c014 | [] | no_license | solutionprovider9174/steward | 044c7d299a625108824c854839ac41f51d2ca3fd | fd681593a9d2d339aab0f6f3688412d71cd2ae32 | refs/heads/master | 2022-12-11T06:45:04.544838 | 2020-08-21T02:56:55 | 2020-08-21T02:56:55 | 289,162,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Python
import datetime
# Django
from django.db.models.signals import post_save
from django.dispatch import receiver
# Application
from routing import models
@receiver(post_save, sender=models.Record)
def route_post_save(sender, **kwargs):
instance = kwargs['instance']
instance.route.numbers.all().update(modified=datetime.datetime.now())
print(instance)
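# Note: this receiver only fires once the module is imported; a common (assumed)
# pattern is to import it from the app config, e.g. in routing/apps.py:
#
#     from django.apps import AppConfig
#
#     class RoutingConfig(AppConfig):
#         name = 'routing'
#
#         def ready(self):
#             from routing import signals  # noqa: F401  (connects route_post_save)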
| [
"[email protected]"
] | |
06565d34f279d486d103d05a7560479d58d6a764 | 45eb50864138759adbcc7f8d9742c9f6c8102604 | /remind_me/services/ping.py | 1903aa1947bed8b5c31a86dd4b5ce54fa8521f32 | [] | no_license | bbelderbos/remind_me | bc8df4b24f701bb96edf336f9b310ee43dbbd9b4 | d432d4fb9632aa9531ee6e101f80de233d97ce56 | refs/heads/master | 2023-08-29T06:39:08.127091 | 2021-04-28T17:51:44 | 2021-04-28T17:51:44 | 409,145,017 | 0 | 0 | null | 2021-09-22T09:40:07 | 2021-09-22T09:40:06 | null | UTF-8 | Python | false | false | 1,066 | py | from datetime import datetime, timedelta
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from dateutil.parser import parse
from remind_me.data import db_session
from remind_me.data.events import Events
from remind_me.schedule_jobs import timezones
from remind_me.sms import send
scheduler = BackgroundScheduler()
@scheduler.scheduled_job('interval', minutes=20)
def ping_site() -> None:
session = db_session.create_session()
all_events = session.query(Events).filter(Events.sent == False).all()
print(all_events)
for ev in all_events:
event_time = parse(ev.date_and_time) + timedelta(hours=timezones[ev.timezone])
current_time = datetime.now()
if current_time > event_time:
ev.sent = True
session.commit()
send(ev.event, ev.phone_number, ev.carrier)
response = requests.get('https://desolate-garden-98632.herokuapp.com/')
print(response.status_code)
session.close()
def make_pings():
scheduler.start()
print(scheduler.get_jobs()) | [
"[email protected]"
] | |
5647ac16de076973951bdb0f0c028435874b9b27 | 5b40c6df03e477f3109fda9dc9b15a347df6c2ca | /ch04_bayes/monoDemo.py | bbd883492c4566a90337f9ee56d6ff09de3171b9 | [] | no_license | yphacker/machinelearninginaction | e40dfebd4634fd8fa48473c497ce5c9184cd6513 | 886a86e0cb1f5e61828774d4337cd08d2b2c54ed | refs/heads/master | 2020-03-28T02:09:38.090126 | 2019-12-06T11:54:25 | 2019-12-06T11:54:25 | 147,551,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # coding=utf-8
# author=yphacker
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
t = arange(0.0, 0.5, 0.01)
s = sin(2*pi*t)
logS = log(s)
fig = plt.figure()
ax = fig.add_subplot(211)
ax.plot(t,s)
ax.set_ylabel('f(x)')
ax.set_xlabel('x')
ax = fig.add_subplot(212)
ax.plot(t,logS)
ax.set_ylabel('ln(f(x))')
ax.set_xlabel('x')
plt.show() | [
"[email protected]"
] | |
a1606ca7c6c07c8ac5fa8713cd5d9e1a21a4b4d0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/218/52922/submittedfiles/estatistica.py | ee08c21d7372d72bf781c64fa80059a6fcfa769b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | # -*- coding: utf-8 -*-
def media(lista):
soma = 0
for i in range(0,len(lista),1):
soma = soma + lista[i]
resultado = soma/len(lista)
return resultado
#Based on the function above, write the function that computes the standard deviation of a list
def desvio(lista):
soma=0
for i in range(0,len(lista),1):
soma=soma+(lista[i]-media(lista))**2
    d=(soma/(len(lista)-1))**0.5
return d
#Finally, write the main program, which reads the input and calls the functions defined above.
n=int(input('escreva o numero de elementos das listas:'))
a=[]
for i in range(0,n,1):
valor=float(input('digite o numero a ser anexado a lista:'))
a.append(valor)
b=[]
for i in range(0,n,1):
valor=float(input('digite o numero a ser anexado a segunda lista:'))
b.append(valor)
ma=media(a)
print('%.2f'%ma)
dsva=desvio(a)
print('%.2f'% dsva)
mb=media(b)
print('%.2f'%mb)
dsvb=desvio(b)
print('%.2f'%dsvb)
| [
"[email protected]"
] | |
4bb8a72949aeb629e53475884686f226216061d6 | 45560786af3eddd0a538edcbf3027bd688b9a763 | /wed2/wed2/wsgi.py | 9cb3ca2445f8dfd3fc744d7fb89317e7fc684f08 | [] | no_license | MunSeoHee/gachonlikelion8th | 9c1d2470fcf27b87558c933afd14c1c748cb6ff6 | 9eff4a10e09764ac7ae6d6156f6b6c81eac4f2da | refs/heads/master | 2023-03-31T05:57:50.094402 | 2020-08-18T09:53:50 | 2020-08-18T09:53:50 | 273,183,296 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for wed2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wed2.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
fd892f61cc3cebc966b6d92a58fa0b4f1b3e556f | a857d1911a118b8aa62ffeaa8f154c8325cdc939 | /toontown/safezone/DistributedDLTreasure.py | 1eebfe55516411c19e8b8c7a936f73b8fe62096a | [
"MIT"
] | permissive | DioExtreme/TT-CL-Edition | 761d3463c829ec51f6bd2818a28b667c670c44b6 | 6b85ca8352a57e11f89337e1c381754d45af02ea | refs/heads/main | 2023-06-01T16:37:49.924935 | 2021-06-24T02:25:22 | 2021-06-24T02:25:22 | 379,310,849 | 0 | 0 | MIT | 2021-06-22T15:07:31 | 2021-06-22T15:07:30 | null | UTF-8 | Python | false | false | 333 | py | import DistributedSZTreasure
class DistributedDLTreasure(DistributedSZTreasure.DistributedSZTreasure):
def __init__(self, cr):
DistributedSZTreasure.DistributedSZTreasure.__init__(self, cr)
self.modelPath = 'phase_8/models/props/zzz_treasure'
self.grabSoundPath = 'phase_4/audio/sfx/SZ_DD_treasure.ogg'
| [
"[email protected]"
] | |
1a8c7ee320b8aa83c9b60017f7c089b22d17f1f6 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/iam/service_accounts/get_iam_policy.py | b17087b6af89786547df27eda790cbdb2de1bd61 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 2,191 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for getting IAM policies for service accounts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.iam import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
class GetIamPolicy(base.ListCommand):
"""Get the IAM policy for a service account.
This command gets the IAM policy for a service account. If formatted as
JSON, the output can be edited and used as a policy file for
set-iam-policy. The output includes an "etag" field identifying the version
emitted and allowing detection of concurrent policy updates; see
$ gcloud iam service-accounts set-iam-policy for additional details.
"""
detailed_help = {
'EXAMPLES': textwrap.dedent("""\
To print the IAM policy for a given service account, run:
$ {command} [email protected]
"""),
'DESCRIPTION': '\n\n'.join([
'{description}',
iam_util.GetHintForServiceAccountResource('get the iam policy of')])
}
@staticmethod
def Args(parser):
iam_util.AddServiceAccountNameArg(
parser,
action='whose policy to get')
base.URI_FLAG.RemoveFromParser(parser)
def Run(self, args):
client, messages = util.GetClientAndMessages()
return client.projects_serviceAccounts.GetIamPolicy(
messages.IamProjectsServiceAccountsGetIamPolicyRequest(
resource=iam_util.EmailToAccountResourceName(args.service_account)))
| [
"[email protected]"
] | |
457b1a12a0a97fa73a7741275fd78efe40fc3593 | 00b762e37ecef30ed04698033f719f04be9c5545 | /scripts/test_results/keras_test_results/conflicts/22_convolutional_actual.py | 558cdd4d8b8eeeea857b7fd396dcb456440dc194 | [] | no_license | kenji-nicholson/smerge | 4f9af17e2e516333b041727b77b8330e3255b7c2 | 3da9ebfdee02f9b4c882af1f26fe2e15d037271b | refs/heads/master | 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,277 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from .. import activations, initializations, regularizers, constraints
from ..utils.theano_utils import shared_zeros, on_gpu
from ..layers.core import Layer
if on_gpu():
from theano.sandbox.cuda import dnn
def conv_output_length(input_length, filter_size, border_mode, stride):
assert border_mode in {'same', 'full', 'valid'}
if border_mode == 'same':
output_length = input_length
elif border_mode == 'full':
output_length = input_length + filter_size - 1
elif border_mode == 'valid':
output_length = input_length - filter_size + 1
return (output_length + stride - 1) // stride
def pool_output_length(input_length, pool_size, ignore_border, stride):
if ignore_border:
output_length = input_length - pool_size + 1
output_length = (output_length + stride - 1) // stride
else:
if pool_size == input_length:
output_length = min(input_length, stride - stride % 2)
if output_length <= 0:
output_length = 1
elif stride >= pool_size:
output_length = (input_length + stride - 1) // stride
else:
output_length = (input_length - pool_size + stride - 1) // stride
if output_length <= 0:
output_length = 1
else:
output_length += 1
return output_length
class Convolution1D(Layer):
def __init__(self, input_dim, nb_filter, filter_length,
init='uniform', activation='linear', weights=None,
border_mode='valid', subsample_length=1,
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None):
if border_mode not in {'valid', 'full', 'same'}:
raise Exception('Invalid border mode for Convolution1D:', border_mode)
super(Convolution1D, self).__init__()
self.nb_filter = nb_filter
self.input_dim = input_dim
self.filter_length = filter_length
self.subsample_length = subsample_length
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = (subsample_length, 1)
self.border_mode = border_mode
self.input = T.tensor3()
self.W_shape = (nb_filter, input_dim, filter_length, 1)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
if weights is not None:
self.set_weights(weights)
@property
def output_shape(self):
length = conv_output_length(self.input_shape[1], self.filter_length, self.border_mode, self.subsample[0])
return (self.input_shape[0], length, self.nb_filter)
def get_output(self, train=False):
X = self.get_input(train)
X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
<<<<<<< REMOTE
if on_gpu() and dnn.dnn_available():
if border_mode == 'same':
assert(self.subsample_length == 1)
pad_x = (self.filter_length - self.subsample_length) // 2
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=(pad_x, 0))
else:
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=border_mode,
subsample=self.subsample)
=======
if border_mode == 'same':
border_mode = 'full'
assert self.subsample == (1, 1)
>>>>>>> LOCAL
else:
if border_mode == 'same':
border_mode = 'full'
conv_out = T.nnet.conv.conv2d(X, self.W,
border_mode=border_mode,
subsample=self.subsample)
if self.border_mode == 'same':
shift_x = (self.filter_length - 1) // 2
conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]
border_mode = self.border_mode
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"nb_filter": self.nb_filter,
"filter_length": self.filter_length,
"init": self.init.__name__,
"activation": self.activation.__name__,
"border_mode": self.border_mode,
"subsample_length": self.subsample_length,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class Convolution2D(Layer):
def __init__(self, nb_filter, stack_size, nb_row, nb_col,
init='glorot_uniform', activation='linear', weights=None,
border_mode='valid', subsample=(1, 1),
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None):
if border_mode not in {'valid', 'full', 'same'}:
raise Exception('Invalid border mode for Convolution2D:', border_mode)
super(Convolution2D, self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = tuple(subsample)
self.border_mode = border_mode
self.nb_filter = nb_filter
self.stack_size = stack_size
self.nb_row = nb_row
self.nb_col = nb_col
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
if weights is not None:
self.set_weights(weights)
@property
def output_shape(self):
input_shape = self.input_shape
rows = input_shape[2]
cols = input_shape[3]
rows = conv_output_length(rows, self.nb_row, self.border_mode, self.subsample[0])
cols = conv_output_length(cols, self.nb_col, self.border_mode, self.subsample[1])
return (input_shape[0], self.nb_filter, rows, cols)
def get_output(self, train=False):
X = self.get_input(train)
border_mode = self.border_mode
if on_gpu() and dnn.dnn_available():
if border_mode == 'same':
assert(self.subsample == (1, 1))
pad_x = (self.nb_row - self.subsample[0]) // 2
pad_y = (self.nb_col - self.subsample[1]) // 2
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=(pad_x, pad_y))
else:
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=border_mode,
subsample=self.subsample)
else:
return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
def get_config(self):
return {"name": self.__class__.__name__,
"nb_filter": self.nb_filter,
"stack_size": self.stack_size,
"nb_row": self.nb_row,
"nb_col": self.nb_col,
"init": self.init.__name__,
"activation": self.activation.__name__,
"border_mode": self.border_mode,
"subsample": self.subsample,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class MaxPooling1D(Layer):
def __init__(self, pool_length=2, stride=1, ignore_border=True):
super(MaxPooling1D, self).__init__()
if type(stride) is not int or not stride:
raise Exception('"stride" argument in MaxPooling1D should be an int > 0.')
self.pool_length = pool_length
<<<<<<< REMOTE
else:
self.st = None
=======
self.st = (self.stride, 1)
>>>>>>> LOCAL
self.stride = stride
self.input = T.tensor3()
self.poolsize = (pool_length, 1)
self.ignore_border = ignore_border
@property
def output_shape(self):
input_shape = self.input_shape
length = pool_output_length(input_shape[1], self.pool_length, self.ignore_border, self.stride)
return (input_shape[0], length, input_shape[2])
def get_output(self, train=False):
X = self.get_input(train)
X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.st, ignore_border=self.ignore_border)
output = output.dimshuffle(0, 2, 1, 3)
return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2]))
def get_config(self):
return {"name": self.__class__.__name__,
"stride": self.stride,
"pool_length": self.pool_length,
"ignore_border": self.ignore_border}
class MaxPooling2D(Layer):
def __init__(self, poolsize=(2, 2), stride=(1, 1), ignore_border=True):
super(MaxPooling2D, self).__init__()
self.input = T.tensor4()
self.poolsize = tuple(poolsize)
self.stride = tuple(stride)
self.ignore_border = ignore_border
@property
def output_shape(self):
input_shape = self.input_shape
rows = pool_output_length(input_shape[2], self.poolsize[0], self.ignore_border, self.stride[0])
cols = pool_output_length(input_shape[3], self.poolsize[1], self.ignore_border, self.stride[1])
return (input_shape[0], input_shape[1], rows, cols)
def get_output(self, train=False):
X = self.get_input(train)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"poolsize": self.poolsize,
"ignore_border": self.ignore_border,
"stride": self.stride}
class UpSample1D(Layer):
def __init__(self, length=2):
super(UpSample1D, self).__init__()
self.length = length
self.input = T.tensor3()
@property
def output_shape(self):
input_shape = self.input_shape
return (input_shape[0], self.length * input_shape[1], input_shape[2])
def get_output(self, train=False):
X = self.get_input(train)
output = theano.tensor.extra_ops.repeat(X, self.length, axis=1)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"length": self.length}
class UpSample2D(Layer):
def __init__(self, size=(2, 2)):
super(UpSample2D, self).__init__()
self.input = T.tensor4()
self.size = tuple(size)
@property
def output_shape(self):
input_shape = self.input_shape
return (input_shape[0], input_shape[1], self.size[0] * input_shape[2], self.size[1] * input_shape[3])
def get_output(self, train=False):
X = self.get_input(train)
Y = theano.tensor.extra_ops.repeat(X, self.size[0], axis=2)
output = theano.tensor.extra_ops.repeat(Y, self.size[1], axis=3)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"size": self.size}
class ZeroPadding2D(Layer):
def __init__(self, pad=(1, 1)):
super(ZeroPadding2D, self).__init__()
self.pad = tuple(pad)
self.input = T.tensor4()
@property
def output_shape(self):
input_shape = self.input_shape
return (input_shape[0], input_shape[1], input_shape[2] + 2 * self.pad[0], input_shape[3] + 2 * self.pad[1])
def get_output(self, train=False):
X = self.get_input(train)
pad = self.pad
in_shape = X.shape
out_shape = (in_shape[0], in_shape[1], in_shape[2] + 2 * pad[0], in_shape[3] + 2 * pad[1])
out = T.zeros(out_shape)
indices = (slice(None), slice(None), slice(pad[0], in_shape[2] + pad[0]), slice(pad[1], in_shape[3] + pad[1]))
return T.set_subtensor(out[indices], X)
def get_config(self):
return {"name": self.__class__.__name__,
"pad": self.pad}
| [
"[email protected]"
] | |
cc15763bac4ad87f4375500cf02cf860c4a57dec | ecd4b06d5d5368b71fd72a1c2191510a03b728fd | /8 - statistical thinking in python - part 1/quantitative exploratory data analysis/computing the ECDF.py | 2ed8e62c2f0144dda23cca03c57322c98d0e5f72 | [
"MIT"
] | permissive | Baidaly/datacamp-samples | 86055db5e326b59bfdce732729c80d76bf44629e | 37b4f78a967a429e0abca4a568da0eb9d58e4dff | refs/heads/master | 2022-07-27T01:18:00.700386 | 2022-07-18T19:27:23 | 2022-07-18T19:27:23 | 123,827,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | '''
In this exercise, you will write a function that takes as input a 1D array of data and then returns the x and y values of the ECDF. You will use this function over and over again throughout this course and its sequel. ECDFs are among the most important plots in statistical analysis. You can write your own function, foo(x,y) according to the following skeleton:
def foo(a,b):
"""State what function does here"""
# Computation performed here
return x, y
The function foo() above takes two arguments a and b and returns two values x and y. The function header def foo(a,b): contains the function signature foo(a,b), which consists of the function name, along with its parameters. For more on writing your own functions, see DataCamp's course Python Data Science Toolbox (Part 1) here!
'''
import numpy as np
def ecdf(data):
"""Compute ECDF for a one-dimensional array of measurements."""
# Number of data points: n
n = len(data)
# x-data for the ECDF: x
x = np.sort(data)
# y-data for the ECDF: y
y = np.arange(1, n + 1) / n
return x, y
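# A hedged usage sketch (my addition, not part of the DataCamp exercise): plot
# the ECDF of a random sample. The matplotlib import, the sample size and the
# seed below are illustrative assumptions.
import matplotlib.pyplot as plt
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    sample = rng.normal(loc=0.0, scale=1.0, size=100)
    x_vals, y_vals = ecdf(sample)
    # ECDFs are conventionally drawn as unconnected points
    plt.plot(x_vals, y_vals, marker='.', linestyle='none')
    plt.xlabel('value')
    plt.ylabel('ECDF')
    plt.show()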
| [
"[email protected]"
] | |
990da7948f16010dbffcd011ca083b1ea177b02e | fed6c6bdb6276d195bc565e527c3f19369d22b74 | /test/multi_shear.py | 39dd5a7acffb39105c56fd9dc77b27df6c74ef91 | [] | no_license | hekunlie/astrophy-research | edbe12d8dde83e0896e982f08b463fdcd3279bab | 7b2b7ada7e7421585e8993192f6111282c9cbb38 | refs/heads/master | 2021-11-15T05:08:51.271669 | 2021-11-13T08:53:33 | 2021-11-13T08:53:33 | 85,927,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | import numpy
import os
from sys import path,argv
path.append("E:/Github/astrophy-research/my_lib")
from Fourier_Quad import Fourier_Quad
import tool_box
import time
from astropy.io import fits
import matplotlib.pyplot as plt
import h5py
rng = numpy.random.RandomState(123)
fq = Fourier_Quad(12,122)
bin_num = 20
gh_num = 150
gh = numpy.linspace(-0.07, 0.07, gh_num)
signals = [0.05, -0.05]
sigmas = [2, 2]
nums = [50*1.e4, 50*1.e4]
datas = [rng.normal(signals[i], sigmas[i], int(nums[i])).reshape((int(nums[i]),1)) for i in range(len(signals))]
for i in range(len(datas)):
if i == 0:
data = datas[i]
else:
data = numpy.row_stack((data, datas[i]))
print(data.shape)
print(bin_num, data.shape)
bins = fq.set_bin(data, bin_num)
print("Bin length: ", bins.shape)
plt.scatter(bins, [0 for i in range(len(bins))])
plt.show()
# each single signal
for i in range(len(datas)):
chisq = []
for j in range(gh_num):
chisq.append(fq.G_bin(datas[i], 1, gh[j], bins, 0))
plt.scatter(gh, chisq)
plt.show()
plt.close()
est_g, est_g_sig = fq.fmin_g_new(datas[i], 1, bin_num)
print(signals[i], est_g, est_g_sig)
chisq = []
for i in range(gh_num):
chisq.append(fq.G_bin(data, 1, gh[i], bins, 0))
plt.figure(figsize=(16,12))
plt.scatter(gh, chisq)
plt.show()
plt.close()
| [
"[email protected]"
] | |
994ed213b18ce3f3062cdf14bd95b41d0758b7f6 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/python-patterns-master/tests/behavioral/test_observer.py | 821f97a61aa7a46e2b10c705af230637a92179e0 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 876 | py | from unittest.mock import Mock, patch
import pytest
from patterns.behavioral.observer import Data, DecimalViewer, HexViewer
@pytest.fixture
def observable():
return Data("some data")
def test_attach_detach(observable):
decimal_viewer = DecimalViewer()
assert len(observable._observers) == 0
observable.attach(decimal_viewer)
assert decimal_viewer in observable._observers
observable.detach(decimal_viewer)
assert decimal_viewer not in observable._observers
def test_one_data_change_notifies_each_observer_once(observable):
observable.attach(DecimalViewer())
observable.attach(HexViewer())
with patch(
"patterns.behavioral.observer.DecimalViewer.update", new_callable=Mock()
) as mocked_update:
assert mocked_update.call_count == 0
observable.data = 10
assert mocked_update.call_count == 1
| [
"[email protected]"
] | |
4bcf70c72206f3613479dcdf9297012a0979000b | 099b57613250ae0a0c3c75cc2a9b8095a5aac312 | /leetcode/Tree/235. 二叉搜索树的最近公共祖先.py | 1acbf0c219561464eeeca8a6ce5a098e67523a47 | [] | no_license | MitsurugiMeiya/Leetcoding | 36e41c8d649b777e5c057a5241007d04ad8f61cd | 87a6912ab4e21ab9be4dd6e90c2a6f8da9c68663 | refs/heads/master | 2022-06-17T19:48:41.692320 | 2020-05-13T16:45:54 | 2020-05-13T16:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | """
Given a binary search tree (BST), find the lowest common ancestor (LCA)最小公共祖先
of two given nodes in the BST.
"""
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
        if root.val > p.val and root.val > q.val:
            return self.lowestCommonAncestor(root.left,p,q)
        if root.val < p.val and root.val < q.val:
            return self.lowestCommonAncestor(root.right,p,q)
return root
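    # A hedged iterative variant (my addition, not part of the original answer):
    # the same BST idea as the recursion above, but walking down from the root
    # with a loop instead of recursive calls.
    def lowestCommonAncestorIterative(self, root, p, q):
        node = root
        while node:
            if node.val > p.val and node.val > q.val:
                node = node.left
            elif node.val < p.val and node.val < q.val:
                node = node.right
            else:
                return node
        return None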
"""
Solution notes:
This is a simplified version of problem 236, because the tree is known to be a BST.
1. If both p and q are smaller than root, both nodes lie in root's left subtree, so keep searching left.
2. If both p and q are larger than root, both nodes lie in root's right subtree, so keep searching right.
3. Once the paths split (or one of p/q equals root while the other is smaller/larger),
   neither condition 1 nor 2 holds any more, and that root is the lowest common ancestor.
This approach cannot detect the case where p or q is not actually in the tree, yet it passes all of the official test cases.
""" | [
"[email protected]"
] | |
79f3fc754229d3f7b6c4f441ef53015c1b039e64 | b595a24b07662a89826a1b6d334dfcaa3ec1c4b0 | /venv/lib/python3.6/site-packages/storages/backends/mogile.py | d6194194368306a8bb32dfed8b8bdf42f24f7c91 | [
"CC0-1.0"
] | permissive | kentarofujiy/base1 | 4629b638f96b3ed091ea695c81b3b7837af1ec79 | f820b9b379cda86ca5b446c63800fbe4bb8f3bce | refs/heads/master | 2021-07-13T02:06:01.371773 | 2017-03-11T12:43:19 | 2017-03-11T12:43:19 | 84,649,225 | 0 | 1 | CC0-1.0 | 2020-07-26T01:08:25 | 2017-03-11T12:43:32 | Python | UTF-8 | Python | false | false | 4,079 | py | from __future__ import print_function
import mimetypes
import warnings
from io import BytesIO
from urllib import parse as urlparse
from django.conf import settings
from django.core.cache import cache
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.http import HttpResponse, HttpResponseNotFound
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import Storage
try:
import mogilefs
except ImportError:
raise ImproperlyConfigured("Could not load mogilefs dependency.\
\nSee http://mogilefs.pbworks.com/Client-Libraries")
warnings.warn(
'MogileFSStorage is unmaintained and will be removed in the next django-storages version'
'See https://github.com/jschneier/django-storages/issues/202',
PendingDeprecationWarning
)
@deconstructible
class MogileFSStorage(Storage):
"""MogileFS filesystem storage"""
def __init__(self, base_url=settings.MEDIA_URL):
# the MOGILEFS_MEDIA_URL overrides MEDIA_URL
if hasattr(settings, 'MOGILEFS_MEDIA_URL'):
self.base_url = settings.MOGILEFS_MEDIA_URL
else:
self.base_url = base_url
for var in ('MOGILEFS_TRACKERS', 'MOGILEFS_DOMAIN',):
if not hasattr(settings, var):
raise ImproperlyConfigured("You must define %s to use the MogileFS backend." % var)
self.trackers = settings.MOGILEFS_TRACKERS
self.domain = settings.MOGILEFS_DOMAIN
self.client = mogilefs.Client(self.domain, self.trackers)
def get_mogile_paths(self, filename):
return self.client.get_paths(filename)
# The following methods define the Backend API
def filesize(self, filename):
        raise NotImplementedError
#return os.path.getsize(self._get_absolute_path(filename))
def path(self, filename):
paths = self.get_mogile_paths(filename)
if paths:
return self.get_mogile_paths(filename)[0]
else:
return None
def url(self, filename):
return urlparse.urljoin(self.base_url, filename).replace('\\', '/')
def open(self, filename, mode='rb'):
        raise NotImplementedError
#return open(self._get_absolute_path(filename), mode)
def exists(self, filename):
return filename in self.client
def save(self, filename, raw_contents, max_length=None):
filename = self.get_available_name(filename, max_length)
if not hasattr(self, 'mogile_class'):
self.mogile_class = None
# Write the file to mogile
success = self.client.send_file(filename, BytesIO(raw_contents), self.mogile_class)
if success:
print("Wrote file to key %s, %s@%s" % (filename, self.domain, self.trackers[0]))
else:
print("FAILURE writing file %s" % (filename))
return force_text(filename.replace('\\', '/'))
def delete(self, filename):
self.client.delete(filename)
def serve_mogilefs_file(request, key=None):
"""
Called when a user requests an image.
Either reproxy the path to perlbal, or serve the image outright
"""
# not the best way to do this, since we create a client each time
mimetype = mimetypes.guess_type(key)[0] or "application/x-octet-stream"
client = mogilefs.Client(settings.MOGILEFS_DOMAIN, settings.MOGILEFS_TRACKERS)
if hasattr(settings, "SERVE_WITH_PERLBAL") and settings.SERVE_WITH_PERLBAL:
# we're reproxying with perlbal
# check the path cache
path = cache.get(key)
if not path:
path = client.get_paths(key)
cache.set(key, path, 60)
if path:
response = HttpResponse(content_type=mimetype)
response['X-REPROXY-URL'] = path[0]
else:
response = HttpResponseNotFound()
else:
# we don't have perlbal, let's just serve the image via django
file_data = client[key]
if file_data:
response = HttpResponse(file_data, mimetype=mimetype)
else:
response = HttpResponseNotFound()
return response
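# A hedged configuration sketch (my addition; the values are assumptions, not
# shipped defaults): the backend above expects something like the following in
# the Django settings module.
#
#     MOGILEFS_DOMAIN = 'example-domain'
#     MOGILEFS_TRACKERS = ['tracker1.example.com:7001']
#     MOGILEFS_MEDIA_URL = '/mogile/'   # optional, overrides MEDIA_URL
#     SERVE_WITH_PERLBAL = True         # reproxy via the X-REPROXY-URL header
#     DEFAULT_FILE_STORAGE = 'storages.backends.mogile.MogileFSStorage'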
| [
"[email protected]"
] | |
7ab2153783df2bde81ef89f4762af1316f8b3a5c | 6ef3b1919e7acbc72e5706b2dc6d716f8929e3d2 | /transformers/commands/convert.py | 8c3f952f4a73fb49cf88b2f47e54fcb22282ebb7 | [
"MIT"
] | permissive | linshaoxin-maker/taas | 04f7dcc7c0d2818718e6b245531e017ca5370231 | 34e11fab167a7beb78fbe6991ff8721dc9208793 | refs/heads/main | 2023-01-19T20:58:04.459980 | 2020-11-27T02:28:36 | 2020-11-27T02:28:36 | 329,522,465 | 6 | 0 | MIT | 2021-01-14T06:02:08 | 2021-01-14T06:02:07 | null | UTF-8 | Python | false | false | 7,151 | py | from argparse import ArgumentParser, Namespace
from transformers.commands import BaseTransformersCLICommand
from ..utils import logging
def convert_command_factory(args: Namespace):
"""
Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
:return: ServeCommand
"""
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
)
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
train_parser = parser.add_parser(
"convert",
help="CLI tool to run convert model from original "
"author checkpoints to Transformers PyTorch checkpoints.",
)
train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
)
train_parser.add_argument(
"--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch savd model output."
)
train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name",
type=str,
default=None,
help="Optional fine-tuning task name if the TF model was a finetuned model.",
)
train_parser.set_defaults(func=convert_command_factory)
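        # A hedged CLI sketch (my addition; the file paths are illustrative
        # assumptions): with the arguments registered above, a BERT TF 1.x
        # checkpoint would be converted with something like
        #   transformers-cli convert --model_type bert \
        #       --tf_checkpoint ./bert_model.ckpt \
        #       --config ./bert_config.json \
        #       --pytorch_dump_output ./pytorch_model.bin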
def __init__(
self,
model_type: str,
tf_checkpoint: str,
pytorch_dump_output: str,
config: str,
finetuning_task_name: str,
*args
):
self._logger = logging.get_logger("transformers-cli/converting")
self._logger.info("Loading model {}".format(model_type))
self._model_type = model_type
self._tf_checkpoint = tf_checkpoint
self._pytorch_dump_output = pytorch_dump_output
self._config = config
self._finetuning_task_name = finetuning_task_name
def run(self):
if self._model_type == "albert":
try:
from transformers.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from transformers.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
if "ckpt" in self._tf_checkpoint.lower():
TF_CHECKPOINT = self._tf_checkpoint
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = self._tf_checkpoint
TF_CHECKPOINT = ""
convert_transfo_xl_checkpoint_to_pytorch(
TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
)
elif self._model_type == "gpt2":
try:
from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
)
elif self._model_type == "xlm":
from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
else:
raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm]")
| [
"[email protected]"
] | |
87cb1e514667d13033ff73c89aa8f2625c17fd15 | b2260e6588f60f0830248757a858be8c129350f4 | /QLabel img.py | c876389f8ade045218cc263cac41cfe34549d9d6 | [] | no_license | nengkya/PyQt | 1fe04aeb23532f4a5b92248a3414ac000d41d078 | 06068556348c6906198d4db7efc979889263fd56 | refs/heads/master | 2023-02-08T01:10:20.970167 | 2023-01-20T18:03:57 | 2023-01-20T18:03:57 | 105,175,866 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | import sys
#from PyQt5.QtGui import *
#from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.setupUi()
def setupUi(self):
self.resize(400, 200)
self.move(300, 300)
self.setWindowTitle('Demo QLabel')
self.label1 = QLabel()
self.label1.setText('Demo menampilkan gambar dengan QLabel')
self.label2 = QLabel()
self.label2.setText('<img src = PyPassContinue.png>')
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.label2)
self.setLayout(layout)
if __name__ == '__main__':
a = QApplication(sys.argv)
form = MainForm()
form.show()
a.exec()
| [
"[email protected]"
] | |
53dd9814341ea2b91a56c29803edfa507a2eaf25 | 08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc | /src/mnistk/networks/conv3dsigmoid_11.py | 5c9ca5cdb81a4e3052630d571bc92b9b040cf256 | [] | no_license | ahgamut/mnistk | 58dadffad204602d425b18549e9b3d245dbf5486 | 19a661185e6d82996624fc6fcc03de7ad9213eb0 | refs/heads/master | 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 | Python | UTF-8 | Python | false | false | 1,460 | py | # -*- coding: utf-8 -*-
"""
conv3dsigmoid_11.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv3dSigmoid_11(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv3d(in_channels=1, out_channels=12, kernel_size=(4, 4, 4), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f1 = nn.Sigmoid()
self.f2 = nn.Conv3d(in_channels=12, out_channels=32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f3 = nn.Sigmoid()
self.f4 = nn.Conv3d(in_channels=32, out_channels=10, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f5 = nn.Sigmoid()
self.f6 = nn.Conv3d(in_channels=10, out_channels=10, kernel_size=(11, 2, 2), stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, padding_mode='zeros')
self.f7 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],1,16,7,7)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
x = self.f6(x)
x = x.view(x.shape[0],10)
x = self.f7(x)
return x
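# A hedged smoke test (my addition, not part of the generated network file):
# run a random batch of flattened 28x28 inputs through the model. The batch
# size and the seed are illustrative assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    net = Conv3dSigmoid_11()
    batch = torch.randn(4, 784)  # forward() reshapes each row to (1, 16, 7, 7)
    log_probs = net(batch)
    print(log_probs.shape)  # expected: torch.Size([4, 10])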
| [
"[email protected]"
] | |
203728418ef83b30a6c1a44c18db32698264f957 | e68c3cbb9d6291fcdd51adae8a55616dcfafe55c | /spf/parser/ccg/cky/chart/cell.py | e6c2ec2917c119fdfa78536df3e673831ccf333b | [] | no_license | Oneplus/pyspf | 26126f5094065960d5f034fea2be4709aa1a4c50 | 175f90b4f837aa60fd660cba850d10a82dd578a1 | refs/heads/master | 2016-08-12T15:18:25.606712 | 2015-11-22T02:49:07 | 2015-11-22T02:49:07 | 45,725,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/env python
from spf.utils.log import get_logger
class Cell(object):
LOG = get_logger(__name__)
def __init__(self, parse_step, start, end, is_complete_span):
"""
:param CKYParseStepI parse_step:
:param int start:
:param int end:
:param bool is_complete_span:
:return:
"""
self.is_complete_span = is_complete_span
self.is_full_parse = parse_step.is_full_parse() | [
"[email protected]"
] | |
c53065dc8d2be291b77cd95ecce9adf660a7552e | a0dbc48f31cf3fbddd3cc7672cf3db415cb391c4 | /compiler/datasheet/__init__.py | 369ba8feafaf111065bcf9505df5044d9a93ff82 | [
"BSD-3-Clause"
] | permissive | wangyaobsz/OpenRAM | 4178ef93816b233bac0aaecc580e2cbd235ac39d | 0d616ae072e6c42a0d8a006eebc681408502e956 | refs/heads/master | 2022-08-23T07:51:39.745708 | 2022-07-21T16:37:24 | 2022-07-21T16:37:24 | 113,813,373 | 1 | 0 | null | 2017-12-11T04:47:53 | 2017-12-11T04:47:52 | null | UTF-8 | Python | false | false | 41 | py | from .datasheet_gen import datasheet_gen
| [
"[email protected]"
] | |
0a58b79c5071070e9cbe7f8dc945427cbf48c695 | fea10afefd89f5138a57fd4d6ac1a87844cf6e7c | /robocrys/condense/component.py | 98ff05f3b524ef9f9e298c3ed5fd346b31066453 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-hdf5",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | mukhtarbayerouniversity/robocrystallographer | ed192898acb60b7f18af4740cbf1524a2865bb79 | 11b39bfac157624d84d704dd4e7602e76b45a5b9 | refs/heads/main | 2023-06-12T16:23:00.260579 | 2021-07-05T18:12:35 | 2021-07-05T18:12:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,425 | py | """
This module implements functions for handling structure components.
"""
from copy import deepcopy
from typing import Any, Dict, List, Tuple
import networkx as nx
import numpy as np
from monty.fractions import gcd
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import PeriodicSite, Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import formula_double_format
from robocrys import common_formulas
from robocrys.condense.fingerprint import get_structure_fingerprint
Component = Dict[str, Any]
def get_structure_inequiv_components(
components: List[Component],
use_structure_graph: bool = False,
fingerprint_tol: int = 0.01,
) -> List[Component]:
"""Gets and counts the structurally inequivalent components.
Supports matching through StructureMatcher or by a combined structure graph/
site fingerprint approach. For the latter method, the component data has to
have been generated with ``inc_graph=True``.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
If ``use_structure_graph=True``, the components should be generated
with ``inc_graph=True``.
use_structure_graph: Whether to use the bonding graph and site
fingerprints to determine if components are structurally equivalent.
If ``False``,
:obj:`pymatgen.analysis.structure_matcher.StructureMatcher` will be
used to compare the components.
fingerprint_tol: The fingerprint tolerance to determine whether two
components have a matching fingerprint. This option is ignored if
``use_structure_graph=False``.
Returns:
A list of the structurally inequivalent components. Any duplicate
components will only be returned once. The component objects are in the
same format is given by
:obj:`pymatgen.analysis.dimensionality.get_structure_components` but
have an additional field:
- ``"count"`` (:obj:`int`): The number of times this component appears
in the structure.
"""
components = deepcopy(components)
for component in components:
component["count"] = 1
if use_structure_graph:
# check fingerprints match and components are isomorphic.
fingerprints = [
get_structure_fingerprint(c["structure_graph"].structure)
for c in components
]
seen_components = [components[0]]
seen_fingers = [fingerprints[0]]
for component, fingerprint in zip(components[1:], fingerprints[1:]):
graph_match = [
components_are_isomorphic(component, c) for c in seen_components
]
finger_match = [
np.linalg.norm(fingerprint - c) < fingerprint_tol for c in seen_fingers
]
structure_match = [i and f for i, f in zip(graph_match, finger_match)]
if any(structure_match):
# there should only ever be a single match so we take index of
# the first match and increment the component count
loc = np.where(structure_match)[0][0]
seen_components[loc]["count"] += 1
else:
seen_components.append(component)
seen_fingers.append(fingerprint)
else:
sm = StructureMatcher()
seen_components = [components[0]]
for component in components[1:]:
structure_match = [
sm.fit(
component["structure_graph"].structure,
c["structure_graph"].structure,
)
for c in seen_components
]
if any(structure_match):
# there should only ever be a single match so we take index of
# the first match and increment the component count
loc = np.where(structure_match)[0][0]
seen_components[loc]["count"] += 1
else:
seen_components.append(component)
return seen_components
def components_are_isomorphic(
component_a: Component, component_b: Component, use_weights: bool = False
):
"""Determines whether the graphs of two components are isomorphic.
Only takes into account graph connectivity and not local geometry (e.g. bond
angles and distances).
Args:
component_a: The first component.
component_b: The second component.
use_weights: Whether to use the graph edge weights in comparing graphs.
Returns:
Whether the components are isomorphic.
"""
def node_match(n1, n2):
return n1["specie"] == n2["specie"]
def edge_match(e1, e2):
if use_weights:
return e1["weight"] == e2["weight"]
else:
return True
graph_a = component_a["structure_graph"].graph
graph_b = component_b["structure_graph"].graph
species_a = {
n: {"specie": str(component_a["structure_graph"].structure[n].specie)}
for n in graph_a
}
species_b = {
n: {"specie": str(component_b["structure_graph"].structure[n].specie)}
for n in graph_b
}
nx.set_node_attributes(graph_a, species_a)
nx.set_node_attributes(graph_b, species_b)
return nx.is_isomorphic(
graph_a, graph_b, node_match=node_match, edge_match=edge_match
)
def get_sym_inequiv_components(
components: List[Component], spg_analyzer: SpacegroupAnalyzer
) -> List[Component]:
"""Gets and counts the symmetrically inequivalent components.
Component data has to have been generated with ``inc_site_ids=True``.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`,
with ``inc_site_ids=True``.
spg_analyzer: A `pymatgen.symmetry.analyzer.SpacegroupAnalyzer` analyzer
object for the structure containing the components.
Returns:
A list of the symmetrically inequivalent components. Any duplicate
components will only be returned once. The component objects are in the
same format is given by
:obj:`pymatgen.analysis.dimensionality.get_structure_components` but
the additional property:
- ``"count"`` (:obj:`int`): The number of times this component appears
in the structure.
"""
components = deepcopy(components)
sym_inequiv_components = {}
equivalent_atoms = spg_analyzer.get_symmetry_dataset()["equivalent_atoms"]
for component in components:
sym_indices = frozenset(equivalent_atoms[x] for x in component["site_ids"])
# if two components are composed of atoms that are symmetrically
# equivalent they are the same.
if sym_indices in sym_inequiv_components:
sym_inequiv_components[sym_indices]["count"] += 1
continue
component["count"] = 1
sym_inequiv_components[sym_indices] = component
return list(sym_inequiv_components.values())
def get_formula_inequiv_components(
components: List[Component],
use_iupac_formula: bool = True,
use_common_formulas: bool = True,
) -> List[Component]:
"""Gets and counts the inequivalent components based on their formuula.
Note that the counting of compounds is different to in
``get_sym_inequiv_equivalent``. I.e. the count is not the number of
components with the same formula. For example, the count of the formula
"GaAs" in a system with two Ga2As2 components would be 4.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`,
with ``inc_site_ids=True``.
use_iupac_formula (bool, optional): Whether to order formulas by the
iupac "electronegativity" series, defined in Table VI of
"Nomenclature of Inorganic Chemistry (IUPAC Recommendations 2005)".
This ordering effectively follows the groups and rows of the
periodic table, except the Lanthanides, Actanides and hydrogen. If
set to ``False``, the elements will be ordered according to the
electronegativity values.
use_common_formulas: Whether to use the database of common formulas.
The common formula will be used preferentially to the iupac or
reduced formula.
Returns:
A list of the compositionally inequivalent components. Any duplicate
components will only be returned once. The component objects are in the
same format is given by
:obj:`pymatgen.analysis.dimensionality.get_structure_components` but
have two additional fields:
- ``"count"`` (:obj:`int`): The number of formula units of this
component. Note, this is not the number of components with the same
formula. For example, the count of the formula "GaAs" in a system
with two Ga2As2 components would be 4.
- ``"formula"`` (``list[int]``): The reduced formula of the component.
"""
components = deepcopy(components)
inequiv_components = {}
for component in components:
formula, factor = get_component_formula_and_factor(
component,
use_iupac_formula=use_iupac_formula,
use_common_formulas=use_common_formulas,
)
# if two components have the same composition we treat them as the same
if formula in inequiv_components:
inequiv_components[formula]["count"] += factor
continue
component["count"] = factor
component["formula"] = formula
inequiv_components[formula] = component
return list(inequiv_components.values())
def filter_molecular_components(
components: List[Component],
) -> Tuple[List[Component], List[Component]]:
"""Separate list of components into molecular and non-molecular components.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
Returns:
The filtered components as a tuple of ``(molecular_components,
other_components)``.
"""
molecular_components = [c for c in components if c["dimensionality"] == 0]
other_components = [c for c in components if c["dimensionality"] != 0]
return molecular_components, other_components
def get_reconstructed_structure(
components: List[Component], simplify_molecules: bool = True
) -> Structure:
"""Reconstructs a structure from a list of components.
Has the option to simplify molecular components into a single site
positioned at the centre of mass of the molecular. If using this option,
the components must have been generated with ``inc_molecule_graph=True``.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`,
with ``inc_molecule_graph=True``.
simplify_molecules: Whether to simplify the molecular components into
a single site positioned at the centre of mass of the molecule.
Returns:
The reconstructed structure.
"""
mol_sites = []
other_sites = []
if simplify_molecules:
mol_components, components = filter_molecular_components(components)
if mol_components:
lattice = mol_components[0]["structure_graph"].structure.lattice
mol_sites = [
PeriodicSite(
c["structure_graph"].structure[0].specie,
c["molecule_graph"].molecule.center_of_mass,
lattice,
coords_are_cartesian=True,
)
for c in mol_components
]
if components:
other_sites = [
site for c in components for site in c["structure_graph"].structure
]
return Structure.from_sites(other_sites + mol_sites)
def get_component_formula_and_factor(
component: Component,
use_iupac_formula: bool = True,
use_common_formulas: bool = True,
) -> Tuple[str, int]:
"""Gets the reduced formula and factor of a single component.
Args:
component: A structure component, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
use_iupac_formula (bool, optional): Whether to order formulas by the
iupac "electronegativity" series, defined in Table VI of
"Nomenclature of Inorganic Chemistry (IUPAC Recommendations 2005)".
This ordering effectively follows the groups and rows of the
periodic table, except the Lanthanides, Actanides and hydrogen. If
set to ``False``, the elements will be ordered according to the
electronegativity values.
use_common_formulas: Whether to use the database of common formulas.
The common formula will be used preferentially to the iupac or
reduced formula.
Returns:
The formula and factor of the component.
"""
formula, factor = component[
"structure_graph"
].structure.composition.get_reduced_formula_and_factor(
iupac_ordering=use_iupac_formula
)
reduced_formula = component["structure_graph"].structure.composition.reduced_formula
if use_common_formulas and reduced_formula in common_formulas:
formula = common_formulas[reduced_formula]
return formula, factor
def get_component_formula(
component: Component,
use_iupac_formula: bool = True,
use_common_formulas: bool = True,
) -> str:
"""Gets the reduced formula of a single component.
Args:
component: A structure component, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
use_iupac_formula (bool, optional): Whether to order formulas by the
iupac "electronegativity" series, defined in Table VI of
"Nomenclature of Inorganic Chemistry (IUPAC Recommendations 2005)".
This ordering effectively follows the groups and rows of the
periodic table, except the Lanthanides, Actanides and hydrogen. If
set to ``False``, the elements will be ordered according to the
electronegativity values.
use_common_formulas: Whether to use the database of common formulas.
The common formula will be used preferentially to the iupac or
reduced formula.
Returns:
The formula and factor of the component.
"""
return get_component_formula_and_factor(
component,
use_iupac_formula=use_iupac_formula,
use_common_formulas=use_common_formulas,
)[0]
def get_formula_from_components(
components: List[Component],
molecules_first: bool = False,
use_iupac_formula: bool = True,
use_common_formulas: bool = True,
) -> str:
"""Reconstructs a chemical formula from structure components.
The chemical formulas for the individual components will be grouped
together. If two components share the same composition, they will be
treated as equivalent.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
molecules_first: Whether to put any molecules (zero-dimensional
components) at the beginning of the formula.
use_iupac_formula (bool, optional): Whether to order formulas by the
iupac "electronegativity" series, defined in Table VI of
"Nomenclature of Inorganic Chemistry (IUPAC Recommendations 2005)".
This ordering effectively follows the groups and rows of the
periodic table, except the Lanthanides, Actanides and hydrogen. If
set to ``False``, the elements will be ordered according to the
electronegativity values.
use_common_formulas: Whether to use the database of common formulas.
The common formula will be used preferentially to the iupac or
reduced formula.
Returns:
The chemical formula.
"""
def order(comp_formula):
composition = Composition(comp_formula)
if use_iupac_formula:
return sum(
[get_el_sp(s).iupac_ordering for s in composition.elements]
) / len(composition.elements)
else:
return composition.average_electroneg
components = get_formula_inequiv_components(
components,
use_iupac_formula=use_iupac_formula,
use_common_formulas=use_common_formulas,
)
if molecules_first:
mol_comps, other_comps = filter_molecular_components(components)
else:
mol_comps = []
other_comps = components
formulas = sorted([c["formula"] for c in mol_comps], key=order) + sorted(
[c["formula"] for c in other_comps], key=order
)
# if components include special formulas, then the count can be 0.5
# therefore if any non integer amounts we can just use a factor of 2
all_int = all(v["count"] % 1 == 0 for v in components)
prefactor = 1 if all_int else 2
form_count_dict = {c["formula"]: int(c["count"] * prefactor) for c in components}
# the following is based on ``pymatgen.core.composition.reduce_formula``
num_comps = len(formulas)
factor = abs(gcd(*(form_count_dict.values())))
reduced_form = []
for i in range(0, num_comps):
formula = formulas[i]
normamt = form_count_dict[formula] * 1.0 / factor
formatted_formula = formula if normamt == 1 else f"({formula})"
reduced_form.append(formatted_formula)
reduced_form.append(formula_double_format(normamt))
reduced_form = "".join(reduced_form)
return reduced_form
def components_are_vdw_heterostructure(components: List[Component]) -> bool:
"""Whether a list of components form a van der Waals heterostructure.
A heterostructure is defined here as a structure with more than one
formula inequivalent 2D component.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
Returns:
Whether the list of components from a heterostructure.
"""
components = get_formula_inequiv_components(components)
if len([c for c in components if c["dimensionality"] == 2]):
return True
else:
return False
def get_vdw_heterostructure_information(
components: List[Component],
use_iupac_formula: bool = True,
use_common_formulas: bool = True,
inc_ordered_components: bool = False,
inc_intercalants: bool = False,
) -> Dict[str, Any]:
"""Gets information about ordering of components in a vdw heterostructure.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`
with ``inc_orientation=True``.
use_iupac_formula (bool, optional): Whether to order formulas by the
iupac "electronegativity" series, defined in Table VI of
"Nomenclature of Inorganic Chemistry (IUPAC Recommendations 2005)".
This ordering effectively follows the groups and rows of the
periodic table, except the Lanthanides, Actanides and hydrogen. If
set to ``False``, the elements will be ordered according to the
electronegativity values.
use_common_formulas: Whether to use the database of common formulas.
The common formula will be used preferentially to the iupac or
reduced formula.
inc_ordered_components: Whether to return a list of the ordered
components. If False, just the component formulas will be returned.
inc_intercalants: Whether to return a list of the intercalants. If
False, just the intercalant formulas will be returned.
Returns:
Information on the heterostructure, as an :obj:`dict` with they keys:
- ``"repeating_unit"`` (``list[str]``): A :obj:`List` of formulas of the
smallest repeating series of components. For example. if the
structure consists of A and B components ordered as "A B A B A B",
the repeating unit is "A B".
- ``"num_repetitions"`` (:obj:`int`): The number of repetitions of the
repeating unit that forms the overall structure. For example. if
the structure consists of A and B components ordered as
"A B A B A B", the number of repetitions is 3.
- ``"intercalant_formulas"`` (:obj:`list[str]`): The formulas of the
intercalated compounds.
- ``"ordered_components"`` (``list[component]``): If
``inc_ordered_components``, a :obj:`List` of components, ordered as
they appear in the heteostructure stacking direction.
- ``"intercalants"`` (``list[component]``: If ``inc_intercalants``, a
:obj:`List` of intercalated components.
"""
if not components_are_vdw_heterostructure(components):
raise ValueError("Components do not form a heterostructure.")
try:
millers = {c["orientation"] for c in components if c["dimensionality"] == 2}
except KeyError as e:
if "orientation" in str(e):
raise KeyError("Components not generated with inc_orientation=True")
else:
raise e
if len(millers) != 1:
raise ValueError("2D components don't all have the same orientation.")
cart_miller = (
components[0]["structure_graph"]
.structure.lattice.get_cartesian_coords(millers.pop())
.tolist()
)
# plane is used to find the distances of all components along a certain axis
# should use normal vector of plane to get exact distances but we just care
# about relative distances.
def distances_to_plane(points):
return [np.dot(cart_miller, pp) for pp in points]
min_distances = [
min(distances_to_plane(c["structure_graph"].structure.cart_coords))
for c in components
]
# sort the components by distance to plane
ordering = np.argsort(min_distances)
ordered_components = [components[x] for x in ordering]
# only consider the layered components formulae
ordered_layers = [c for c in ordered_components if c["dimensionality"] == 2]
ordered_layers_formula = [
get_component_formula(
c,
use_iupac_formula=use_iupac_formula,
use_common_formulas=use_common_formulas,
)
for c in ordered_layers
]
num_layer_formulas = len(set(ordered_layers_formula))
repeating_formula = ordered_layers_formula
num_repetitions = 1
# depending on the number of inequivalent formulae, there is a maximum
# number of repetitions that can occur. To avoid unnecessary work we start
# from this number of repetitions and move to 1 repetition (e.g. no
# repetition)
max_repetitions = int(np.floor(len(ordered_layers) / num_layer_formulas))
for n in range(max_repetitions, 0, -1):
if (
all(
[
len(set(ordered_layers_formula[i::num_layer_formulas])) == 1
for i in range(n)
]
)
and len(ordered_layers) % n == 0
):
repeating_formula = ordered_layers_formula[: int(len(ordered_layers) / n)]
num_repetitions = n
break
intercalants = [c for c in components if c["dimensionality"] < 2]
intercalant_formulas = [get_component_formula(c) for c in intercalants]
data = {
"repeating_unit": repeating_formula,
"num_repetitions": num_repetitions,
"intercalant_formulas": intercalant_formulas,
}
if inc_intercalants:
data["intercalants"] = intercalants
if inc_ordered_components:
data["ordered_components"] = ordered_components
return data
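# --- Illustrative usage added by the editor (not part of the original module) ---
# A minimal sketch of how the dictionary documented above might be consumed; it
# relies only on the keys "repeating_unit", "num_repetitions" and
# "intercalant_formulas" described in the docstring.
def _describe_heterostructure(info):
    """Return a one-line label such as "(MoS2 | WSe2) x 3 + Li"."""
    label = "({}) x {}".format(" | ".join(info["repeating_unit"]),
                               info["num_repetitions"])
    if info["intercalant_formulas"]:
        label += " + " + ", ".join(info["intercalant_formulas"])
    return label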
| [
"[email protected]"
] | |
6fc0344b9b9c4a260c80ee9f7f61a9f1d948ca1e | 40d371136f2d7de9c95bfe40fd3c0437095e9819 | /build/rbx1/rbx1_nav/catkin_generated/pkg.develspace.context.pc.py | a74ba9f55c2c3dd4d7c607c520ce9e56cd9b59fa | [] | no_license | marine0131/ros_ws | b4e6c5cf317260eaae1c406fb3ee234b3a3e67d5 | 6ddded3a92a717879bb646e7f2df1fea1a2d46b2 | refs/heads/master | 2021-07-05T06:29:43.054275 | 2017-09-28T08:29:14 | 2017-09-28T08:29:14 | 100,458,679 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/whj/catkin_ws/devel/include".split(';') if "/home/whj/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rbx1_nav"
PROJECT_SPACE_DIR = "/home/whj/catkin_ws/devel"
PROJECT_VERSION = "0.4.0"
| [
"[email protected]"
] | |
4ab744495bdd71914089b3fabd09162e9ee06ce5 | 4c77c3f68ddd280ad26ed78a9f4927ff9eb5e1f1 | /src/ledger/lib/python2.7/site-packages/pip/_internal/commands/freeze.py | ac562d7d84b626944ef398efbea2d4f93b40da0b | [
"MIT"
] | permissive | neoscoin/neos-core | 5f4a4e9fcdf13a21d1dbedfc7c01a8a8ba454a98 | 22cecda54875e3554e7c2a4569551c042fa6c0a2 | refs/heads/master | 2020-03-23T18:54:58.602764 | 2019-08-04T16:44:27 | 2019-08-04T16:44:27 | 141,940,658 | 4 | 4 | MIT | 2018-07-28T21:39:26 | 2018-07-23T00:05:03 | C++ | UTF-8 | Python | false | false | 3,320 | py | from __future__ import absolute_import
import sys
from pip._internal import index
from pip._internal.basecommand import Command
from pip._internal.cache import WheelCache
from pip._internal.compat import stdlib_pkgs
from pip._internal.operations.freeze import freeze
DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'}
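# Example invocations handled by this command (editor's illustrative note, not part
# of the pip source; the flags below are the ones registered in __init__ further down):
#   pip freeze                      # list installed packages as "name==version" lines
#   pip freeze --all                # also include pip, setuptools, distribute, wheel
#   pip freeze -r requirements.txt  # follow the order/comments of an existing file
#   pip freeze --exclude-editable   # omit "-e ..." editable installs from the output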
class FreezeCommand(Command):
"""
Output installed packages in requirements format.
Packages are listed in a case-insensitive sorted order.
"""
name = 'freeze'
usage = """
%prog [options]"""
summary = 'Output installed packages in requirements format.'
log_streams = ("ext://sys.stderr", "ext://sys.stderr")
def __init__(self, *args, **kw):
super(FreezeCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help="Use the order in the given requirements file and its "
"comments when generating output. This option can be "
"used multiple times.")
self.cmd_opts.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL for finding packages, which will be added to the '
'output.')
self.cmd_opts.add_option(
'-l', '--local',
dest='local',
action='store_true',
default=False,
help='If in a virtualenv that has global access, do not output '
'globally-installed packages.')
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
self.cmd_opts.add_option(
'--all',
dest='freeze_all',
action='store_true',
help='Do not skip these packages in the output:'
' %s' % ', '.join(DEV_PKGS))
self.cmd_opts.add_option(
'--exclude-editable',
dest='exclude_editable',
action='store_true',
help='Exclude editable package from output.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
format_control = index.FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
skip = set(stdlib_pkgs)
if not options.freeze_all:
skip.update(DEV_PKGS)
freeze_kwargs = dict(
requirement=options.requirements,
find_links=options.find_links,
local_only=options.local,
user_only=options.user,
skip_regex=options.skip_requirements_regex,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
skip=skip,
exclude_editable=options.exclude_editable,
)
try:
for line in freeze(**freeze_kwargs):
sys.stdout.write(line + '\n')
finally:
wheel_cache.cleanup()
| [
"[email protected]"
] | |
0e888f3c5656339bfcd90422c0e0e3b11133b3ef | c94662c1c58f4b75e01195da6e2446993eada579 | /core/cooggerapp/signals/__init__.py | 96b341230d38d951150ab6786ad691283328fec6 | [
"MIT"
] | permissive | ozcanyarimdunya/coogger | e8f74ac215630473f88b612e6c236bd5441b32a8 | 832b9af196cf68917dabaa5b9c5ab0b80725ca6e | refs/heads/master | 2020-07-12T03:59:41.928819 | 2019-08-27T08:33:09 | 2019-08-27T08:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from .topic import when_utopic_create, increase_utopic_view
from .userextra import follow_and_repos_update, create_userprofile, send_mail_to_follow
from .issue import when_issue_delete, issue_counter
from .content import when_content_delete, when_content_create
from .commit import when_commit_create
| [
"[email protected]"
] | |
851aede55bcaefdea7999c415c35cdc90ce4b200 | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/2808. Minimum Seconds to Equalize a Circular Array/2808.py | 5390dcc57938a36fa03a978735ee042ecbc01463 | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 622 | py | class Solution:
def minimumSeconds(self, nums: List[int]) -> int:
n = len(nums)
ans = n
numToIndices = collections.defaultdict(list)
for i, num in enumerate(nums):
numToIndices[num].append(i)
def getSeconds(i: int, j: int) -> int:
"""Returns the number of seconds required to make nums[i..j] the same."""
return (i - j) // 2
for indices in numToIndices.values():
seconds = getSeconds(indices[0] + n, indices[-1])
for i in range(1, len(indices)):
seconds = max(seconds, getSeconds(indices[i], indices[i - 1]))
ans = min(ans, seconds)
return ans
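# Editor's worked example (not part of the original solution): for
# nums = [2, 1, 3, 3, 2], value 3 sits at indices [2, 3]; its largest circular
# gap is (2 + 5) - 3 = 4, so it needs 4 // 2 = 2 seconds. That is the minimum
# over all values and matches the expected answer of 2 for this input.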
| [
"[email protected]"
] | |
11a1ce5dc526b7604a7b8b1257f22f55b26ae5e1 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_noisy383.py | 552ed9b1b74d4b568697c570d633b3c387234b8a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=17
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=15
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy383.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
5c491fba4b421a9a4742e951f5c6d9f00279d088 | 88d8eed7081167bf5f81766dc5758ea4074eb9e5 | /opt2q_examples/cell_death_data_calibration/calibration_fixed_measurement_model_p9_pysb_timout_arg.py | 7be59c473e49651616e8b3bb052ae553721a3e2a | [] | no_license | michael-irvin/Opt2Q | e28ee272dc1630f1f1cbc6ef2692888d9a09b6b2 | 58c18fd7ecab11857ce386202f13a8586c329836 | refs/heads/master | 2023-04-20T00:12:09.985042 | 2021-05-15T06:20:27 | 2021-05-15T06:20:27 | 143,816,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,691 | py | import numpy as np
import datetime as dt
from scipy.stats import norm, invgamma
from pydream.core import run_dream
from pydream.convergence import Gelman_Rubin
from pydream.parameters import SampledParam
from multiprocessing import current_process
from opt2q.calibrator import objective_function
from opt2q_examples.cell_death_data_calibration.cell_death_data_calibration_setup \
import shift_and_scale_heterogeneous_population_to_new_params as sim_population
from opt2q_examples.cell_death_data_calibration.cell_death_data_calibration_setup \
import set_up_simulator, pre_processing, true_params, set_up_classifier, synth_data, \
time_axis, handle_timeouts, TimeoutException
from pysb.simulator import ScipyOdeSimulator
from opt2q_examples.apoptosis_model import model
import time
import signal
# Model name
now = dt.datetime.now()
model_name = f'apoptosis_model_tbid_cell_death_data_calibration_fmm_{now.year}{now.month}{now.day}'
# Priors
nu = 100
noisy_param_stdev = 0.20
alpha = int(np.ceil(nu/2.0))
beta = alpha/noisy_param_stdev**2
sampled_params_0 = [SampledParam(norm, loc=true_params, scale=1.5),
SampledParam(invgamma, *[alpha], scale=beta)]
n_chains = 4
n_iterations = 100000 # iterations per file-save
burn_in_len = 50000 # number of iterations during burn-in
max_iterations = 100000
# Simulator
# opt2q_solver doesn't run on Power9, but has useful methods for handling simulation results
opt2q_solver = set_up_simulator('cupsoda')
delattr(opt2q_solver, 'sim')
delattr(opt2q_solver, 'solver')
solver = ScipyOdeSimulator(model, tspan=time_axis, **{'integrator': 'lsoda', 'integrator_options': {'mxstep': 2**20}})
# Measurement Model
slope = 4
intercept = slope * -0.25 # Intercept (-0.25)
unr_coef = slope * 0.00 # "Unrelated_Signal" coef (0.00)
tbid_coef = slope * 0.25 # "tBID_obs" coef (0.25)
time_coef = slope * -1.00 # "time" coef (-1.00)
classifier = set_up_classifier()
classifier.set_params(**{'coefficients__apoptosis__coef_': np.array([[unr_coef, tbid_coef, time_coef]]),
'coefficients__apoptosis__intercept_': np.array([intercept]),
'do_fit_transform': False})
# likelihood function
def likelihood(x):
params_df = sim_population(x) # simulate heterogeneous population around new param values
opt2q_solver.param_values = params_df
# Add scipyodesolver using parameter values from Opt2Q solver
params_array = opt2q_solver._param_values_run
start_time = time.time()
try:
results = solver.run(param_values=params_array, num_processors=2, timeout=60) # run model
new_results = opt2q_solver.opt2q_dataframe(results.dataframe).reset_index()
features = pre_processing(new_results)
# run fixed classifier
prediction = classifier.transform(
features[['simulation', 'tBID_obs', 'time', 'Unrelated_Signal', 'TRAIL_conc']])
# calculate likelihood
ll = sum(np.log(prediction[synth_data.apoptosis == 1]['apoptosis__1']))
ll += sum(np.log(prediction[synth_data.apoptosis == 0]['apoptosis__0']))
elapsed_time = time.time() - start_time
print("Elapsed time: ", elapsed_time)
print(x[:len(true_params)])
print(ll)
return ll
except (ValueError, ZeroDivisionError, TypeError, TimeoutException):
elapsed_time = time.time() - start_time
print("Elapsed time: ", elapsed_time)
print(x[:len(true_params)])
return -1e10
# -------- Calibration -------
# Model Inference via PyDREAM
if __name__ == '__main__':
ncr = 25
gamma_levels = 8
p_gamma_unity = 0.1
print(ncr, gamma_levels, p_gamma_unity)
# Run DREAM sampling. Documentation of DREAM options is in Dream.py.
converged = False
total_iterations = n_iterations
sampled_params, log_ps = run_dream(parameters=sampled_params_0,
likelihood=likelihood,
niterations=n_iterations,
nchains=n_chains,
multitry=False,
nCR=ncr,
gamma_levels=gamma_levels,
adapt_gamma=True,
p_gamma_unity=p_gamma_unity,
history_thin=1,
model_name=model_name,
verbose=True,
crossover_burnin=min(n_iterations, burn_in_len),
)
# Save sampling output (sampled parameter values and their corresponding logps).
for chain in range(len(sampled_params)):
np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'parameters', sampled_params[chain])
np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'log_p', log_ps[chain])
GR = Gelman_Rubin(sampled_params)
burn_in_len = max(burn_in_len - n_iterations, 0)
print('At iteration: ', total_iterations, ' GR = ', GR)
print(f'At iteration: {total_iterations}, {burn_in_len} steps of burn-in remain.')
np.savetxt(model_name + str(total_iterations) + '.txt', GR)
old_samples = sampled_params
if np.isnan(GR).any() or np.any(GR > 1.2):
# append sample with a re-run of the pyDream algorithm
while not converged or (total_iterations < max_iterations):
starts = [sampled_params[chain][-1, :] for chain in range(n_chains)]
total_iterations += n_iterations
sampled_params, log_ps = run_dream(parameters=sampled_params_0,
likelihood=likelihood,
niterations=n_iterations,
nchains=n_chains,
multitry=False,
nCR=ncr,
gamma_levels=gamma_levels,
adapt_gamma=True,
p_gamma_unity=p_gamma_unity,
history_thin=1,
model_name=model_name,
verbose=True,
restart=True, # restart at the last sampled position
start=starts,
crossover_burnin=min(n_iterations, burn_in_len))
# Save sampling output (sampled parameter values and their corresponding logps).
for chain in range(len(sampled_params)):
np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'parameters',
sampled_params[chain])
np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'log_p', log_ps[chain])
old_samples = [np.concatenate((old_samples[chain], sampled_params[chain])) for chain in range(n_chains)]
GR = Gelman_Rubin(old_samples)
burn_in_len = max(burn_in_len - n_iterations, 0)
print('At iteration: ', total_iterations, ' GR = ', GR)
print(f'At iteration: {total_iterations}, {burn_in_len} steps of burn-in remain.')
np.savetxt(model_name + str(total_iterations) + '.txt', GR)
if np.all(GR < 1.2):
converged = True
| [
"[email protected]"
] | |
07a4523135d60b04ed51747157c9e44b0f036a7f | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/Products.CMFQuickInstallerTool-3.0.5-py2.7.egg/Products/CMFQuickInstallerTool/tests/test_install.py | 159fd3c8b974543f139c8c6551b4c3dc743d10b2 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | import doctest
import unittest
from Products.CMFTestCase import CMFTestCase
from Products.GenericSetup import EXTENSION, profile_registry
from Testing.ZopeTestCase import FunctionalDocFileSuite as Suite
CMFTestCase.installProduct('CMFQuickInstallerTool')
CMFTestCase.installProduct('CMFCalendar')
CMFTestCase.setupCMFSite()
OPTIONFLAGS = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def registerTestProfile(test):
profile_registry.registerProfile('test',
'CMFQI test profile',
'Test profile for CMFQuickInstallerTool',
'profiles/test',
'Products.CMFQuickInstallerTool',
EXTENSION,
for_=None)
def test_suite():
return unittest.TestSuite((
Suite('actions.txt',
optionflags=OPTIONFLAGS,
package='Products.CMFQuickInstallerTool.tests',
setUp=registerTestProfile,
test_class=CMFTestCase.FunctionalTestCase),
Suite('profiles.txt',
optionflags=OPTIONFLAGS,
package='Products.CMFQuickInstallerTool.tests',
test_class=CMFTestCase.FunctionalTestCase),
Suite('install.txt',
optionflags=OPTIONFLAGS,
package='Products.CMFQuickInstallerTool.tests',
test_class=CMFTestCase.FunctionalTestCase),
))
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
a0ae43e473fed201713fb7ef16cd61bf0708f846 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /zqr6f8dRD84K8Lvzk_3.py | cf260be9367695b002d1e201a20f2264da1b9dae | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | """
As stated on the [On-Line Encyclopedia of Integer
Sequences](https://oeis.org/A003215):
> The hexagonal lattice is the familiar 2-dimensional lattice in which each
> point has 6 neighbors.
A **centered hexagonal number** is a centered figurate number that represents
a hexagon with a dot in the center and all other dots surrounding the center
dot in a hexagonal lattice.
At the end of that web page the following illustration is shown:
Illustration of initial terms:
.
. o o o o
. o o o o o o o o
. o o o o o o o o o o o o
. o o o o o o o o o o o o o o o o
. o o o o o o o o o o o o
. o o o o o o o o
. o o o o
.
. 1 7 19 37
.
Write a function that takes an integer `n` and returns `"Invalid"` if `n` is
not a **centered hexagonal number** or its illustration as a multiline
rectangular string otherwise.
### Examples
hex_lattice(1) ➞ " o "
# o
hex_lattice(7) ➞ " o o \n o o o \n o o "
# o o
# o o o
# o o
hex_lattice(19) ➞ " o o o \n o o o o \n o o o o o \n o o o o \n o o o "
# o o o
# o o o o
# o o o o o
# o o o o
# o o o
hex_lattice(21) ➞ "Invalid"
### Notes
N/A
"""
def hex_lattice(n):
r = (3 + (12 * n - 3)**0.5) / 6
layers = int(r)
if layers != r : return "Invalid"
rlen = layers*4 -1
prnt = []
d = (layers-1)*2
for _ in range(layers):
prnt.append('{: ^{}}'.format('o '*d + 'o', rlen))
d -= 1
return '\n'.join(prnt[1:][::-1] + prnt)
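# Editor's note (added): the inversion above follows from the centered hexagonal
# number formula H(k) = 3*k*(k - 1) + 1. Solving 3*k**2 - 3*k + (1 - n) = 0 for k
# gives k = (3 + (12*n - 3)**0.5) / 6, so n is a valid term exactly when that root
# is a whole number, e.g. n = 19 -> (3 + 225**0.5) / 6 = 3 layers.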
| [
"[email protected]"
] | |
654481ab224a9394d1a33536a5456ad7582ecd1a | c6070314ce23ede0f7b10cf3a4126b3575909e57 | /canvas2nbgrader.py | 5248ec5c33aa093d864dec72c8d1e31de970075e | [] | no_license | vahtras/canvas2nbgrader | b4615b49c4ebdd041a3a91d9be6d4c2fd7275349 | b880e478b9c98c9976005df63620a264d257d134 | refs/heads/master | 2020-03-08T10:40:08.219057 | 2018-04-04T19:13:24 | 2018-04-04T19:13:24 | 128,078,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | #!/usr/bin/env python
"""Import student id from Canvas (Exported Grade table)"""
import sys
import csv
def get_records(csv_file):
return list(csv.DictReader(csv_file))
def split_names(records):
new = []
for r in records:
s = r['Student']
if s == 'Studenttest' or s.strip() == 'Points Possible':
pass
else:
last, first = r['Student'].split(', ')
d = dict(first_name=first, last_name=last)
new.append({**r, **d})
return new
def out_dict(records):
select = []
for r in records:
select.append(
{
'id': r["ID"],
'first_name': r["first_name"],
'last_name': r["last_name"],
"email": r["SIS Login ID"],
}
)
with open('students.csv', 'w') as csvfile:
writer = csv.DictWriter(
csvfile,
fieldnames=["id", "first_name", "last_name", "email"]
)
writer.writeheader()
for r in select:
writer.writerow(r)
def main():
try:
csv_file = sys.argv[1]
except IndexError:
print("Usage: {} csv_file".format(sys.argv[0]))
sys.exit(1)
with open(csv_file) as f:
#Remove BOM character in file
lines = [line.replace('\ufeff', '') for line in f]
records = split_names(get_records(lines))
out_dict(records)
if __name__ == "__main__":
sys.exit(main())
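# Illustrative data flow (editor's sketch; the column names come from the code above):
#   Canvas export row : Student="Doe, Jane", ID="12345", "SIS Login ID"="jdoe"
#   students.csv row  : id=12345, first_name=Jane, last_name=Doe, email=jdoe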
| [
"[email protected]"
] | |
e2840943beaeff8e715d3e36a75da31ff4fa3a26 | fb3b5ec37b2a56f1170cd750e1e30fce9a6dc125 | /test2020013004_dev_1562/urls.py | 57cd70ddd1e5bbfefbda3653605aa9b87e7edcd1 | [] | no_license | crowdbotics-apps/test2020013004-dev-1562 | cd3d2b88cbf6c0172ddb80bced1dcfa8a1843098 | a2bea260fe854574ffd38315a88953560bb335f2 | refs/heads/master | 2022-04-02T18:38:24.481241 | 2020-02-05T22:41:23 | 2020-02-05T22:41:23 | 237,356,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,994 | py | """test2020013004_dev_1562 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "Test2020013004"
admin.site.site_title = "Test2020013004 Admin Portal"
admin.site.index_title = "Test2020013004 Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="Test2020013004 API",
default_version="v1",
description="API documentation for Test2020013004 App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
61e0747aab767e96bb13632b74f3de7fa9282af3 | e146d44875fb44a13b3b004604694bccaa23ddf2 | /docs/Amadeus-master/pactravel-master/swagger_client/models/flight_search_bound.py | 5fd17c599197fd7743dbc3b5a7ee7b5989daf56d | [] | no_license | shopglobal/travel | 8d959b66d77f2e1883b671628c856daf0f3b21bb | 0c33467cd2057da6e01f9240be2fd4b8f5490539 | refs/heads/master | 2022-12-23T00:13:02.597730 | 2017-09-26T06:03:15 | 2017-09-26T06:03:15 | 104,405,869 | 0 | 0 | null | 2022-12-08T00:35:36 | 2017-09-21T22:43:23 | PHP | UTF-8 | Python | false | false | 3,993 | py | # coding: utf-8
"""
Amadeus Travel Innovation Sandbox
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlightSearchBound(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'flights': 'list[FlightSearchSegment]',
'duration': 'str'
}
attribute_map = {
'flights': 'flights',
'duration': 'duration'
}
def __init__(self, flights=None, duration=None):
"""
FlightSearchBound - a model defined in Swagger
"""
self._flights = None
self._duration = None
self.flights = flights
if duration is not None:
self.duration = duration
@property
def flights(self):
"""
Gets the flights of this FlightSearchBound.
:return: The flights of this FlightSearchBound.
:rtype: list[FlightSearchSegment]
"""
return self._flights
@flights.setter
def flights(self, flights):
"""
Sets the flights of this FlightSearchBound.
:param flights: The flights of this FlightSearchBound.
:type: list[FlightSearchSegment]
"""
if flights is None:
raise ValueError("Invalid value for `flights`, must not be `None`")
self._flights = flights
@property
def duration(self):
"""
Gets the duration of this FlightSearchBound.
The duration of this bound, including layover time, expressed in the format hh:mm
:return: The duration of this FlightSearchBound.
:rtype: str
"""
return self._duration
@duration.setter
def duration(self, duration):
"""
Sets the duration of this FlightSearchBound.
The duration of this bound, including layover time, expressed in the format hh:mm
:param duration: The duration of this FlightSearchBound.
:type: str
"""
self._duration = duration
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FlightSearchBound):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
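# Illustrative use of this generated model (editor's addition, not generated code):
#   bound = FlightSearchBound(flights=[], duration='02:35')
#   bound.to_dict()   # -> {'flights': [], 'duration': '02:35'}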
| [
"[email protected]"
] | |
de52fcf01c00c0c8c33658cec50f0c1df04688f3 | 32eba552c1a8bccb3a329d3d152b6b042161be3c | /9_pj_mcw.pyw | 3ed748fdabe7367b51ed1ef4ff3088848476a0c4 | [] | no_license | ilmoi/ATBS | d3f501dbf4b1099b76c42bead3ec48de3a935a86 | 7f6993751e2ad18af36de04168d32b049d85a9c1 | refs/heads/master | 2022-07-11T21:56:23.284871 | 2020-05-15T05:26:06 | 2020-05-15T05:26:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | pyw | import pyperclip
import sys
import shelve
# uncomment and run once!
# TEXT = {'agree': """Yes, I agree. That sounds fine to me.""",
# 'busy': """Sorry, can we do this later this week or next week?""",
# 'upsell': """Would you consider making this a monthly donation?"""}
# file =shelve.open('phrase_db')
# for key, value in list(TEXT.items()):
# file[key] = value
# # test
# print(file['agree'])
# file.close()
if len(sys.argv) < 2:
print('Usage: python 9_pj_mcw [action - save / list / item to load]')
sys.exit()
first = sys.argv[1]
file = shelve.open('phrase_db')
if first == 'save':
keyword = sys.argv[2]
text = pyperclip.paste()
file[keyword] = text
elif first == 'delete':
second = sys.argv[2]
if second == 'all':
confirm = input('are you sure you want to wipe the dic?')
if confirm == 'yes':
for key in list(file.keys()):
del file[key]
print('done! the phrase database is now empty')
else:
second = sys.argv[2]
if second in file.keys():
del file[second]
print('deleted!')
else:
print('no such keyword')
sys.exit()
elif first == 'list':
print('___current contents are:___')
for key, value in list(file.items()):
print(f'{key}: {value}')
else:
if first in file.keys():
pyperclip.copy(file[first])
print('copied to clipboard!')
else:
print('no such keyword')
sys.exit()
file.close()
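# Example sessions (editor's addition, matching the argument handling above):
#   python 9_pj_mcw.pyw save agree     # store the current clipboard text as "agree"
#   python 9_pj_mcw.pyw list           # print every stored keyword and phrase
#   python 9_pj_mcw.pyw agree          # copy the "agree" phrase back to the clipboard
#   python 9_pj_mcw.pyw delete agree   # remove a single keyword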
| [
"[email protected]"
] | |
b99eaded4ce9e40c473b322f7ef3d19ceb146945 | 3299ee49e292fc5f4a0f1c0e364eb27cd539521a | /lesson_33_homework/test_site/Articles/views.py | 0ae598994470e23fd2cc65f5fba238d2c49aba5f | [] | no_license | alexeypodorozhnyi/Python_course_django_part | 5258db76b5ca0592ed62f1d2d7a5bf8f80353bf1 | 9b51531f8b2d042210797c707d5f38b398897977 | refs/heads/master | 2020-11-26T01:52:13.467747 | 2020-01-29T20:55:23 | 2020-01-29T20:55:23 | 228,926,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from django.http import HttpResponse
from django.shortcuts import render
class MyClass:
string = ''
def __init__(self, s):
self.string = s
def index(request):
my_list = [1,2,3,4,5,6]
return render(request, 'index.html', {
'my_list': my_list
})
def new(request):
return HttpResponse("Hey your new News!")
def edit(request):
return HttpResponse("Hey you can edit News!")
def lock(request):
return HttpResponse("Hey your can lock News!")
def add(request, item_id):
if item_id:
return HttpResponse('Add new news with item id:' + str(item_id))
def processing(request, mode, item_id):
if mode == 'add':
if item_id:
return HttpResponse('Add new articles with item id:' + str(item_id))
elif mode == 'delete':
if item_id:
return HttpResponse('Delete articles with item id:' + str(item_id))
else:
return HttpResponse('Error: choose a valid mode')
def return_code(request, code):
if code:
return HttpResponse('Article code:' + str(code))
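# A possible URLconf wiring for the views above (editor's sketch; the project's
# real urls.py is not shown here, so the patterns and names are assumptions):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index),
#       path('add/<int:item_id>/', views.add),
#       path('<str:mode>/<int:item_id>/', views.processing),
#       path('code/<int:code>/', views.return_code),
#   ]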
| [
"[email protected]"
] | |
a8efba78448b1dc04d5429185122c8a008f4251c | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/tsai90k/dnn-titanic/dnn-titanic.py | 05f82ff728e796c5618951fe9f09ab2533515609 | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 35,685 | py | #!/usr/bin/env python
# coding: utf-8
# ## Setup environment
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt # for plotting graphs
import seaborn # statistical data visualization library
from functools import cmp_to_key
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
import xgboost as xgb
from sklearn import metrics
import math
import copy
import sys
import glob
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input/"))
# Any results you write to the current directory are saved as output.
# In[ ]:
class BucketMapper(object):
def __init__(self, field_info, assume_no_na = False):
self.field_info = copy.copy(field_info)
self.digit_keys = list(self.field_info.keys())
self.digit_keys.sort()
self.digits = []
self.digits_rev = []
self.multipiliers = []
self.total_comb = 1
self.assume_no_na = assume_no_na
self.add_for_na = 1
if self.assume_no_na == True:
self.add_for_na = 0
# for example:
# 1. (consider N/A) If field_info = {'A':['a','b'],'B':['x']}
# then combinations will be 3 * 2:
# 0 -> 0 0 -> (na, na) -> [1, 0, 0, 0, 0, 0]
# 1 -> 0 1 -> (na, 'x') -> [0, 1, 0, 0, 0, 0]
# 2 -> 1 0 -> ('a', na) -> [0, 0, 1, 0, 0, 0]
# 3 -> 1 1 -> ('a', 'x') -> [0, 0, 0, 1, 0, 0]
# 4 -> 2 0 -> ('b', na) -> [0, 0, 0, 0, 1, 0]
# 5 -> 2 1 -> ('b', 'x') -> [0, 0, 0, 0, 0, 1]
#
# then self.digit_keys = ['A','B']
# then self.digits will become
# [
# {'a':1, 'b':2}, # implies that N/A = 0
# {'x':1}, # implies that N/A = 0
# ]
# and self.multipiliers will become
# [
# 2, # multipiliers of digits of 'A' field
# 1, # multipiliers of digits of 'B' field
# ]
#
# so ('a', 'x') will be (1*2) + (1*1) = 3
# and ('b', 'x') will be (2*2) + (1*1) = 5
#
# 2. (assume no N/A) If the field info = {'A':['a','b','c'],'B':['x','y']}
# NOTE: if there is no N/A, then a field MUST have at least 2 categories.
# then combination will be 3 * 2:
# 0 -> 0 0 -> ('a', 'x') -> [1, 0, 0, 0, 0, 0]
# 1 -> 0 1 -> ('a', 'y') -> [0, 1, 0, 0, 0, 0]
# 2 -> 1 0 -> ('b', 'x') -> [0, 0, 1, 0, 0, 0]
# 3 -> 1 1 -> ('b', 'y') -> [0, 0, 0, 1, 0, 0]
# 4 -> 2 0 -> ('c', 'x') -> [0, 0, 0, 0, 1, 0]
# 5 -> 2 1 -> ('c', 'y') -> [0, 0, 0, 0, 0, 1]
#
# then self.digit_keys = ['A','B']
# then self.digits will become
# [
# {'a':0, 'b':1, 'c':2},
# {'x':0, 'y':1},
# ]
# and self.multipiliers will become
# [
# 2, # multipiliers of digits of 'A' field
# 1, # multipiliers of digits of 'B' field
# ]
#
# so ('a', 'x') will be (0*2) + (0*1) = 0
# and ('b', 'y') will be (1*2) + (1*1) = 3
# check each field to ensure every felds have at least 2 categories
# include or not include N/A
for k in self.digit_keys:
if (len(self.field_info[k]) + self.add_for_na) < 2:
if self.assume_no_na == True:
raise ValueError('the field %s must have at least 2 categories' % (k))
else:
raise ValueError('the field %s must have at least 1 category' % (k))
for i in range(0, len(self.digit_keys)):
k = self.digit_keys[i]
next_i = i + 1
# a higher digit's base (mul) is decided by variation of its lower digit
mul = 1
if next_i < len(self.digit_keys):
next_k = self.digit_keys[next_i]
mul = len(self.field_info[next_k]) + self.add_for_na
dig = copy.copy(self.field_info[k])
self.total_comb *= (len(dig) + self.add_for_na)
dig_d = {}
# with N/A, i starts from 1 (0 for N/A)
# without N/A, i starts from 0
i = self.add_for_na
for v in dig:
dig_d[v] = i
i += 1
self.multipiliers.append(1)
self.multipiliers = [_m * mul for _m in self.multipiliers]
self.digits.append(dig_d)
self.digits_rev.append(dict([(_v,_k) for (_k,_v) in dig_d.items()]))
def fields_to_bucket_id(self, fields):
n = 0
for i in range(0, len(fields)):
mul = self.multipiliers[i]
dig_d = self.digits[i]
k = fields[i]
try:
n += (dig_d[k] * mul)
except KeyError as e:
if self.assume_no_na == True:
raise ValueError('unexpected value: %s is not allowed because assume_no_na is True' % (str(e)))
else:
n += 0
return n
def bucket_id_to_fields(self, bucket_id):
fields = []
for i in range(0, len(self.digit_keys)):
mul = self.multipiliers[i]
dig_rev_d = self.digits_rev[i]
digit = bucket_id // mul
bucket_id = bucket_id % mul
try:
fields.append(str(dig_rev_d[digit]))
except KeyError as e:
if self.assume_no_na == True:
raise ValueError('unexpected value: %s is not allowed because assume_no_na is True' % (str(e)))
else:
fields.append('na')
return str.join('_', fields)
def to_bucketlized(self, df, name):
df1 = df[self.digit_keys]
df2 = pd.DataFrame()
s = []
for i in range(0, len(df1)):
r = [n for n in df1.iloc[i]]
bucket_id = self.fields_to_bucket_id(r)
s.append(bucket_id)
df2[name] = s
return df2
def to_one_hot(self, df, name):
df1 = df[self.digit_keys]
df2 = pd.DataFrame()
s = []
for i in range(0, self.total_comb):
s.append([])
for i in range(0, len(df1)):
r = [n for n in df1.iloc[i]]
bucket_id = self.fields_to_bucket_id(r)
for j in range(0, self.total_comb):
s[j].append(0)
s[bucket_id][-1] = 1
for i in range(0, self.total_comb):
field_name = self.bucket_id_to_fields(i)
df2['%s_%s' % (name, field_name)] = s[i]
return df2
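# Editor's illustrative usage (not part of the original notebook), mirroring the
# worked example in the comments above:
#   bm = BucketMapper({'A': ['a', 'b'], 'B': ['x']})   # N/A allowed -> 3 * 2 buckets
#   bm.fields_to_bucket_id(['a', 'x'])                 # -> 3
#   bm.bucket_id_to_fields(5)                          # -> 'b_x'
#   bm.to_one_hot(pd.DataFrame({'A': ['a'], 'B': ['x']}), 'AB')  # column 'AB_a_x' == 1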
# ## Examine the data first
# In[ ]:
df_train = pd.read_csv('../input/train.csv')
df_train.describe()
print('record number of df_train = %d' % (len(df_train)))
# In[ ]:
df_train.head(50)
# In[ ]:
df_test = pd.read_csv('../input/test.csv')
df_test.describe()
print('record number of df_test = %d' % (len(df_test)))
# In[ ]:
# combine training data and test data
df_all = df_test.copy()
df_all = df_all.append(df_train, ignore_index = True)
# In[ ]:
# list all fields
print('total fields = %d' % (len(df_train.keys())))
print('fields = %s' % (df_train.keys()))
# In[ ]:
# check N/A fields
df_all.drop('Survived', axis = 1).loc[:,df_all.isnull().any()].isnull().sum()
# In[ ]:
def log_no_error(n):
if n <= 0:
return 0.0
else:
return np.log1p(n)
def min2(l, default = 0.0):
if len(l) == 0:
return default
else:
return min(l)
def max2(l, default = 0.0):
if len(l) == 0:
return default
else:
return max(l)
def avg2(l, default = 0.0):
if len(l) == 0:
return default
else:
return float(sum(l)) / float(len(l))
def std2(l, default = 0.0):
if len(l) == 0:
return default
else:
return np.std(l)
def histogram_for_non_numerical_series(s):
d = {}
for v in s:
d[v] = d.get(v, 0) + 1
bin_s_label = list(d.keys())
bin_s_label.sort()
bin_s = list(range(0, len(bin_s_label)))
hist_s = [d[v] for v in bin_s_label]
bin_s.append(len(bin_s))
bin_s_label.insert(0, '_')
return (hist_s, bin_s, bin_s_label)
def plot_hist_with_target3(plt, df, feature, target, histogram_bins = 10):
# reference:
# https://stackoverflow.com/questions/33328774/box-plot-with-min-max-average-and-standard-deviation
# https://matplotlib.org/gallery/api/two_scales.html
# https://matplotlib.org/1.2.1/examples/pylab_examples/errorbar_demo.html
# https://matplotlib.org/2.0.0/examples/color/named_colors.html
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.xticks.html
title = feature
plt.title(title)
s = df[feature]
t = df[target]
t_max = max(t)
# get histogram of the feature
bin_s_label = None
# fillna with 0.0 or '_N/A_'
na_cnt = sum(s.isna())
if na_cnt > 0:
if True in [type(_) == str for _ in s]:
print('found %d na in string field %s' % (na_cnt, feature))
s = s.fillna('_N/A_')
else:
print('found %d na in numerical field %s' % (na_cnt, feature))
s = s.fillna(0.0)
try:
hist_s, bin_s = np.histogram(s, bins = histogram_bins)
except Exception as e:
# print('ERROR: failed to draw histogram for %s: %s: %s' % (name, type(e).__name__, str(e)))
hist_s, bin_s, bin_s_label = histogram_for_non_numerical_series(s)
# return
# histogram of target by distribution of feature
hist_t_by_s_cnt = [0] * (len(bin_s) - 1)
hist_t_by_s = []
for i in range(0, (len(bin_s) - 1)):
hist_t_by_s.append([])
# get target histogram for numerical feature
if bin_s_label is None:
for (sv, tv) in zip(s, t):
pos = 0
for i in range(0, len(bin_s) - 1):
if sv >= bin_s[i]:
pos = i
hist_t_by_s_cnt[pos] += 1
hist_t_by_s[pos].append(tv)
bin_s_new = []
hist_t_by_s_new = []
bin_s_new.append(bin_s[0])
for (bv, hv) in zip(bin_s[1:], hist_t_by_s):
if len(hv) != 0:
bin_s_new.append(bv)
hist_t_by_s_new.append(hv)
bin_s_err = bin_s_new
hist_t_by_s = hist_t_by_s_new
# print('target_hist:\n%s(%d)\n%s(%d)' % (bin_s, len(bin_s), hist_t_by_s, len(hist_t_by_s)))
else:
for (sv, tv) in zip(s, t):
pos = bin_s_label.index(sv) - 1
hist_t_by_s_cnt[pos] += 1
hist_t_by_s[pos].append(tv)
# count avg, to re-sort bin_s and bin_s_label by avg
hist_t_by_s_avg = [float(avg2(n)) for n in hist_t_by_s]
# hist_t_by_s_std = [float(std2(n)) for n in hist_t_by_s]
# hist_t_by_s_adj = list(np.array(hist_t_by_s_avg) + np.array(hist_t_by_s_std))
hist_t_by_s_adj = hist_t_by_s_avg
# print('before sort:\n%s\n%s\n%s' % (bin_s, bin_s_label, hist_t_by_s_adj))
bin_hist_label = list(zip(bin_s[1:], hist_t_by_s_adj, bin_s_label[1:]))
bin_hist_label.sort(key = cmp_to_key(lambda x, y: x[1] - y[1]))
(bin_s, hist_t_by_s_adj, bin_s_label) = zip(*bin_hist_label)
bin_s = list(bin_s)
hist_t_by_s_adj = list(hist_t_by_s_adj)
bin_s_label = list(bin_s_label)
bin_s.insert(0, 0)
bin_s_label.insert(0, '_')
# re-arrange hist_s and hist_t_by_s
hist_s_new = []
hist_t_by_s_new = []
for i in bin_s[1:]:
hist_s_new.append(hist_s[i - 1])
hist_t_by_s_new.append(hist_t_by_s[i - 1])
hist_s = hist_s_new
hist_t_by_s = hist_t_by_s_new
# print('after sort:\n%s\n%s\n%s' % (bin_s, bin_s_label, hist_t_by_s_adj))
# reset bin_s's ordering
bin_s.sort()
bin_s_err = bin_s
hist_s = list(hist_s)
if len(hist_s) < len(bin_s):
hist_s.insert(0, 0.0)
hist_s_max = max(hist_s)
plt.fill_between(bin_s, hist_s, step = 'mid', alpha = 0.5, label = feature)
if bin_s_label is not None:
plt.xticks(bin_s, bin_s_label)
plt.xticks(rotation = 90)
# just to show legend for ax2
# plt.errorbar([], [], yerr = [], fmt = 'ok', lw = 3, ecolor = 'sienna', mfc = 'sienna', label = target)
plt.legend(loc = 'upper right')
hist_t_by_s = list(hist_t_by_s)
if len(hist_t_by_s) < len(bin_s):
hist_t_by_s.insert(0, [0.0])
hist_t_by_s_min = [float(min2(n)) for n in hist_t_by_s]
hist_t_by_s_max = [float(max2(n)) for n in hist_t_by_s]
hist_t_by_s_avg = [float(avg2(n)) for n in hist_t_by_s]
# hist_t_by_s_std = [float(std2(n)) for n in hist_t_by_s][1:]
hist_t_by_s_25_percentile = [float(np.percentile(n, 25)) for n in hist_t_by_s]
hist_t_by_s_75_percentile = [float(np.percentile(n, 75)) for n in hist_t_by_s]
hist_t_by_s_std = [[max(0, _) for _ in (np.array(hist_t_by_s_avg) - np.array(hist_t_by_s_25_percentile))[1:]],
[max(0, _) for _ in (np.array(hist_t_by_s_75_percentile) - np.array(hist_t_by_s_avg))[1:]]]
hist_t_by_s_err = [(np.array(hist_t_by_s_avg) - np.array(hist_t_by_s_min))[1:], (np.array(hist_t_by_s_max) - np.array(hist_t_by_s_avg))[1:]]
plt.xlabel(feature)
plt.ylabel('Count')
ax2 = plt.twinx()
ax2.grid(False)
ax2.errorbar(bin_s_err[1:], hist_t_by_s_avg[1:], yerr = hist_t_by_s_err, fmt='.k', lw = 1, ecolor = 'sienna')
ax2.errorbar(bin_s_err[1:], hist_t_by_s_avg[1:], yerr = hist_t_by_s_std, fmt='ok', lw = 3, ecolor = 'sienna', mfc = 'sienna', label = target)
ax2.set_ylabel(target)
plt.legend(loc = 'upper left')
plt.tight_layout()
def plot_df(df, y, fields = None):
if fields is None:
fields = df.keys()
figs = len(fields)
cols = 4
rows = int(figs / cols)
if (rows * cols) < figs:
rows += 1
plt.figure(figsize = (5 * cols, 5 * rows))
i = 1
for name in fields:
if name == y:
continue
plt.subplot(rows, cols, i)
plot_hist_with_target3(plt, df, name, y, histogram_bins = 'rice')
i += 1
plt.tight_layout()
# In[ ]:
# draw relationship between target and features
plot_df(df_train.drop('Name', axis = 1), y = 'Survived')
# In[ ]:
plt.figure(figsize = (10, 10))
seaborn.heatmap(df_train.corr())
# In[ ]:
_df = df_train.copy()
# ### Pclass
# Female and male passengers have very different Survived distributions across Pclass
# In[ ]:
plot_df(_df[['Pclass','Survived']], y = 'Survived')
# #### Female and Pclass
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['Pclass','Survived']], y = 'Survived')
# #### Male and Pclass
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['Pclass','Survived']], y = 'Survived')
# ### Cabin
# If we categorize cabin by its prefix letter, we can see there are five groups:
# - B, E, D: 70 ~ 80% of survive rate
# - C, F: 60% of survive rate
# - A, G: 50% of survive rate
# - nan: 30% of survive rate
# - T: 0% of survive rate
#
# In[ ]:
_df['Cabin'] = df_train['Cabin'].apply(lambda x: x[0] if type(x) == str else 'nan')
plot_df(_df[['Cabin','Survived']], y = 'Survived')
# #### Female and Cabin
# Females have a 100% survival rate in some categories (A, B, D, F); if we only grouped by average survival rate, A would not land in the highest-survival group.
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['Cabin','Survived']], y = 'Survived')
# #### Male and Cabin
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['Cabin','Survived']], y = 'Survived')
# ### Ticket
# If we categorize ticket id by prefix alphabet (if no prefix, set is as 'num' category), we can see there are some groups:
# - 9: 100 % of survive rate
# - 1,P: 60 ~ 65% of survive rate
# - F: 50 ~ 60% of survive rate
# - 2,C,S: 30% ~ 40% of survive rate
# - L,3,4,6,W,7: 15% ~ 30% of survive rate
# - A,5,8: 10% of survive rate
# In[ ]:
_df['Ticket'] = df_train['Ticket'].apply(lambda x: x[0] if type(x) == str else 'num')
plot_df(_df[['Ticket','Survived']], y = 'Survived')
# #### Female and Ticket
# Some female have 100% of survive rate at 9,F category, and have 0% of survive rate at A,7 category, and have low survive rate at W,4 category.
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['Ticket','Survived']], y = 'Survived')
# #### Male and Ticket
# - Some male have 0% of survive rate in some categories: 4,5,8,F,W
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['Ticket', 'Survived']], y = 'Survived')
# ### Age
# In[ ]:
# convert N/A age to 100 to see its distribution
_df2 = _df.copy()
_df2['Age'] = _df2['Age'].fillna(100)
plot_df(_df2[['Age', 'Survived']], y = 'Survived')
# #### Female and Age
# In[ ]:
plot_df(_df2[_df2['Sex'] == 'female'][['Age', 'Survived']], y = 'Survived')
# N/A values of female age can be filled with the median female age
# In[ ]:
_df[_df['Sex'] == 'female']['Age'].median()
# #### Male and Age
# Male older then 62 have almost 0% of survive rate (except 80)
# In[ ]:
plot_df(_df2[_df2['Sex'] == 'male'][['Age', 'Survived']], y = 'Survived')
# In[ ]:
_df[(_df['Age'] > 62) & (_df['Sex'] == 'male')]
# N/A value of male age should can be filled with median of male age
# In[ ]:
_df[_df['Sex'] == 'male']['Age'].median()
# ### Fare
# Use median value as N/A value
# In[ ]:
_df['Fare'].median()
# In[ ]:
plot_df(_df[['Fare', 'Survived']], y = 'Survived')
# #### Female and Fare
# Female in some fare range has 100% of survive rate
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['Fare', 'Survived']], y = 'Survived')
# #### Male and Fare
# Males in the lowest fare range have the lowest survival rate, a 0% survival rate when the fare is between 200 and 500, and a 100% survival rate when the fare is above 500.
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['Fare', 'Survived']], y = 'Survived')
# In[ ]:
_df[(_df['Sex'] == 'male') & (_df['Fare'] > 200) & (_df['Fare'] < 500)].sort_values(by = ['Fare'])
# In[ ]:
_df[(_df['Sex'] == 'male') & (_df['Fare'] > 500)].sort_values(by = ['Fare'])
# ### SibSp
# In[ ]:
plot_df(_df[['SibSp', 'Survived']], y = 'Survived')
# #### Female and SibSp
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['SibSp', 'Survived']], y = 'Survived')
# #### Male and SibSp
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['SibSp', 'Survived']], y = 'Survived')
# ### Parch
# In[ ]:
plot_df(_df[['Parch', 'Survived']], y = 'Survived')
# #### Female and Parch
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['Parch', 'Survived']], y = 'Survived')
# #### Male and Parch
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['Parch', 'Survived']], y = 'Survived')
# ### Embarked
# In[ ]:
plot_df(_df[['Embarked', 'Survived']], y = 'Survived')
# #### Female and Embarked
# 2 N/A records are all survived, maybe just set their Embarked to C
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['Embarked', 'Survived']], y = 'Survived')
# #### Male and Embarked
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['Embarked', 'Survived']], y = 'Survived')
# ### SibSp and Parch
# Seems that adding SibSp and Parch can not separate Survived well.
# In[ ]:
_df['SibSp_Parch'] = _df['SibSp'] + _df['Parch']
plot_df(_df[['SibSp_Parch', 'SibSp', 'Parch', 'Survived']], y = 'Survived')
# #### Female and SibSp_Parch
# In[ ]:
plot_df(_df[_df['Sex'] == 'female'][['SibSp_Parch', 'SibSp', 'Parch', 'Survived']], y = 'Survived')
# #### Male and SibSp_Parch
# In[ ]:
plot_df(_df[_df['Sex'] == 'male'][['SibSp_Parch', 'SibSp', 'Parch', 'Survived']], y = 'Survived')
# In[ ]:
fix_funcs = []
'''
Following fields should be categorical fields
- Pclass
'''
def fix_df_pclass_to_cat(df):
df1 = df.copy()
for k in ['Pclass']:
s = df1[k]
df1[k] = s.astype(str)
return df1
fix_funcs.append(fix_df_pclass_to_cat)
'''
1. The first letter of Cabin will be the category name; replace N/A with the 'nan' category
2. The first letter of Ticket will be the category name; replace N/A with the 'nan' category
'''
def fix_df_cabin_and_ticket(df):
df1 = df.copy()
for k in ['Cabin', 'Ticket']:
df1[k] = df1[k].apply(lambda x: x[0] if type(x) == str else 'nan')
return df1
fix_funcs.append(fix_df_cabin_and_ticket)
'''
1. Add SibSp and Parch + 1 as the family number
2. Convert Fare to 4 levels
3. Convert Age to 5 levels
'''
def wrap_fix_df_copied_from_kaggle():
d = {}
def _fix_df_copied_from_kaggle(df):
df1 = df.copy()
# FamilyNum
df1['FamilyNum'] = df1['SibSp'] + df1['Parch'] + 1
# IsAlone, single passenger?
# df1['IsAlone'] = df1['FamilyNum'].apply(lambda x: 1 if x == 1 else 0)
# bucketlize Fare to 4 quantilies
if not ('fare_med' in d):
d['fare_med'] = df1['Fare'].median()
fare_med = d['fare_med']
df1['FareLevel'] = df1['Fare'].fillna(fare_med)
if not ('fare_bin' in d):
d['fare_bin'] = list(pd.qcut(df1['FareLevel'], 4).unique())
d['fare_bin'].sort()
fare_bin = d['fare_bin']
df1['FareLevel'] = df1['FareLevel'].apply(lambda x: sum([x >= b.left for b in fare_bin]))
# bucketlize Age to 5 buckets
if not ('age_med' in d):
d['age_med'] = df1['Age'].median()
age_med = d['age_med']
df1['AgeLevel'] = df1['Age'].fillna(age_med)
if not ('age_bin' in d):
d['age_bin'] = list(pd.cut(df1['AgeLevel'], 5).unique())
d['age_bin'].sort()
age_bin = d['age_bin']
df1['AgeLevel'] = df1['AgeLevel'].apply(lambda x: sum([x >= b.left for b in age_bin]))
return df1
return _fix_df_copied_from_kaggle
# fix_funcs.append(wrap_fix_df_copied_from_kaggle())
'''
Converts Age to categorical feature, 20 years a category
Age has N/A value, use median value.
'''
def wrap_fix_age():
med_l = []
def _fix_age(df):
df1 = df.copy()
if len(med_l) == 0:
med_l.append(df1['Age'].median())
med = med_l[0]
df1['Age'] = df1['Age'].fillna(med)
df1['Age'] = df1['Age'].apply(lambda x: str(int(x / 10) * 10))
return df1
return _fix_age
fix_funcs.append(wrap_fix_age())
'''
Converts Fare to categorical feature, 100 a category
Fare has N/A value, use median value.
'''
def wrap_fix_fare():
med_l = []
def _fix_fare(df):
df1 = df.copy()
if len(med_l) == 0:
med_l.append(df1['Fare'].median())
med = med_l[0]
df1['Fare'] = df1['Fare'].fillna(med)
df1['Fare'] = df1['Fare'].apply(lambda x: str(int(x / 100) * 100))
return df1
return _fix_fare
fix_funcs.append(wrap_fix_fare())
'''
Converts SibSp to no(0), less(1~2), Many(3+)
'''
def fix_sibsp(df):
df1 = df.copy()
df1['SibSp'] = df1['SibSp'].apply(lambda x: 'no' if x == 0 else 'less' if x <= 2 else 'many')
return df1
fix_funcs.append(fix_sibsp)
'''
Converts Parch to no(0), less(0~3), Many(4+)
'''
def fix_parch(df):
df1 = df.copy()
df1['Parch'] = df1['Parch'].apply(lambda x: 'no' if x == 0 else 'less' if x <= 3 else 'many')
return df1
fix_funcs.append(fix_parch)
'''
Converts N/A in Embarked to a new category most repeated value
'''
def fix_embarked(df):
df1 = df.copy()
most_repeated_value = df1['Embarked'].mode()[0]
df1['Embarked'] = df1['Embarked'].fillna(most_repeated_value)
return df1
fix_funcs.append(fix_embarked)
'''
Convert categorical fields to binned fields and do cross features
'''
def wrap_fix_bin_and_cross():
bm_d = {}
def _fix_bin_and_cross(df):
df1 = df.copy()
for kl in [
['SibSp','Parch'],
['Sex','Age'],
['Sex'],
['Cabin'],
['Pclass'],
['SibSp'],
['Parch'],
['Fare'],
['Ticket'],
['Embarked'],
['Age']
]:
kname = str.join('_', kl)
if not (kname in bm_d):
field_info = {}
for k in kl:
field_info[k] = df1[k].unique()
bm_d[kname] = BucketMapper(field_info, assume_no_na = True)
bm = bm_d[kname]
df_kl = df1[kl]
df_bin = bm.to_one_hot(df_kl, kname)
df1 = df1.join(df_bin)
return df1
return _fix_bin_and_cross
fix_funcs.append(wrap_fix_bin_and_cross())
'''
Drop fields
'''
def fix_df_drop_fields(df):
df1 = df.copy()
for k in [
'Name',
'PassengerId',
'Sex',
'Pclass',
'Cabin',
'Parch',
'Fare',
'SibSp',
'Ticket',
'Embarked',
'Age'
]:
df1.drop(k, axis = 1, inplace = True)
return df1
fix_funcs.append(fix_df_drop_fields)
def fix_df(df, funcs):
df1 = df.copy()
for func in funcs:
df1 = func(df1)
return df1
df_train[df_train['Sex'] == 'male']
print('fix df_train2')
df_train2 = fix_df(df_train, fix_funcs)
print('fix df_test2')
df_test2 = fix_df(df_test, fix_funcs)
print('features in df_train2 (%d) = %s' % (len(df_train2.keys()), list(df_train2.keys())))
print('features in df_test2 (%d) = %s' % (len(df_test2.keys()), list(df_test2.keys())))
features = list(df_train2.keys())
features.remove('Survived')
# In[ ]:
# draw relationship between target and features
# plot_df(df_train2, y = 'Survived')
# ### DNN model
# In[ ]:
def construct_feature_columns(input_features):
"""Construct the TensorFlow Feature Columns.
Args:
input_features: The names of the numerical input features to use.
Returns:
A set of feature columns
"""
return set([tf.feature_column.numeric_column(my_feature)
for my_feature in input_features])
def train_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Input function for training
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
#features = {key:np.array(value) for key,value in dict(features).items()}
features = dict(features)
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
def pred_input_fn(features, batch_size=1):
"""Input function for prediction
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
#features = {key:np.array(value) for key,value in dict(features).items()}
features = dict(features)
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices(features) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
features = ds.make_one_shot_iterator().get_next()
return features
def calc_err_at_threshold(y_prob, y_true, threshold):
y_true = np.array(y_true)
y_prob = np.array(y_prob)
y_pred = np.array([1 if p >= threshold else 0 for p in y_prob])
err = sum(y_pred != y_true).astype(float) / len(y_true)
return err
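# Editor's example: with y_prob = [0.2, 0.8, 0.6], y_true = [0, 1, 0] and a 0.5
# threshold, the predictions are [0, 1, 1]; one of three labels is wrong, so the
# returned error is 1/3.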
def split_train_validate(df, ratio):
train_num = int(len(df) * ratio)
validate_num = len(df) - train_num
return df.head(train_num), df.tail(validate_num)
def train_dnn_classifier(hidden_units,
learning_rate,
steps,
batch_size,
df_train,
df_validate,
features,
target,
threshold):
"""Trains a dnn classification model.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: An `int`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
df_train: A `DataFrame` containing the training features and labels.
df_validate: A `DataFrame` containing the validation features and labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# prepare features and targets
train_features = df_train[features]
train_targets = df_train[target]
validate_features = df_validate[features]
validate_targets = df_validate[target]
# create the input functions.
train_fn = lambda: train_input_fn(features = train_features,
targets = train_targets,
batch_size = batch_size,
shuffle = True,
num_epochs = None)
train_pred_fn = lambda: train_input_fn(features = train_features,
targets = train_targets,
batch_size = 1,
shuffle = False,
num_epochs = 1)
validate_pred_fn = lambda: train_input_fn(features = validate_features,
targets = validate_targets,
batch_size = 1,
shuffle = False,
num_epochs = 1)
# Create a DNNClassifier object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
hidden_units = hidden_units,
feature_columns=construct_feature_columns(train_features),
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
train_validate_metrics = pd.DataFrame()
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn = train_fn,
steps = steps_per_period
)
# Take a break and compute probabilities.
train_pred = list(classifier.predict(input_fn=train_pred_fn))
train_prob = np.array([item['probabilities'] for item in train_pred])
validate_pred = list(classifier.predict(input_fn=validate_pred_fn))
validate_prob = np.array([item['probabilities'] for item in validate_pred])
# Compute training and validation errors.
train_metrics = {
'train-logloss': [metrics.log_loss(train_targets, train_prob)],
'test-logloss': [metrics.log_loss(validate_targets, validate_prob)],
'train-error': [calc_err_at_threshold([p[1] for p in train_prob], train_targets, threshold)],
'test-error': [calc_err_at_threshold([p[1] for p in validate_prob], validate_targets, threshold)],
}
# Occasionally print the current loss.
print(" period %02d (%d samples): LogLoss: %0.2f/%0.2f, Error: %0.2f/%0.2f" % (period, (period + 1) * steps_per_period * batch_size,
train_metrics['train-logloss'][0],
train_metrics['test-logloss'][0],
train_metrics['train-error'][0],
train_metrics['test-error'][0]))
# Add the loss metrics from this period to our list.
train_validate_metrics = train_validate_metrics.append(train_metrics, ignore_index = True)
print("Model training finished.")
# Remove event files to save disk space.
  _ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))  # list() forces the lazy map to run so files are removed on Python 3 too
# Output a graph of loss metrics over periods.
plt.figure(figsize = (10, 5))
plt.subplot(1, 2, 1)
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(list(train_validate_metrics['train-logloss']), label="training")
plt.plot(list(train_validate_metrics['test-logloss']), label="validation")
plt.legend()
# Output a graph of error metrics over periods.
plt.subplot(1, 2, 2)
plt.ylabel("Error")
plt.xlabel("Periods")
plt.title("Error vs. Periods")
plt.plot(list(train_validate_metrics['train-error']), label="training")
plt.plot(list(train_validate_metrics['test-error']), label="validation")
plt.legend()
plt.tight_layout()
return classifier
# ### Use DNN
# In[ ]:
df_train3 = df_train2.reindex(np.random.permutation(df_train2.index))
df_train3 = df_train3.reset_index(drop = True)
(df_train4, df_validate4) = split_train_validate(df_train3, 0.7)
dnn_classifier = train_dnn_classifier(hidden_units = [30],
learning_rate = 0.03,
steps = 1000,
batch_size = 30,
df_train = df_train4,
df_validate = df_validate4,
features = features,
target = 'Survived',
threshold = 0.5)
# In[ ]:
# predict the test data
test_predict_fn = lambda: pred_input_fn(features = df_test2, batch_size = 1)
test_pred = list(dnn_classifier.predict(input_fn = test_predict_fn))
test_prob = np.array([item['probabilities'] for item in test_pred])
test_label = np.array([1 if p[1] >= 0.5 else 0 for p in test_prob])
df_submit = pd.DataFrame()
df_submit['PassengerId'] = df_test['PassengerId']
df_submit['Survived'] = test_label
df_submit.to_csv('./test_prediction_dnn.csv', index = False)
print(df_submit.head(20))
| [
"[email protected]"
] | |
a88b2e2e2cbe66502e17b460f6c306fee092a54e | 660e87488effa7f3e6c043cf45a11bc59b31a1e9 | /setup.py | 59eae9dd4f0ecf542ee3dfeaa1513f436fa1a424 | [] | no_license | bramwelt/alabaster | df967aa165ea15678cce0b960d2993cff058c697 | 65876483837ecdd4e6798b4a5c5b1842f598f4f2 | refs/heads/master | 2021-01-17T15:48:51.117359 | 2014-01-01T03:07:10 | 2014-01-01T03:07:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | #!/usr/bin/env python
from setuptools import setup
# Version info -- read without importing
_locals = {}
with open('alabaster/_version.py') as fp:
exec(fp.read(), None, _locals)
version = _locals['__version__']
setup(
name='alabaster',
version=version,
description='A configurable sidebar-enabled Sphinx theme',
author='Jeff Forcier',
author_email='[email protected]',
url='https://github.com/bitprophet/sphinx-theme',
packages=['alabaster'],
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
],
)
| [
"[email protected]"
] | |
a0f34dfff064add39a5a0e2c24fef9d5508e159a | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/schcha027/question2.py | 592328ec8d41a424a62af6a489e967d7653733b3 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | hours=eval(input("Enter the hours:\n"))
minutes=eval(input("Enter the minutes:\n"))
seconds=eval(input("Enter the seconds:\n"))
if 0<=hours<=23:
checkH=1
else:
checkH=0
if 0<=minutes<=59:
checkM=1
else:
checkM=0
if 0<=seconds<=59:
checkS=1
else:
checkS=0
#print("H: ",checkH , " M: ",checkM , " S: ",checkS )
if checkM==1 and checkH==1 and checkS==1:
print("Your time is valid.")
else:
print("Your time is invalid.")
| [
"[email protected]"
] | |
2c4948d9b6fb6d08543014d62e1eec37adfb6f85 | 4dbe3b1b2af3ff77e8086ec32ab58dcf47849a3e | /dynamo3/connection.py | 51e7dfcbb584766f04e06a7e9a35022cfd55775a | [
"MIT"
] | permissive | mnpk/dynamo3 | b83dc700345972ea2336ac8ca842fd9f23edf5c2 | 51eacee60bdf8d058831a9ab3583a2cfe9f91ca9 | refs/heads/master | 2021-01-16T21:54:32.089114 | 2016-04-30T00:53:55 | 2016-04-30T00:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,842 | py | """ Connection class for DynamoDB """
from contextlib import contextmanager
import time
import warnings
import botocore.session
import six
from botocore.exceptions import ClientError
from .batch import BatchWriter, encode_query_kwargs
from .constants import NONE, COUNT, INDEXES, READ_COMMANDS
from .exception import translate_exception, DynamoDBError, ThroughputException
from .fields import Throughput, Table
from .result import (ResultSet, GetResultSet, Result, Count, ConsumedCapacity,
TableResultSet, Limit)
from .types import Dynamizer, is_null
def build_expected(dynamizer, expected):
""" Build the Expected parameters from a dict """
ret = {}
for k, v in six.iteritems(expected):
if is_null(v):
ret[k] = {
'Exists': False,
}
else:
ret[k] = {
'Exists': True,
'Value': dynamizer.encode(v),
}
return ret
def build_expression_values(dynamizer, expr_values, kwargs):
""" Build ExpresionAttributeValues from a value or kwargs """
if expr_values:
values = expr_values
return dynamizer.encode_keys(values)
elif kwargs:
values = dict(((':' + k, v) for k, v in six.iteritems(kwargs)))
return dynamizer.encode_keys(values)
class DynamoDBConnection(object):
"""
Connection to DynamoDB.
You should generally call :meth:`~.connect` instead of the constructor.
Parameters
----------
client : :class:`~botocore.client.BaseClient`, optional
The botocore client that will be used for requests
dynamizer : :class:`~dynamo3.types.Dynamizer`, optional
The Dynamizer object to use for encoding/decoding values
Attributes
----------
request_retries : int
Number of times to retry an API call if the throughput is exceeded
(default 10)
default_return_capacity : bool
If true, all relevant calls will default to fetching the
ConsumedCapacity
"""
def __init__(self, client=None, dynamizer=Dynamizer()):
self.client = client
self.dynamizer = dynamizer
self.request_retries = 10
self.default_return_capacity = False
self._hooks = None
self.clear_hooks()
self.rate_limiters = []
@property
def host(self):
""" The address of the endpoint """
return self.client.meta.endpoint_url
@property
def region(self):
""" The name of the current connected region """
return self.client.meta.region_name
@classmethod
def connect_to_region(cls, region, session=None, access_key=None,
secret_key=None, **kwargs):
"""
Connect to an AWS region.
This method has been deprecated in favor of :meth:`~.connect`
Parameters
----------
region : str
Name of an AWS region
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
**kwargs : dict
Keyword arguments to pass to the constructor
"""
warnings.warn("connect_to_region is deprecated and will be removed. "
"Use connect instead.")
if session is None:
session = botocore.session.get_session()
if access_key is not None:
session.set_credentials(access_key, secret_key)
client = session.create_client('dynamodb', region)
return cls(client, **kwargs)
@classmethod
def connect_to_host(cls, host='localhost', port=8000, is_secure=False,
session=None, access_key=None, secret_key=None,
**kwargs):
"""
Connect to a specific host.
This method has been deprecated in favor of :meth:`~.connect`
Parameters
----------
host : str, optional
Address of the host (default 'localhost')
port : int, optional
Connect to the host on this port (default 8000)
is_secure : bool, optional
Enforce https connection (default False)
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
**kwargs : dict
Keyword arguments to pass to the constructor
"""
warnings.warn("connect_to_host is deprecated and will be removed. "
"Use connect instead.")
if session is None:
session = botocore.session.get_session()
if access_key is not None:
session.set_credentials(access_key, secret_key)
url = "http://%s:%d" % (host, port)
client = session.create_client('dynamodb', 'local', endpoint_url=url,
use_ssl=is_secure)
return cls(client, **kwargs)
@classmethod
def connect(cls, region, session=None, access_key=None, secret_key=None,
host=None, port=80, is_secure=True, **kwargs):
"""
Connect to an AWS region.
Parameters
----------
region : str
Name of an AWS region
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
host : str, optional
Address of the host. Use this to connect to a local instance.
port : int, optional
Connect to the host on this port (default 80)
is_secure : bool, optional
Enforce https connection (default True)
**kwargs : dict
Keyword arguments to pass to the constructor
"""
if session is None:
session = botocore.session.get_session()
if access_key is not None:
session.set_credentials(access_key, secret_key)
url = None
if host is not None:
protocol = 'https' if is_secure else 'http'
url = "%s://%s:%d" % (protocol, host, port)
client = session.create_client('dynamodb', region, endpoint_url=url,
use_ssl=is_secure)
return cls(client, **kwargs)
def call(self, command, **kwargs):
"""
Make a request to DynamoDB using the raw botocore API
Parameters
----------
command : str
The name of the Dynamo command to execute
**kwargs : dict
The parameters to pass up in the request
Raises
------
exc : :class:`~.DynamoDBError`
Returns
-------
data : dict
"""
for hook in self._hooks['precall']:
hook(self, command, kwargs)
op = getattr(self.client, command)
attempt = 0
while True:
try:
data = op(**kwargs)
break
except ClientError as e:
exc = translate_exception(e, kwargs)
attempt += 1
if isinstance(exc, ThroughputException):
if attempt > self.request_retries:
exc.re_raise()
self.exponential_sleep(attempt)
else:
exc.re_raise()
for hook in self._hooks['postcall']:
hook(self, command, kwargs, data)
if 'ConsumedCapacity' in data:
is_read = command in READ_COMMANDS
consumed = data['ConsumedCapacity']
if isinstance(consumed, list):
data['consumed_capacity'] = [
ConsumedCapacity.from_response(cap, is_read)
for cap in consumed
]
else:
capacity = ConsumedCapacity.from_response(consumed, is_read)
data['consumed_capacity'] = capacity
if 'consumed_capacity' in data:
if isinstance(data['consumed_capacity'], list):
all_caps = data['consumed_capacity']
else:
all_caps = [data['consumed_capacity']]
for hook in self._hooks['capacity']:
for cap in all_caps:
hook(self, command, kwargs, data, cap)
return data
def exponential_sleep(self, attempt):
""" Sleep with exponential backoff """
if attempt > 1:
time.sleep(0.1 * 2 ** attempt)
def subscribe(self, event, hook):
"""
Subscribe a callback to an event
Parameters
----------
event : str
Available events are 'precall', 'postcall', and 'capacity'.
precall is called with: (connection, command, query_kwargs)
postcall is called with: (connection, command, query_kwargs, response)
capacity is called with: (connection, command, query_kwargs, response, capacity)
hook : callable
"""
if hook not in self._hooks[event]:
self._hooks[event].append(hook)
def unsubscribe(self, event, hook):
""" Unsubscribe a hook from an event """
if hook in self._hooks[event]:
self._hooks[event].remove(hook)
def add_rate_limit(self, limiter):
""" Add a RateLimit to the connection """
if limiter not in self.rate_limiters:
self.subscribe('capacity', limiter.on_capacity)
self.rate_limiters.append(limiter)
def remove_rate_limit(self, limiter):
""" Remove a RateLimit from the connection """
if limiter in self.rate_limiters:
self.unsubscribe('capacity', limiter.on_capacity)
self.rate_limiters.remove(limiter)
@contextmanager
def limit(self, limiter):
""" Context manager that applies a RateLimit to the connection """
self.add_rate_limit(limiter)
try:
yield
finally:
self.remove_rate_limit(limiter)
def clear_hooks(self):
""" Remove all hooks from all events """
self._hooks = {
'precall': [],
'postcall': [],
'capacity': [],
}
def _default_capacity(self, value):
""" Get the value for ReturnConsumedCapacity from provided value """
if value is not None:
return value
if self.default_return_capacity or self.rate_limiters:
return INDEXES
return NONE
def _count(self, method, limit, keywords):
""" Do a scan or query and aggregate the results into a Count """
# The limit will be mutated, so copy it and leave the original intact
limit = limit.copy()
has_more = True
count = None
while has_more:
limit.set_request_args(keywords)
response = self.call(method, **keywords)
limit.post_fetch(response)
count += Count.from_response(response)
last_evaluated_key = response.get('LastEvaluatedKey')
has_more = last_evaluated_key is not None and not limit.complete
if has_more:
keywords['ExclusiveStartKey'] = last_evaluated_key
return count
def list_tables(self, limit=None):
"""
List all tables.
Parameters
----------
limit : int, optional
Maximum number of tables to return
Returns
-------
tables : Iterator
Iterator that returns table names as strings
"""
return TableResultSet(self, limit)
def describe_table(self, tablename):
"""
Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table`
"""
try:
response = self.call(
'describe_table', TableName=tablename)['Table']
return Table.from_response(response)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException':
return None
else: # pragma: no cover
raise
def create_table(self, tablename, hash_key, range_key=None,
indexes=None, global_indexes=None, throughput=None):
"""
Create a table
Parameters
----------
tablename : str
Name of the table
hash_key : :class:`~dynamo3.fields.DynamoKey`
The key to use as the Hash key
range_key : :class:`~dynamo3.fields.DynamoKey`, optional
The key to use as the Range key
indexes : list, optional
List of :class:`~dynamo3.fields.LocalIndex`
global_indexes : list, optional
List of :class:`~dynamo3.fields.GlobalIndex`
throughput : :class:`~dynamo3.fields.Throughput`, optional
The throughput of the table
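        Examples
        --------
        A minimal sketch; the table and key names are hypothetical, and
        DynamoKey/Throughput come from :mod:`dynamo3.fields` (a string hash
        key is assumed by default):
        .. code-block:: python
            from dynamo3.fields import DynamoKey, Throughput
            connection.create_table('mytable', DynamoKey('id'),
                                    throughput=Throughput(5, 5))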
"""
if throughput is None:
throughput = Throughput()
all_attrs = set([hash_key])
if range_key is not None:
all_attrs.add(range_key)
key_schema = [hash_key.hash_schema()]
if range_key is not None:
key_schema.append(range_key.range_schema())
kwargs = {
'TableName': tablename,
'KeySchema': key_schema,
'ProvisionedThroughput': throughput.schema(),
}
if indexes:
kwargs['LocalSecondaryIndexes'] = [
idx.schema(hash_key) for idx in indexes
]
for idx in indexes:
all_attrs.add(idx.range_key)
if global_indexes:
kwargs['GlobalSecondaryIndexes'] = [
idx.schema() for idx in global_indexes
]
for idx in global_indexes:
all_attrs.add(idx.hash_key)
if idx.range_key is not None:
all_attrs.add(idx.range_key)
kwargs['AttributeDefinitions'] = [attr.definition() for attr in
all_attrs]
return self.call('create_table', **kwargs)
def delete_table(self, tablename):
"""
Delete a table
Parameters
----------
tablename : str
Name of the table to delete
Returns
-------
response : bool
True if the table was deleted, False if no table exists
"""
try:
self.call('delete_table', TableName=tablename)
return True
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException':
return False
else: # pragma: no cover
raise
def put_item(self, tablename, item, expected=None, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Store an item, overwriting existing data
This uses the older version of the DynamoDB API.
See also: :meth:`~.put_item2`.
Parameters
----------
tablename : str
Name of the table to write
item : dict
Item data
expected : dict, optional
DEPRECATED (use **kwargs instead).
If present, will check the values in Dynamo before performing the
write. If values do not match, will raise an exception. (Using None
as a value checks that the field does not exist).
returns : {NONE, ALL_OLD}, optional
If ALL_OLD, will return any data that was overwritten (default
NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the **kwargs conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the PUT. Same format as the kwargs for
:meth:`~.scan`.
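        Examples
        --------
        A minimal usage sketch; the table, key, and attribute names below are
        hypothetical:
        .. code-block:: python
            connection.put_item('mytable', {'id': 'a', 'foo': 'bar'})
            connection.put_item('mytable', {'id': 'a', 'foo': 'baz'}, foo__eq='bar')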
"""
keywords = {}
if kwargs:
keywords['Expected'] = encode_query_kwargs(self.dynamizer, kwargs)
if len(keywords['Expected']) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
elif expected is not None:
keywords['Expected'] = build_expected(self.dynamizer, expected)
keywords['ReturnConsumedCapacity'] = \
self._default_capacity(return_capacity)
item = self.dynamizer.encode_keys(item)
ret = self.call('put_item', TableName=tablename, Item=item,
ReturnValues=returns, **keywords)
if ret:
return Result(self.dynamizer, ret, 'Attributes')
def put_item2(self, tablename, item, expr_values=None, alias=None,
condition=None, returns=NONE, return_capacity=None,
return_item_collection_metrics=NONE, **kwargs):
"""
Put a new item into a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html
Parameters
----------
tablename : str
Name of the table to update
item : dict
Item data
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
condition : str, optional
See docs for ConditionExpression
returns : {NONE, ALL_OLD}, optional
Return nothing or the old values from the item that was
overwritten, if any (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
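        Examples
        --------
        A minimal usage sketch; the table, key, and attribute names are
        hypothetical:
        .. code-block:: python
            connection.put_item2('mytable', {'id': 'a', 'foo': 'bar'})
            connection.put_item2('mytable', {'id': 'a', 'foo': 'bar'},
                                 condition='attribute_not_exists(foo)')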
"""
keywords = {
'TableName': tablename,
'Item': self.dynamizer.encode_keys(item),
'ReturnValues': returns,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ReturnItemCollectionMetrics': return_item_collection_metrics,
}
values = build_expression_values(self.dynamizer, expr_values, kwargs)
if values:
keywords['ExpressionAttributeValues'] = values
if alias:
keywords['ExpressionAttributeNames'] = alias
if condition:
keywords['ConditionExpression'] = condition
result = self.call('put_item', **keywords)
if result:
return Result(self.dynamizer, result, 'Attributes')
def get_item(self, tablename, key, attributes=None, consistent=False,
return_capacity=None):
"""
Fetch a single item from a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.get_item2`.
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : list, optional
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
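        Examples
        --------
        A minimal usage sketch; the table and key names are hypothetical:
        .. code-block:: python
            item = connection.get_item('mytable', {'id': 'a'})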
"""
kwargs = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ConsistentRead': consistent,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if attributes is not None:
kwargs['AttributesToGet'] = attributes
data = self.call('get_item', **kwargs)
return Result(self.dynamizer, data, 'Item')
def get_item2(self, tablename, key, attributes=None, alias=None,
consistent=False, return_capacity=None):
"""
Fetch a single item from a table
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
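        Examples
        --------
        A minimal usage sketch; the table, key, and attribute names are
        hypothetical:
        .. code-block:: python
            item = connection.get_item2('mytable', {'id': 'a'},
                                        attributes=['foo', 'bar'])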
"""
kwargs = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ConsistentRead': consistent,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if attributes is not None:
if not isinstance(attributes, six.string_types):
attributes = ', '.join(attributes)
kwargs['ProjectionExpression'] = attributes
if alias:
kwargs['ExpressionAttributeNames'] = alias
data = self.call('get_item', **kwargs)
return Result(self.dynamizer, data, 'Item')
def delete_item(self, tablename, key, expected=None, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Delete an item
This uses the older version of the DynamoDB API.
See also: :meth:`~.delete_item2`.
Parameters
----------
tablename : str
Name of the table to delete from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
expected : dict, optional
DEPRECATED (use **kwargs instead).
If present, will check the values in Dynamo before performing the
write. If values do not match, will raise an exception. (Using None
as a value checks that the field does not exist).
returns : {NONE, ALL_OLD}, optional
If ALL_OLD, return the data that was deleted (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the **kwargs conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the DELETE. Same format as the kwargs for
:meth:`~.scan`.
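        Examples
        --------
        A minimal usage sketch; the table, key, and attribute names are
        hypothetical:
        .. code-block:: python
            connection.delete_item('mytable', {'id': 'a'})
            connection.delete_item('mytable', {'id': 'a'}, foo__eq='bar')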
"""
key = self.dynamizer.encode_keys(key)
keywords = {
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if kwargs:
keywords['Expected'] = encode_query_kwargs(self.dynamizer, kwargs)
if len(keywords['Expected']) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
elif expected is not None:
keywords['Expected'] = build_expected(self.dynamizer, expected)
ret = self.call('delete_item', TableName=tablename, Key=key,
ReturnValues=returns, **keywords)
if ret:
return Result(self.dynamizer, ret, 'Attributes')
def delete_item2(self, tablename, key, expr_values=None, alias=None,
condition=None, returns=NONE, return_capacity=None,
return_item_collection_metrics=NONE, **kwargs):
"""
Delete an item from a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
condition : str, optional
See docs for ConditionExpression
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
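        Examples
        --------
        A minimal usage sketch; the table, key, and attribute names are
        hypothetical:
        .. code-block:: python
            connection.delete_item2('mytable', {'id': 'a'},
                                    condition='foo = :foo', foo='bar')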
"""
keywords = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ReturnValues': returns,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ReturnItemCollectionMetrics': return_item_collection_metrics,
}
values = build_expression_values(self.dynamizer, expr_values, kwargs)
if values:
keywords['ExpressionAttributeValues'] = values
if alias:
keywords['ExpressionAttributeNames'] = alias
if condition:
keywords['ConditionExpression'] = condition
result = self.call('delete_item', **keywords)
if result:
return Result(self.dynamizer, result, 'Attributes')
def batch_write(self, tablename, return_capacity=None,
return_item_collection_metrics=NONE):
"""
Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'})
"""
return_capacity = self._default_capacity(return_capacity)
return BatchWriter(self, tablename, return_capacity=return_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def batch_get(self, tablename, keys, attributes=None, alias=None,
consistent=False, return_capacity=None):
"""
Perform a batch get of many items in a table
Parameters
----------
tablename : str
Name of the table to fetch from
keys : list or iterable
List or iterable of primary key dicts that specify the hash key and
the optional range key of each item to fetch
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
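        Examples
        --------
        A minimal usage sketch; the table and key values are hypothetical:
        .. code-block:: python
            keys = [{'id': 'a'}, {'id': 'b'}]
            for item in connection.batch_get('mytable', keys):
                print(item)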
"""
keys = [self.dynamizer.encode_keys(k) for k in keys]
return_capacity = self._default_capacity(return_capacity)
ret = GetResultSet(self, tablename, keys,
consistent=consistent, attributes=attributes,
alias=alias, return_capacity=return_capacity)
return ret
def update_item(self, tablename, key, updates, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Update a single item in a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.update_item2`.
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
updates : list
List of :class:`~dynamo3.batch.ItemUpdate`
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the updates conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the PUT. Same format as the kwargs for
:meth:`~.scan`.
Notes
-----
There are two ways to specify the expected values of fields. The
simplest is via the list of updates. Each updated field may specify a
constraint on the current value of that field. You may pass additional
constraints in via the **kwargs the same way you would for put_item.
This is necessary if you have constraints on fields that are not being
updated.
"""
key = self.dynamizer.encode_keys(key)
attr_updates = {}
expected = {}
keywords = {
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
for update in updates:
attr_updates.update(update.attrs(self.dynamizer))
expected.update(update.expected(self.dynamizer))
# Pull the 'expected' constraints from the kwargs
for k, v in six.iteritems(encode_query_kwargs(self.dynamizer, kwargs)):
if k in expected:
raise ValueError("Cannot have more than one condition on a single field")
expected[k] = v
if expected:
keywords['Expected'] = expected
if len(expected) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
result = self.call('update_item', TableName=tablename, Key=key,
AttributeUpdates=attr_updates,
ReturnValues=returns,
**keywords)
if result:
return Result(self.dynamizer, result, 'Attributes')
def update_item2(self, tablename, key, expression, expr_values=None, alias=None,
condition=None, returns=NONE, return_capacity=None,
return_item_collection_metrics=NONE, **kwargs):
"""
Update a single item in a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
expression : str
See docs for UpdateExpression
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
condition : str, optional
See docs for ConditionExpression
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : (NONE, SIZE), optional
SIZE will return statistics about item collections that were
modified.
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
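        Examples
        --------
        A minimal usage sketch; the table, key, and attribute names are
        hypothetical:
        .. code-block:: python
            connection.update_item2('mytable', {'id': 'a'},
                                    'SET foo = :foo', foo=5)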
"""
keywords = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'UpdateExpression': expression,
'ReturnValues': returns,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ReturnItemCollectionMetrics': return_item_collection_metrics,
}
values = build_expression_values(self.dynamizer, expr_values, kwargs)
if values:
keywords['ExpressionAttributeValues'] = values
if alias:
keywords['ExpressionAttributeNames'] = alias
if condition:
keywords['ConditionExpression'] = condition
result = self.call('update_item', **keywords)
if result:
return Result(self.dynamizer, result, 'Attributes')
def scan(self, tablename, attributes=None, count=False, limit=None,
return_capacity=None, filter_or=False, exclusive_start_key=None,
**kwargs):
"""
Perform a full-table scan
This uses the older version of the DynamoDB API.
See also: :meth:`~.scan2`.
Parameters
----------
tablename : str
Name of the table to scan
attributes : list
If present, only fetch these attributes from the item
count : bool, optional
If True, return a count of matched items instead of the items
themselves (default False)
limit : int, optional
Maximum number of items to return
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter_or : bool, optional
If True, multiple filter kwargs will be OR'd together. If False,
they will be AND'd together. (default False)
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
Filter arguments (examples below)
Examples
--------
You may pass in constraints using the Django-style '__' syntax. For
example:
.. code-block:: python
connection.scan('mytable', tags__contains='searchtag')
connection.scan('mytable', name__eq='dsa')
connection.scan('mytable', action__in=['wibble', 'wobble'])
"""
keywords = {
'TableName': tablename,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if attributes is not None:
keywords['AttributesToGet'] = attributes
if exclusive_start_key is not None:
keywords['ExclusiveStartKey'] = \
self.dynamizer.maybe_encode_keys(exclusive_start_key)
if kwargs:
keywords['ScanFilter'] = encode_query_kwargs(
self.dynamizer, kwargs)
if len(kwargs) > 1:
keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND'
if not isinstance(limit, Limit):
limit = Limit(limit)
if count:
keywords['Select'] = COUNT
return self._count('scan', limit, keywords)
else:
return ResultSet(self, limit, 'scan', **keywords)
def scan2(self, tablename, expr_values=None, alias=None, attributes=None,
consistent=False, select=None, index=None, limit=None,
return_capacity=None, filter=False, segment=None,
total_segments=None, exclusive_start_key=None, **kwargs):
"""
Perform a full-table scan
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html
Parameters
----------
tablename : str
Name of the table to scan
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
select : str, optional
See docs for Select
index : str, optional
The name of the index to query
limit : int, optional
Maximum number of items to return
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter : str, optional
See docs for FilterExpression
segment : int, optional
When doing a parallel scan, the unique thread identifier for this
scan. If present, total_segments must also be present.
total_segments : int, optional
When doing a parallel scan, the total number of threads performing
the scan.
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
Examples
--------
.. code-block:: python
            connection.scan2('mytable', filter='contains(tags, :search)', search='text')
connection.scan2('mytable', filter='id = :id', expr_values={':id': 'dsa'})
"""
keywords = {
'TableName': tablename,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ConsistentRead': consistent,
}
values = build_expression_values(self.dynamizer, expr_values, kwargs)
if values:
keywords['ExpressionAttributeValues'] = values
if attributes is not None:
if not isinstance(attributes, six.string_types):
attributes = ', '.join(attributes)
keywords['ProjectionExpression'] = attributes
if index is not None:
keywords['IndexName'] = index
if alias:
keywords['ExpressionAttributeNames'] = alias
if select:
keywords['Select'] = select
if filter:
keywords['FilterExpression'] = filter
if segment is not None:
keywords['Segment'] = segment
if total_segments is not None:
keywords['TotalSegments'] = total_segments
if exclusive_start_key is not None:
keywords['ExclusiveStartKey'] = \
self.dynamizer.maybe_encode_keys(exclusive_start_key)
if not isinstance(limit, Limit):
limit = Limit(limit)
if select == COUNT:
return self._count('scan', limit, keywords)
else:
return ResultSet(self, limit, 'scan', **keywords)
def query(self, tablename, attributes=None, consistent=False, count=False,
index=None, limit=None, desc=False, return_capacity=None,
filter=None, filter_or=False, exclusive_start_key=None, **kwargs):
"""
Perform an index query on a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.query2`.
Parameters
----------
tablename : str
Name of the table to query
attributes : list
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
count : bool, optional
If True, return a count of matched items instead of the items
themselves (default False)
index : str, optional
The name of the index to query
limit : int, optional
Maximum number of items to return
desc : bool, optional
If True, return items in descending order (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter : dict, optional
Query arguments. Same format as **kwargs, but these arguments
filter the results on the server before they are returned. They
will NOT use an index, as that is what the **kwargs are for.
filter_or : bool, optional
If True, multiple filter args will be OR'd together. If False, they
will be AND'd together. (default False)
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
Query arguments (examples below)
Examples
--------
You may pass in constraints using the Django-style '__' syntax. For
example:
.. code-block:: python
connection.query('mytable', foo__eq=5)
connection.query('mytable', foo__eq=5, bar__lt=22)
connection.query('mytable', foo__eq=5, bar__between=(1, 10))
"""
keywords = {
'TableName': tablename,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ConsistentRead': consistent,
'ScanIndexForward': not desc,
'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs),
}
if attributes is not None:
keywords['AttributesToGet'] = attributes
if index is not None:
keywords['IndexName'] = index
if filter is not None:
if len(filter) > 1:
keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND'
keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer,
filter)
if exclusive_start_key is not None:
keywords['ExclusiveStartKey'] = \
self.dynamizer.maybe_encode_keys(exclusive_start_key)
if not isinstance(limit, Limit):
limit = Limit(limit)
if count:
keywords['Select'] = COUNT
return self._count('query', limit, keywords)
else:
return ResultSet(self, limit, 'query', **keywords)
def query2(self, tablename, key_condition_expr, expr_values=None,
alias=None, attributes=None, consistent=False, select=None,
index=None, limit=None, desc=False, return_capacity=None,
filter=None, exclusive_start_key=None, **kwargs):
"""
Perform an index query on a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html
Parameters
----------
tablename : str
Name of the table to query
key_condition_expr : str
See docs for KeyConditionExpression
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
select : str, optional
See docs for Select
index : str, optional
The name of the index to query
limit : int, optional
Maximum number of items to return
desc : bool, optional
If True, return items in descending order (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter : str, optional
See docs for FilterExpression
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
Examples
--------
.. code-block:: python
connection.query2('mytable', 'foo = :foo', foo=5)
connection.query2('mytable', 'foo = :foo', expr_values={':foo': 5})
"""
values = build_expression_values(self.dynamizer, expr_values, kwargs)
keywords = {
'TableName': tablename,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ConsistentRead': consistent,
'KeyConditionExpression': key_condition_expr,
'ExpressionAttributeValues': values,
'ScanIndexForward': not desc,
}
if attributes is not None:
if not isinstance(attributes, six.string_types):
attributes = ', '.join(attributes)
keywords['ProjectionExpression'] = attributes
if index is not None:
keywords['IndexName'] = index
if alias:
keywords['ExpressionAttributeNames'] = alias
if select:
keywords['Select'] = select
if filter:
keywords['FilterExpression'] = filter
if exclusive_start_key is not None:
keywords['ExclusiveStartKey'] = \
self.dynamizer.maybe_encode_keys(exclusive_start_key)
if not isinstance(limit, Limit):
limit = Limit(limit)
if select == COUNT:
return self._count('query', limit, keywords)
else:
return ResultSet(self, limit, 'query', **keywords)
def update_table(self, tablename, throughput=None, global_indexes=None,
index_updates=None):
"""
Update the throughput of a table and/or global indexes
Parameters
----------
tablename : str
Name of the table to update
throughput : :class:`~dynamo3.fields.Throughput`, optional
The new throughput of the table
global_indexes : dict, optional
DEPRECATED. Use index_updates now.
Map of index name to :class:`~dynamo3.fields.Throughput`
index_updates : list of :class:`~dynamo3.fields.IndexUpdate`, optional
List of IndexUpdates to perform
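        Examples
        --------
        A minimal usage sketch; the table name and throughput values are
        hypothetical (Throughput comes from :mod:`dynamo3.fields`):
        .. code-block:: python
            from dynamo3.fields import Throughput
            connection.update_table('mytable', throughput=Throughput(10, 10))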
"""
kwargs = {
'TableName': tablename
}
all_attrs = set()
if throughput is not None:
kwargs['ProvisionedThroughput'] = throughput.schema()
if index_updates is not None:
updates = []
for update in index_updates:
all_attrs.update(update.get_attrs())
updates.append(update.serialize())
kwargs['GlobalSecondaryIndexUpdates'] = updates
elif global_indexes is not None:
kwargs['GlobalSecondaryIndexUpdates'] = [
{
'Update': {
'IndexName': key,
'ProvisionedThroughput': value.schema(),
}
}
for key, value in six.iteritems(global_indexes)
]
if all_attrs:
attr_definitions = [attr.definition() for attr in all_attrs]
kwargs['AttributeDefinitions'] = attr_definitions
return self.call('update_table', **kwargs)
| [
"[email protected]"
] | |
b17e57c7ad95e495e68871a72f53d9b3fa51a4f5 | bf5dcdc1cb57ed72a47e0c444bb2fb631d3f0933 | /setup.py | 3d40c4fdc4479440f3c1e2913596de9a253375ae | [] | no_license | vphpersson/twitter_osint | 3e2128f1d9944053ee127ec748a56ede55cefcac | a437825d488afa2d5b15c221348cc72157f25227 | refs/heads/master | 2023-07-05T08:44:29.579442 | 2021-08-22T09:12:28 | 2021-08-22T09:12:28 | 398,564,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from setuptools import setup, find_packages
setup(
name='twitter_osint',
version='0.1',
packages=find_packages(),
install_requires=[
'httpx',
'httpx_oauth @ git+ssh://[email protected]/vphpersson/httpx_oauth.git#egg=httpx_oauth',
        'pyutils @ git+ssh://[email protected]/vphpersson/pyutils.git#egg=pyutils',
'twitter_api @ git+ssh://[email protected]/vphpersson/twitter_api.git#egg=twitter_api'
]
)
| [
"[email protected]"
] | |
893741290acaa4737579c1cfb54e07484866c834 | 70b0d4b4440a97b648a08de0d89cc536e8f4c569 | /1313.py | 69aedd9d90dbd61fabe97a8b396f434ba1868c40 | [] | no_license | seoseokbeom/leetcode | 01c9ca8a23e38a3d3c91d2de26f0b2a3a1710487 | 9d68de2271c2d5666750c8060407b56abbf6f45d | refs/heads/master | 2023-03-27T20:20:24.790750 | 2021-03-25T04:43:50 | 2021-03-25T04:43:50 | 273,779,517 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | class Solution(object):
def decompressRLElist(self, nums):
freq = 0
arr = []
for i, v in enumerate(nums):
if i % 2 == 0:
freq = nums[i]
else:
arr.extend(([nums[i]]*freq))
return arr
a = Solution()
print(a.decompressRLElist([1, 1, 2, 3]))
| [
"[email protected]"
] | |
1d8792acf20db18580b85389fa2d5f8108a2d512 | b3e9a8963b9aca334b93b95bc340c379544e1046 | /euler/59.py | eab411b4784038d836ba37febc29bd02a82d47d8 | [] | no_license | protocol7/euler.py | 86ea512c2c216968e6c260b19469c0c8d038feb7 | e2a8e46a9b07e6d0b039a5496059f3bf73aa5441 | refs/heads/master | 2022-09-08T22:49:47.486631 | 2022-08-23T20:07:00 | 2022-08-23T20:07:00 | 169,478,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | #!/usr/bin/env python3
from string import ascii_lowercase
from itertools import permutations, cycle
def read_cipher():
with open("p059_cipher.txt") as f:
s = f.read().strip()
return [int(i) for i in s.split(",")]
c = read_cipher()
def to_string(l):
return "".join([chr(x) for x in l])
def find():
for key in permutations(ascii_lowercase, 3):
key = cycle([ord(x) for x in key])
pt = list(map(lambda x: x[0] ^ x[1], zip(c, key)))
if " the " in to_string(pt):
return sum(pt)
assert 129448 == find()
| [
"[email protected]"
] | |
010cfa1c616d88a8f9af32b2216f527d47fe7ef3 | dd3b8bd6c9f6f1d9f207678b101eff93b032b0f0 | /basis/AbletonLive10.1_MIDIRemoteScripts/ableton/v2/control_surface/elements/optional.py | f4cef1c4ec91e78d8434b687f1a2ab8f8b7de8ca | [] | no_license | jhlax/les | 62955f57c33299ebfc4fca8d0482b30ee97adfe7 | d865478bf02778e509e61370174a450104d20a28 | refs/heads/master | 2023-08-17T17:24:44.297302 | 2019-12-15T08:13:29 | 2019-12-15T08:13:29 | 228,120,861 | 3 | 0 | null | 2023-08-03T16:40:44 | 2019-12-15T03:02:27 | Python | UTF-8 | Python | false | false | 1,432 | py | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ableton/v2/control_surface/elements/optional.py
# Compiled at: 2019-04-09 19:23:45
from __future__ import absolute_import, print_function, unicode_literals
from ...base import listens
from .combo import ToggleElement
class ChoosingElement(ToggleElement):
u"""
An Element wrapper that enables one of the nested elements based on
the value of the given flag.
"""
def __init__(self, flag=None, *a, **k):
super(ChoosingElement, self).__init__(*a, **k)
self.__on_flag_changed.subject = flag
self.__on_flag_changed(flag.value)
@listens('value')
def __on_flag_changed(self, value):
self.set_toggled(value)
class OptionalElement(ChoosingElement):
u"""
An Element wrapper that enables the nested element IFF some given
flag is set to a specific value.
"""
def __init__(self, control=None, flag=None, value=None, *a, **k):
on_control = control if value else None
off_control = None if value else control
super(OptionalElement, self).__init__(on_control=on_control, off_control=off_control, flag=flag, *a, **k)
return | [
"[email protected]"
] | |
fbba06a4b19bbae28afe04b3603983a619889f87 | a6f9e8412682d8a9f21b2a3bf54b7088f7149cc9 | /pytest/Compiler/constants32.py | df0f657cfab914f20431d61a087c5fe902148935 | [
"Apache-2.0",
"LLVM-exception"
] | permissive | stellaraccident/mlir-npcomp | 49a3c285d728d43db4caf7d18cb5919be40d6206 | a9d7610f9d6740e984cbeb55854abac1f92414f9 | refs/heads/master | 2021-09-26T18:24:46.630327 | 2020-07-13T23:15:42 | 2020-07-13T23:15:42 | 250,896,585 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail
# Subset of constant tests which verify against a GenericTarget32.
from npcomp.compiler import test_config
from npcomp.compiler.target import *
import_global = test_config.create_import_dump_decorator(
target_factory=GenericTarget32)
# CHECK-LABEL: func @integer_constants
@import_global
def integer_constants():
# CHECK: %[[A:.*]] = constant 100 : i32
a = 100
return a
# CHECK-LABEL: func @float_constants
@import_global
def float_constants():
# CHECK: %[[A:.*]] = constant 2.200000e+00 : f32
a = 2.2
return a
| [
"[email protected]"
] | |
c939c29a265c9ad2c8e60bbe024d8471ccb7348d | 480e33f95eec2e471c563d4c0661784c92396368 | /Geometry/CMSCommonData/test/dd4hep/2026D35.py | fcd71f1b6cde299aae8e0ce696f2cf7e34257bba | [
"Apache-2.0"
] | permissive | cms-nanoAOD/cmssw | 4d836e5b76ae5075c232de5e062d286e2026e8bd | 4eccb8a758b605875003124dd55ea58552b86af1 | refs/heads/master-cmsswmaster | 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 | Apache-2.0 | 2022-05-23T07:58:09 | 2017-09-08T14:03:57 | C++ | UTF-8 | Python | false | false | 1,228 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C4_cff import Phase2C4
process = cms.Process("HcalParametersTest",Phase2C4)
process.load('Geometry.HcalCommonData.hcalParameters_cff')
process.load('Geometry.HcalCommonData.hcalSimulationParameters_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
confGeomXMLFiles = cms.FileInPath('Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D35.xml'),
appendToDataLabel = cms.string('')
)
process.DDCompactViewESProducer = cms.ESProducer("DDCompactViewESProducer",
appendToDataLabel = cms.string('')
)
process.hpa = cms.EDAnalyzer("HcalParametersAnalyzer")
process.hcalParameters.fromDD4Hep = cms.bool(True)
process.hcalSimulationParameters.fromDD4Hep = cms.bool(True)
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.p1 = cms.Path(process.hpa)
| [
"[email protected]"
] | |
051b8fc4c4f9b655d4722a097ae2ebb6b6478ded | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_appended.py | d4a4abe95c2c87fcc180869b9bcb91fd2dea25b1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
# class header
class _APPENDED():
def __init__(self,):
self.name = "APPENDED"
        self.definitions = 'append'
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['append']
| [
"[email protected]"
] | |
54dd5c9c96fe3d34d65d41b7cc7943a8ad1da41a | e41b86e0b25edc055167aa13a55bd2da14869b73 | /summarizer_app/apps.py | 3ea7b8aad2120fb39dd02f2f970052c1010f2ffb | [] | no_license | tejasa97/django-summarizer | 9ce21ba3ff9176e4cf341531b380415e03cda150 | 5be990ab95358adb002cc8ed62cf8b56cfe85130 | refs/heads/master | 2023-02-16T12:52:52.172337 | 2021-01-04T18:42:36 | 2021-01-04T18:42:36 | 325,529,463 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from django.apps import AppConfig
class SummarizerAppConfig(AppConfig):
name = 'summarizer_app'
| [
"[email protected]"
] | |
5ad4df840871267cc1bb83ec88e485f0153556e0 | 25d081c82bf9adc2a8d96c254df0239a9f982a71 | /museum_api/apps.py | cc4d3156bb67434fd105904f896d8798cb30922c | [
"MIT"
] | permissive | asiekierka/z2 | f102de582aaa9fc51b6b598a1fb07c58be4f540f | d926408423dc98d71d5e7fc2fda3202c03c309de | refs/heads/master | 2021-06-15T15:09:41.614135 | 2021-02-23T02:44:54 | 2021-02-23T02:44:54 | 146,348,922 | 1 | 0 | MIT | 2018-08-27T20:14:46 | 2018-08-27T20:14:46 | null | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class MuseumApiConfig(AppConfig):
name = 'museum_api'
| [
"[email protected]"
] | |
544bcce07294878710767c945f78174e204c8843 | af669dbef653dd69474f4c0836582bf14262c80f | /price-test/frame/lib/deploylib/confparser.py | 2aaf1de62efdfe8dc17d1dc8b90fe1dbe81695cb | [] | no_license | siki320/fishtest | 7a3f91639d8d4cee624adc1d4d05563611b435e9 | 7c3f024192e1c48214b53bc45105bdf9e746a013 | refs/heads/master | 2021-01-19T21:58:36.807126 | 2017-04-19T09:56:37 | 2017-04-19T09:56:37 | 88,729,049 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 12,050 | py | # -*- coding: GB18030 -*-
"""
@author: lipeilong
@date: Mar 28, 2012
@summary: Handles parsing of the topology configuration files
@version: 1.0.0.0
@copyright: Copyright (c) 2012 XX, Inc. All Rights Reserved
"""
import os
import string
import ConfigParser
from frame.lib.commonlib.configure.configure import Configure
class EnvConfigHelper(object):
"""
@author:maqi
    @note: Helper class for Env topology configuration
"""
@staticmethod
def get_conf(file_dir,file):
"""
        @note: Build the Env topology configuration parser
"""
if file is None:
return None
try:
conf = EnvMMConfigParser(file_dir, file)
if conf.get_conf_version() == "3":
conf = EnvMMUbConfigParser(file_dir, file)
except ConfigParser.MissingSectionHeaderError:
conf = EnvSMConfigParser(file_dir,file)
return conf
class EnvSMConfigParser(object):
"""
@author:maqi
    @note: Parses the topology of a single-machine Env deployment
"""
def __init__(self,file_dir,file):
self.type = 'sm'
if file == None:
self.lines = None
else:
f = open(file_dir+file, 'r')
self.lines = f.readlines()
f.close()
def get_conf_version(self):
return "1"
def parse_topology(self):
return_topology_list = []
line_count = 0
for line in self.lines:
line_count += 1
if False == self._check_line_validity(line):
                # only keep non-comment lines
#if False == self._check_line_is_comments(line):
continue
up_module, down_module = self._parse_module_name(line)
return_topology_list.append([up_module,down_module])
return return_topology_list
def _check_line_is_empty(self, line):
'''
        @note: Check whether the line is an empty line
'''
if "\n" != line[-1]:
return False
for c in line[:-1]:
if " " != c:
return False
return True
def _check_line_is_comments(self, line):
'''
        @note: Check whether the line is a comment line or an empty line
'''
if True == self._check_line_is_empty(line):
return True
        # if the line does not contain '#', it is definitely not a comment line
pos = string.find(line, "#")
if -1 == pos:
return False
        # only the first '#' matters: if the first non-space character of the line is '#', the line is commented out
        # this also covers trailing comments at the end of a valid line
ret = True
for c in line[:pos]:
if " " != c:
ret = False
break
return ret
def _check_line_validity(self, line):
'''
        @note: Check whether the line is valid
'''
ret = True
        # invalid if the line contains neither '->' nor '<-'
if -1 == string.find(line, "->") and -1 == string.find(line, "<-"):
ret = False
        # invalid if the line contains both '->' and '<-'
if -1 != string.find(line, "->") and -1 != string.find(line, "<-"):
ret = False
        # check whether it is a comment line
if True == self._check_line_is_comments(line):
ret = False
return ret
def _parse_module_name(self, line):
'''
        @note: Parse the upstream and downstream module names from a valid line
'''
sep_list = ["->", "<-"]
for s in sep_list:
if -1 != string.find(line, s):
sep = s
break
module_name_list = line.split(sep)
        # a valid line may carry a trailing explanatory comment; strip it off
if -1 != module_name_list[1].find("#"):
module_name_list[1] = module_name_list[1][0:module_name_list[1].index("#")]
        # decide upstream/downstream from whether the separator is '->' or '<-'
up_module_name = module_name_list[sep_list.index(sep)].strip()
down_module_name = module_name_list[1 - sep_list.index(sep)].strip()
return up_module_name, down_module_name
class EnvMMConfigParser(object):
"""
@author:youguiyan01
    @note: Parses the Env topology structure
"""
def __init__(self,file_dir,file):
self.type = 'mm'
self.conf = ConfigParser.SafeConfigParser()
self.conf.read(file_dir+file)
def get_conf_version(self):
return str(self._get_info("conf","version"))
def _get_section(self,section):
if not self.conf.has_section(section):
return None
return self.conf.items(section)
def _get_info(self,section_name, key_name):
if self.conf.has_option(section_name, key_name) == False:
return None
info = self.conf.get(section_name, key_name)
return info
def parse_host_module(self):
"""
        @note: Read the information of all hosts listed under [host]; for each host,
               look up the module information defined under that host and merge it with the host info
        @return return_module_list: list of module info dicts, each one holding the
                module name, class name, class config file, deploy path, host, user name and password
"""
return_host_list = []
return_module_list = []
host_list = self._get_section("host")
if host_list == None:
return None
for host in host_list:
host_name = host[0]
temp_info = host[1].split("\t")
host_info={}
host_info["host_name"] = temp_info[0]
host_info["user"] = temp_info[1]
host_info["password"] = temp_info[2]
return_host_list.append(host_info)
module_info_list = self._get_section(host_name)
if module_info_list == None:
#return None
continue
for one_module_info in module_info_list:
module_name = one_module_info[0]
temp_info = one_module_info[1].split("\t")
module_info = {}
module_info["module_name"] = module_name
module_info["class_name"] = temp_info[0]
module_info["conf_file"] = temp_info[1]
module_info["remote_path"] = temp_info[2]
module_info.update(host_info)
return_module_list.append(module_info)
return return_host_list,return_module_list
def parse_topology(self):
"""
        @note: Parse the topology
        @return return_topology_list: topology list, each element is one relation
                (up_module, down_module)
"""
return_topology_list = []
topology_list = self._get_section("topology")
if topology_list == None:
return return_topology_list
for topology in topology_list:
topology_info = topology[1].split("->")
if len(topology_info) <= 1:
continue
up_module = topology_info[0].strip(" ")
down_module = topology_info[1].strip(" ")
return_topology_list.append([up_module,down_module])
return return_topology_list
class EnvMMUbConfigParser(object):
"""
@author:lipeilong
    @note: Parse the new-style Env topology configuration file; supports multi-level section configuration, conf.version=3
"""
def __init__(self, conf_dir, conf_file):
self.type = 'mmub'
self.conf=Configure()
self.conf.load(conf_dir, conf_file)
self.dir = conf_dir
self.file = conf_file
def get_conf_version(self):
return str(self._get_info("conf","version"))
def _get_section(self,section):
key_value_list = self.conf[section].get_key_value()
return key_value_list
def _get_info(self,section_name, key_name):
info = self.conf[section_name][key_name]
if info == None:
return None
value = str(info)
return value
def parse_module_name_dict(self):
"""
        @note: Read the lib path information that corresponds to each lib class
"""
return_module_name_dict={}
if self.conf["module_class"] == None:
return None
for module_name in self.conf["module_class"].get_key_value():
return_module_name_dict[module_name[0]] = module_name[1]
return return_module_name_dict
def parse_host_module(self):
"""
        @note: Read the information of all hosts listed under [host]; for each host,
               look up the module information defined under that host and merge it with the host info
        @return return_module_list: list of module info dicts, each one holding the
                module name, class name, class config file, deploy path, host, user name and password
"""
return_host_list = []
return_module_list = []
index = 0
host_num = len(self.conf["deploy"]["host"])
while index < host_num:
host_info={}
host_info["host_name"] = str(self.conf["deploy"]["host"][index]["name"])
if self.conf["deploy"]["host"][index].has_key('user'):
host_info["user"] = str(self.conf["deploy"]["host"][index]["user"])
if self.conf["deploy"]["host"][index].has_key('password'):
host_info["password"] = str(self.conf["deploy"]["host"][index]["password"])
return_host_list.append(host_info)
            # parse the module information
index_module = 0
module_num = len(self.conf["deploy"]["host"][index]["module"])
while index_module < module_num:
module_info = {}
module_info["module_name"] = str(self.conf["deploy"]["host"][index]["module"][index_module]["name"])
module_info["class_name"] = str(self.conf["deploy"]["host"][index]["module"][index_module]["class"])
module_info["conf_file"] = str(self.conf["deploy"]["host"][index]["module"][index_module]["conf"])
if self.conf["deploy"]["host"][index]["module"][index_module].has_key("remote_path"):
module_info["remote_path"] = str(self.conf["deploy"]["host"][index]["module"][index_module]["remote_path"])
else:
module_info["remote_path"] = module_info["module_name"]
if self.conf["deploy"]["host"][index]["module"][index_module].has_key("cpuCoreNum"):
module_info["cpuCoreNum"] = str(self.conf["deploy"]["host"][index]["module"][index_module]['cpuCoreNum'])
if self.conf["deploy"]["host"][index]["module"][index_module].has_key("mem"):
module_info["mem"] = str(self.conf["deploy"]["host"][index]["module"][index_module]['mem'])
if self.conf["deploy"]["host"][index]["module"][index_module].has_key("disk"):
module_info["disk"] = str(self.conf["deploy"]["host"][index]["module"][index_module]['disk'])
if self.conf["deploy"]["host"][index]["module"][index_module].has_key("exclusive"):
module_info["exclusive"] = str(self.conf["deploy"]["host"][index]["module"][index_module]['exclusive'])
module_info.update(host_info)
return_module_list.append(module_info)
index_module = index_module + 1
index = index + 1
return return_host_list, return_module_list
def parse_topology(self):
"""
        @note: Parse the topology
        @return return_topology_list: topology list, each element is one relation
                (up_module, down_module)
"""
return_topology_list = []
index = 0
topo_list = self.conf["deploy"]["topology"].get_key_value()
for relation in topo_list:
topology_info = relation[1].split("->")
if len(topology_info) <= 1:
continue
up_module = topology_info[0].strip(" ")
down_module = topology_info[1].strip(" ")
return_topology_list.append([up_module,down_module])
index = index + 1
return return_topology_list
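# --- Minimal usage sketch (illustrative addition, not part of the original module) ---
# Assuming a topology file named "env_topology.conf" under "./conf/" (both names are
# hypothetical), EnvConfigHelper picks the parser matching the conf version and the
# (up_module, down_module) relations can be listed as follows.
if __name__ == '__main__':
    conf = EnvConfigHelper.get_conf("./conf/", "env_topology.conf")
    if conf is not None:
        for up_module, down_module in conf.parse_topology():
            print "%s -> %s" % (up_module, down_module)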
| [
"[email protected]"
] | |
d93e55450c39085ee035efdef32eaa204a90914b | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=9/params.py | de9ca510da01c4ee2e0649bfbafc64737c8bc56b | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.531929',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 9,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
195af1a86103dd62444c59e3c5fb1c2951f026c8 | 924147cf4ce118a1856bf0e6107d3dac671e6ac4 | /test_app/urls.py | 44861043de0c4c84c4ec4b65c1d706a9b60d1cc0 | [
"BSD-3-Clause"
] | permissive | jlongster/django-waffle | 4a3ec12477a7a2a783c8b3c661a4dbe313311d7c | acc8e4adb41e7713be9778460fc3e99e034b7511 | refs/heads/master | 2020-04-08T16:19:11.434019 | 2012-01-26T18:54:29 | 2012-01-26T18:54:29 | 3,734,066 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | from django.conf.urls.defaults import patterns, url, include
from django.contrib import admin
from django.http import HttpResponseNotFound, HttpResponseServerError
from test_app import views
from waffle.views import wafflejs
handler404 = lambda r: HttpResponseNotFound()
handler500 = lambda r: HttpResponseServerError()
admin.autodiscover()
urlpatterns = patterns('',
url(r'^flag_in_view', views.flag_in_view, name='flag_in_view'),
url(r'^wafflejs$', wafflejs, name='wafflejs'),
url(r'^switch-on', views.switched_view),
url(r'^switch-off', views.switched_off_view),
url(r'^flag-on', views.flagged_view),
url(r'^flag-off', views.flagged_off_view),
(r'^admin/', include(admin.site.urls))
)
| [
"[email protected]"
] | |
b4c9f4150acaabb1e1e2cf5d3ca2ba4d4604b129 | 793e9eeaa2b3018fac9fd7964eb2e5ebd11a4fa2 | /phd_thesis/pyROOT/readfile.py | fccfa452ee30e02324499f1d8122911e4dfefcc7 | [] | no_license | attikis/rootMacros | 2c516456c1e39e4306eb433925f5ca39f9ad9599 | 64e7e779f5b19bfbe2b28a2058b829c57b953c3d | refs/heads/master | 2021-05-09T21:41:31.447907 | 2019-07-18T07:15:07 | 2019-07-18T07:15:07 | 118,733,665 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,720 | py | #!/usr/bin env python
### to execute: |myLinux> python readfile.py testFile.dat
### System imports
import os, sys
from array import array
### ROOT imports
import ROOT
from ROOT import *
### HiggsAnalysis imports
from HiggsAnalysis.HeavyChHiggsToTauNu.tools.dataset import *
from HiggsAnalysis.HeavyChHiggsToTauNu.tools.histograms import *
from HiggsAnalysis.HeavyChHiggsToTauNu.tools.tdrstyle import *
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.styles as styles
### User imports
from bayes import *
from myArrays import *
### Provide the name of the file to be opened
if len(sys.argv) > 1:
file = sys.argv[1]
### Otherwise use the default name file.dat
else:
file = 'file.dat'
### If the file already exists remove it since the script will automatically generate one.
#if os.path.exists(file):
# os.system('rm %s'%(file))
#os.system('hplusPrintCounters3.py --mode xsect --mainCounterOnly > %s ; echo "EOF" >> %s'%(file,file))
######################################################################################
### Options
verbosity = False ### False to suppress most "print" commands
######################################################################################
### Open file for processing
file = open('%s'%(file), 'r')
### Define lists to be used
DataSetName = []
CutNamesAndValues = []
DataSetAndXSectAndNormFact = []
######################################################################################
### Loop over lines and save the cut-flow info (Cross Section for MC, Events for data)
lim = True
while file:
line = file.readline()
# split the line and puts elements in list s as strings
s = line.split()
if 'EOF' in s:
break
n = len(s)
# 10 is the number of columns
### Investigate lists which have three elements (i.e. 3 is the number of columns of each list)
if n == 3:
### Save the names of the datasets, cross section and normalisation factor
DataSetAndXSectAndNormFact.append([s[0], s[1], s[2]])
if n == 10:
if lim == True:
for i in range(n):
DataSetName.append(s[i])
lim = False
else:
#print "CutNamesAndValues\tCut\tValue"
for i in range(1,n):
CutNamesAndValues.append([DataSetName[i],s[0],float(s[i])])
#print "%s\t%s\t%.0f"%(DataSetName[i],s[0],float(s[i]))
### Print the DataSet Name, Cross Section and MC Normalisation Factor (ib this order)
#print "DataSetAndXSectAndNormFact", DataSetAndXSectAndNormFact
#print "DataSetAndXSectAndNormFact[0][0] = ", DataSetAndXSectAndNormFact[0][0]
#print "DataSetAndXSectAndNormFact[0][1] = ", DataSetAndXSectAndNormFact[0][1]
#print "DataSetAndXSectAndNormFact[0][2] = ", DataSetAndXSectAndNormFact[0][2]
### Define lists that will hold the information (Cut Name and Xsection/Events surviving Cut)
WJets = []
qcd30to50 = []
qcd50to80 = []
qcd80to120 = []
qcd120to170 = []
qcd170to230 = []
qcd230to300 = []
TTbarJets = []
TTbar = []
# hplus90 = []
# hplus100 = []
# hplus120 = []
# hplus140 = []
# hplus160 = []
######################################################################################
### Loop over all cut names and create new lists according to the DataSet Name
for item in CutNamesAndValues:
name = item[0]
tmp = [item[1],item[2]]
if name == 'WJets':
WJets.append(tmp)
if name == 'QCD_Pt30to50':
qcd30to50.append(tmp)
if name == 'QCD_Pt50to80':
qcd50to80.append(tmp)
if name == 'QCD_Pt80to120':
qcd80to120.append(tmp)
if name == 'QCD_Pt120to170':
qcd120to170.append(tmp)
if name == 'QCD_Pt170to230':
qcd170to230.append(tmp)
if name == 'QCD_Pt230to300':
qcd230to300.append(tmp)
if name == 'TTbarJets':
TTbarJets.append(tmp)
if name == 'TTbar':
TTbar.append(tmp)
######################################################################################
### Print lists
if verbosity == True:
print
print "***************************************************"
print "* Printing cut-flow for each CutNamesAndValuesset *"
print "***************************************************"
print 'qcd30to50'
print qcd30to50
print
print 'qcd50to80'
print qcd50to80
print
print 'qcd80to120'
print qcd80to120
print
print 'qcd120to170'
print qcd120to170
print
print 'qcd170to230'
print qcd170to230
print
print 'qcd230to300'
print qcd230to300
print
print 'TTbarJets'
print TTbarJets
print
print 'TTbar'
print TTbar
print
print 'W+Jets'
print WJets
print "***************************************************************************************************************"
######################################################################################
### Calculate contamination from EWK
XSectTotalEWK = TTbar[4][1]+WJets[4][1]
XSectPassedEWK = TTbar[-1][1]+WJets[-1][1]
### Calculations for combined QCD
XSectTotalQCD = qcd30to50[4][1]+qcd50to80[4][1]+qcd80to120[4][1]+qcd120to170[4][1]+qcd170to230[4][1]+qcd230to300[4][1]
XSectPassedQCD = qcd30to50[-1][1]+qcd50to80[-1][1]+qcd80to120[-1][1]+qcd120to170[-1][1]+qcd170to230[-1][1]+qcd230to300[-1][1]
### Calculate total QCD purity
QCD_purity = XSectPassedQCD/(XSectPassedEWK+XSectPassedQCD)
QCD_efficiency = XSectPassedQCD/XSectTotalQCD
### Declare (double) arrays to be filled with efficiencies and purities
effArray = array("d",[ ])
purArray = array("d",[ ])
#
purArray_errDown = array("d",[ ])
purArray_errUp = array("d",[ ])
#
effArray_errDown = array("d",[ ])
effArray_errUp = array("d",[ ])
######################################################################################
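### Illustrative helper (added sketch, not used by the script below): the six
### per-bin efficiency blocks and the six purity blocks that follow all repeat the
### same bayesDivide pattern and could be collapsed into one call such as
### getBayesRatio(passed, total, norm_factor), where norm_factor is the
### per-dataset MC normalisation factor used as a one-event upper limit.
def getBayesRatio(passed, total, norm_factor):
    obj = bayesDivide(passed, total)
    ratio = obj.GetY()[0]
    errUp = obj.GetEYhigh()[0]
    errDown = obj.GetEYlow()[0]
    ### fall back to a one-event upper limit when nothing passes, as done explicitly below
    if ratio == 0:
        ratio = norm_factor/total
    return ratio, errDown, errUp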
### Calculate Efficiency using the ROOT class "bayesDivide", for each QCD DataSet
if verbosity == True:
print
print "****************************"
print "* Calculating Efficiencies *"
print "****************************"
print "..."
print "***************************************************************************************************************"
effObj = bayesDivide(qcd30to50[-1][1], qcd30to50[4][1])
eff = effObj.GetY()[0]
eff_errUp = effObj.GetEYhigh()[0]
eff_errDown = effObj.GetEYlow()[0]
### If zero events survive the cut-flow try to get an upper limit for efficiency as: eff = 1 Evt/Total Evts (for xSection 1 Evt = Norm Factor)
if eff == 0:
eff = float(DataSetAndXSectAndNormFact[1][2])/qcd30to50[4][1]
if verbosity == True:
print "DataSetAndXSectAndNormFact[1][0]", DataSetAndXSectAndNormFact[1][0], " eff = ", eff
effArray.append(eff)
effArray_errDown.append(eff_errDown)
effArray_errUp.append(eff_errUp)
effObj = bayesDivide(qcd50to80[-1][1], qcd50to80[4][1])
eff = effObj.GetY()[0]
eff_errUp = effObj.GetEYhigh()[0]
eff_errDown = effObj.GetEYlow()[0]
### If zero events survive the cut-flow try to get an upper limit for efficiency as: eff = 1 Evt/Total Evts (for xSection 1 Evt = Norm Factor)
if eff == 0:
    eff = float(DataSetAndXSectAndNormFact[2][2])/qcd50to80[4][1]
if verbosity == True:
print "DataSetAndXSectAndNormFact[2][0]", DataSetAndXSectAndNormFact[2][0], " eff = ", eff
effArray.append(eff)
effArray_errDown.append(eff_errDown)
effArray_errUp.append(eff_errUp)
effObj = bayesDivide(qcd80to120[-1][1], qcd80to120[4][1])
eff = effObj.GetY()[0]
eff_errUp = effObj.GetEYhigh()[0]
eff_errDown = effObj.GetEYlow()[0]
### If zero events survive the cut-flow try to get an upper limit for efficiency as: eff = 1 Evt/Total Evts (for xSection 1 Evt = Norm Factor)
if eff == 0:
    eff = float(DataSetAndXSectAndNormFact[3][2])/qcd80to120[4][1]
if verbosity == True:
print "DataSetAndXSectAndNormFact[3][0]", DataSetAndXSectAndNormFact[3][0], " eff = ", eff
effArray.append(eff)
effArray_errDown.append(eff_errDown)
effArray_errUp.append(eff_errUp)
effObj = bayesDivide(qcd120to170[-1][1], qcd120to170[4][1])
eff = effObj.GetY()[0]
eff_errUp = effObj.GetEYhigh()[0]
eff_errDown = effObj.GetEYlow()[0]
### If zero events survive the cut-flow try to get an upper limit for efficiency as: eff = 1 Evt/Total Evts (for xSection 1 Evt = Norm Factor)
if eff == 0:
    eff = float(DataSetAndXSectAndNormFact[4][2])/qcd120to170[4][1]
if verbosity == True:
print "DataSetAndXSectAndNormFact[4][0]", DataSetAndXSectAndNormFact[4][0], " eff = ", eff
effArray.append(eff)
effArray_errDown.append(eff_errDown)
effArray_errUp.append(eff_errUp)
effObj = bayesDivide(qcd170to230[-1][1], qcd170to230[4][1])
eff = effObj.GetY()[0]
eff_errUp = effObj.GetEYhigh()[0]
eff_errDown = effObj.GetEYlow()[0]
### If zero events survive the cut-flow try to get an upper limit for efficiency as: eff = 1 Evt/Total Evts (for xSection 1 Evt = Norm Factor)
if eff == 0:
    eff = float(DataSetAndXSectAndNormFact[5][2])/qcd170to230[4][1]
if verbosity == True:
print "DataSetAndXSectAndNormFact[5][0]", DataSetAndXSectAndNormFact[5][0], " eff = ", eff
effArray.append(eff)
effArray_errDown.append(eff_errDown)
effArray_errUp.append(eff_errUp)
effObj = bayesDivide(qcd230to300[-1][1], qcd230to300[4][1])
eff = effObj.GetY()[0]
eff_errUp = effObj.GetEYhigh()[0]
eff_errDown = effObj.GetEYlow()[0]
### If zero events survive the cut-flow try to get an upper limit for efficiency as: eff = 1 Evt/Total Evts (for xSection 1 Evt = Norm Factor)
if eff == 0:
    eff = float(DataSetAndXSectAndNormFact[6][2])/qcd230to300[4][1]
if verbosity == True:
print "DataSetAndXSectAndNormFact[6][0]", DataSetAndXSectAndNormFact[6][0], " eff = ", eff
effArray.append(eff)
effArray_errDown.append(eff_errDown)
effArray_errUp.append(eff_errUp)
######################################################################################
### Calculate Purity
if verbosity == True:
print
print "************************"
print "* Calculating Purities *"
print "************************"
print "..."
print "***************************************************************************************************************"
purObj = bayesDivide(qcd30to50[-1][1], qcd30to50[4][1]+XSectPassedEWK)
pur = purObj.GetY()[0]
pur_errUp = purObj.GetEYhigh()[0]
pur_errDown = purObj.GetEYlow()[0]
if pur == 0:
pur = float(DataSetAndXSectAndNormFact[1][2])/(qcd30to50[4][1]+XSectPassedEWK)
if verbosity == True:
print "DataSetAndXSectAndNormFact[1][0]", DataSetAndXSectAndNormFact[1][0], " pur = ", pur
purArray.append(pur)
purArray_errDown.append(pur_errDown)
purArray_errUp.append(pur_errUp)
purObj = bayesDivide(qcd50to80[-1][1], qcd50to80[4][1]+XSectPassedEWK)
pur = purObj.GetY()[0]
pur_errUp = purObj.GetEYhigh()[0]
pur_errDown = purObj.GetEYlow()[0]
if pur == 0:
    pur = float(DataSetAndXSectAndNormFact[2][2])/(qcd50to80[4][1]+XSectPassedEWK)
if verbosity == True:
print "DataSetAndXSectAndNormFact[2][0]", DataSetAndXSectAndNormFact[2][0], " pur = ", pur
purArray.append(pur)
purArray_errDown.append(pur_errDown)
purArray_errUp.append(pur_errUp)
purObj = bayesDivide(qcd80to120[-1][1], qcd80to120[4][1]+XSectPassedEWK)
pur = purObj.GetY()[0]
pur_errUp = purObj.GetEYhigh()[0]
pur_errDown = purObj.GetEYlow()[0]
if pur == 0:
    pur = float(DataSetAndXSectAndNormFact[3][2])/(qcd80to120[4][1]+XSectPassedEWK)
if verbosity == True:
print "DataSetAndXSectAndNormFact[3][0]", DataSetAndXSectAndNormFact[3][0], " pur = ", pur
purArray.append(pur)
purArray_errDown.append(pur_errDown)
purArray_errUp.append(pur_errUp)
purObj = bayesDivide(qcd120to170[-1][1], qcd120to170[4][1]+XSectPassedEWK)
pur = purObj.GetY()[0]
pur_errUp = purObj.GetEYhigh()[0]
pur_errDown = purObj.GetEYlow()[0]
if pur == 0:
    pur = float(DataSetAndXSectAndNormFact[4][2])/(qcd120to170[4][1]+XSectPassedEWK)
if verbosity == True:
print "DataSetAndXSectAndNormFact[4][0]", DataSetAndXSectAndNormFact[4][0], " pur = ", pur
purArray.append(pur)
purArray_errDown.append(pur_errDown)
purArray_errUp.append(pur_errUp)
purObj = bayesDivide(qcd170to230[-1][1], qcd170to230[4][1]+XSectPassedEWK)
pur = purObj.GetY()[0]
pur_errUp = purObj.GetEYhigh()[0]
pur_errDown = purObj.GetEYlow()[0]
if pur == 0:
    pur = float(DataSetAndXSectAndNormFact[5][2])/(qcd170to230[4][1]+XSectPassedEWK)
if verbosity == True:
print "DataSetAndXSectAndNormFact[5][0]", DataSetAndXSectAndNormFact[5][0], " pur = ", pur
purArray.append(pur)
purArray_errDown.append(pur_errDown)
purArray_errUp.append(pur_errUp)
purObj = bayesDivide(qcd230to300[-1][1], qcd230to300[4][1]+XSectPassedEWK)
pur = purObj.GetY()[0]
pur_errUp = purObj.GetEYhigh()[0]
pur_errDown = purObj.GetEYlow()[0]
if pur == 0:
    pur = float(DataSetAndXSectAndNormFact[6][2])/(qcd230to300[4][1]+XSectPassedEWK)
if verbosity == True:
print "DataSetAndXSectAndNormFact[6][0]", DataSetAndXSectAndNormFact[6][0], " pur = ", pur
purArray.append(pur)
purArray_errDown.append(pur_errDown)
purArray_errUp.append(pur_errUp)
# ### Remove/Fix the entries below
# purArray.append(QCD_purity)
# purArray_errDown.append(0.01)
# purArray_errUp.append(0.01)
# effArray.append(QCD_efficiency)
# effArray_errDown.append(0.0001)
# effArray_errUp.append(0.01)
######################################################################################
if verbosity == True:
print
print "**************"
print "* Efficiency *"
print "**************"
print "effArray = ", effArray
print
print "effArray_errDown = ", effArray_errDown
print
print "effArray_errUp = ", effArray_errUp
print
print "**********"
print "* Purity *"
print "**********"
print "purArray = ", purArray
print
print "purArray_errDown = ", purArray_errDown
print
print "purArray_errUp = ", purArray_errDown
###
print
print "--> Info: Total Purity of QCD Sample = ", QCD_purity
print
######################################################################################
### Customise ROOT
gROOT.Reset()
ROOT.gROOT.SetBatch(False)
style = TDRStyle()
style.setPalettePretty()
style.setWide(True)
purArray_alt = array("d",[ ])
purArray_errDown_alt = array("d",[ ])
purArray_errUp_alt = array("d",[ ])
purObj_alt = bayesDivide(XSectPassedQCD, XSectPassedQCD+XSectPassedEWK)
pur_alt = purObj_alt.GetY()[0]
pur_errUp_alt = purObj_alt.GetEYhigh()[0]
pur_errDown_alt = purObj_alt.GetEYlow()[0]
for i in range(0,6):
#print i
purArray_alt.append(pur_alt)
    purArray_errDown_alt.append(pur_errDown_alt)
    purArray_errUp_alt.append(pur_errUp_alt)
# purArray_alt = array("d",[QCD_purity,QCD_purity,QCD_purity,QCD_purity,QCD_purity,QCD_purity ])
### Draw TGraph with Asymmetric Error Bars using Bayesian Statistical Tools
# graph = TGraphAsymmErrors(6, purArray, effArray, purArray_errDown, purArray_errUp, effArray_errDown, effArray_errUp) ### works
graph = TGraphAsymmErrors(6, purArray_alt, effArray, purArray_errDown_alt, purArray_errUp_alt, effArray_errDown, effArray_errUp) ### works
graph.Draw("AP")
graph.SetMarkerStyle(kFullCircle)
graph.SetMarkerColor(2)
#graph.GetXaxis().SetRangeUser( 0.0 , 1.0) #???
#graph.GetYaxis().SetRangeUser( 0.0 , 1.0) #???
graph.SetTitle( "#epsilon Vs QCD purities (EWK)" )
graph.GetYaxis().SetTitle("Efficiency (#epsilon) of selection")
graph.GetXaxis().SetTitle("Purity of QCD Sample")
### Customise gPad
gPad.SetLogy(1)
gPad.Update()
# ### Test Histogram with efficiencies only
# hEff=TH1F('efficiencies', '#epsilon', 1000, 0.0, 0.002)
# hEff.Fill(effArray[0])
# hEff.Fill(effArray[1])
# hEff.Fill(effArray[2])
# hEff.Fill(effArray[3])
# hEff.Fill(effArray[4])
# hEff.Fill(effArray[5])
# hEff.Draw()
################
print
raw_input('Press \"Enter\" to exit to terminal ...')
| [
"[email protected]"
] |