Dataset schema (column name, dtype, observed range or class count):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |
5a8e8d819963505695e442f8feb3dc849404db3d
|
f03bd5bd7873c5cc33b4ef5199f219539f3a340e
|
/CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/plotting/maps.py
|
34c95944a5b48c35565bf382c7f5185c18db72ec
|
[
"GPL-1.0-or-later",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-philippe-de-muyter",
"MIT"
] |
permissive
|
Stargrazer82301/CAAPR
|
5f8a7033b16792f23abd5d07021b53b9228a5db4
|
62b2339beb2eb956565e1605d44d92f934361ad7
|
refs/heads/master
| 2022-08-29T02:53:33.658022 | 2022-08-05T19:06:46 | 2022-08-05T19:06:46 | 49,977,601 | 8 | 1 |
MIT
| 2022-08-05T19:06:47 | 2016-01-19T19:32:42 |
Python
|
UTF-8
|
Python
| false | false | 3,449 |
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.plotting.maps Contains the MapsPlotter class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from .component import PlottingComponent
from ..maps.component import MapsComponent
from ...core.tools import filesystem as fs
from ...core.tools.logging import log
from ...magic.core.frame import Frame
from ...magic.plot.imagegrid import StandardImageGridPlotter
# -----------------------------------------------------------------
class MapsPlotter(PlottingComponent, MapsComponent):
"""
This class...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
#super(MapsPlotter, self).__init__(config) # not sure this works
PlottingComponent.__init__(self, config)
MapsComponent.__init__(self)
# -- Attributes --
# The dictionary of image frames
self.images = dict()
# -----------------------------------------------------------------
def run(self, features=None):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the images
self.load_images()
# 3. Plot
self.plot()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(MapsPlotter, self).setup()
# -----------------------------------------------------------------
def load_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the maps ...")
for name in ["dust", "ionizing_stars", "old_stars", "young_stars"]:
# Determine the path to the image
path = fs.join(self.maps_path, name + ".fits")
# Debugging
log.debug("Loading the " + name + " image ...")
# Open the map
image = Frame.from_file(path)
# Add the image to the dictionary
self.images[name] = image
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Create the image plotter
plotter = StandardImageGridPlotter()
# Add the images
for label in self.images: plotter.add_image(self.images[label], label)
# Determine the path to the plot file
path = fs.join(self.plot_maps_path, "maps.pdf")
plotter.colormap = "hot"
plotter.vmin = 0.0
plotter.set_title("Input maps")
# Make the plot
plotter.run(path)
# -----------------------------------------------------------------
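# Hedged usage sketch (an assumption, not part of the original file: it
# presumes a prepared PTS modeling environment, since the base classes
# resolve maps_path and plot_maps_path from the configuration):
# plotter = MapsPlotter()
# plotter.run()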
|
[
"[email protected]"
] | |
c2aa9abcfa5c036633a093ef2a54e4933f52f65f
|
d820c8efb25c9adb77015650a0f7dc6f1e983bfe
|
/abc/abc050_c.py
|
2ee42d95bfc181d741d4eb9244b9dd97dde8f042
|
[] |
no_license
|
toshikish/atcoder
|
73fdaa2310f23f846279f9f7466bdb969448371f
|
33676630d6820dd92ccf0931425b8906b065bedd
|
refs/heads/master
| 2022-05-16T20:00:52.665762 | 2022-04-02T11:55:44 | 2022-04-02T11:55:44 | 173,099,510 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
from collections import defaultdict
N = int(input())
A = list(map(int, input().split()))
c = defaultdict(int)
for a in A:
c[a] += 1
if N % 2 == 0:
    possible = True
    for i in range(1, N, 2):
        if c[i] != 2:
            possible = False
    ans = 2 ** (N // 2) % (10 ** 9 + 7) if possible else 0
else:
    possible = True
    if c[0] != 1:
        possible = False
    for i in range(2, N, 2):
        if c[i] != 2:
            possible = False
    ans = 2 ** ((N - 1) // 2) % (10 ** 9 + 7) if possible else 0
print(ans)
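# Illustrative check (input values assumed to match the AtCoder ABC050-C
# sample): N=5, A=[2, 4, 4, 0, 2] gives c = {2: 2, 4: 2, 0: 1}; the odd-N
# conditions hold, so ans = 2 ** 2 % (10 ** 9 + 7) = 4.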
|
[
"[email protected]"
] | |
372c60c9ffa1d43f6ea24aa8501c2db618e5bbce
|
f3e51466d00510f1dae58f1cb87dd53244ce4e70
|
/LeetCodes/272. Closest Binary Search Tree Value II.py
|
d1d13fab5f033ce4abb1ae9ab45dffa36d528771
|
[] |
no_license
|
chutianwen/LeetCodes
|
40d18e7aa270f8235342f0485bfda2bd1ed960e1
|
11d6bf2ba7b50c07e048df37c4e05c8f46b92241
|
refs/heads/master
| 2022-08-27T10:28:16.594258 | 2022-07-24T21:23:56 | 2022-07-24T21:23:56 | 96,836,652 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,395 |
py
|
'''
272. Closest Binary Search Tree Value II
Hard
Given a non-empty binary search tree and a target value, find k values in the BST that are closest to the target.
Note:
Given target value is a floating point.
You may assume k is always valid, that is: k ≤ total nodes.
You are guaranteed to have only one unique set of k values in the BST that are closest to the target.
Example:
Input: root = [4,2,5,1,3], target = 3.714286, and k = 2

        4
       / \
      2   5
     / \
    1   3

Output: [4,3]
Follow up:
Assume that the BST is balanced, could you solve it in less than O(n) runtime (where n = total nodes)?
'''
class Solution:
def closestKValues(self, root, target, k):
# Helper, takes a path and makes it the path to the next node
def nextpath(path, kid1, kid2):
if path:
if kid2(path):
path += kid2(path),
while kid1(path):
path += kid1(path),
else:
kid = path.pop()
while path and kid is kid2(path):
kid = path.pop()
# These customize nextpath as forward or backward iterator
kidleft = lambda path: path[-1].left
kidright = lambda path: path[-1].right
# Build path to closest node
path = []
while root:
path += root,
root = root.left if target < root.val else root.right
dist = lambda node: abs(node.val - target)
path = path[:path.index(min(path, key=dist))+1]
# Get the path to the next larger node
path2 = path[:]
nextpath(path2, kidleft, kidright)
for x in path2:
print(x.val)
# Collect the closest k values by moving the two paths outwards
vals = []
for _ in range(k):
if not path2 or path and dist(path[-1]) < dist(path2[-1]):
vals += path[-1].val,
nextpath(path, kidright, kidleft)
else:
vals += path2[-1].val,
nextpath(path2, kidleft, kidright)
return vals
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def closestKValues(self, root, target, k):
"""
:type root: TreeNode
:type target: float
:type k: int
:rtype: List[int]
"""
min_diff = float('inf')
dis = lambda x: abs(x.val - target)
path_smaller = []
while root:
path_smaller.append(root)
root = root.left if target < root.val else root.right
path_smaller = path_smaller[: path_smaller.index(min(path_smaller, key=dis)) + 1]
frontier_left = lambda path: path[-1].left
frontier_right = lambda path: path[-1].right
def next_path(path, frontier1, frontier2):
            # frontier1 is the side to advance toward; after advancing, descend along
            # frontier2 as far as possible. With no frontier1 child, pop ancestors
            # until the node we came from is no longer the frontier1 child.
if frontier1(path):
path.append(frontier1(path))
while frontier2(path):
path.append(frontier2(path))
else:
node = path.pop()
while path and node is frontier1(path):
node = path.pop()
path_larger = path_smaller[:]
next_path(path_larger, frontier_right, frontier_left)
for x in path_larger:
print(x.val)
res = []
dis = lambda x: abs(x.val - target)
for _ in range(k):
if not path_larger or path_smaller and dis(path_smaller[-1]) < dis(path_larger[-1]):
res.append(path_smaller[-1].val)
next_path(path_smaller, frontier_left, frontier_right)
else:
res.append(path_larger[-1].val)
next_path(path_larger, frontier_right, frontier_left)
return res
class Solution:
def closestKValues(self, root, target, k):
abs_dis = lambda x: abs(x.val - target)
stack_small = []
while root:
stack_small.append(root)
root = root.left if target < root.val else root.right
closest_cut = min(stack_small, key=abs_dis)
stack_small = stack_small[:stack_small.index(closest_cut) + 1]
stack_large = stack_small[:]
def next(stack, fun1, fun2):
if fun1(stack):
stack.append(fun1(stack))
while fun2(stack):
stack.append(fun2(stack))
else:
cur = stack.pop()
while stack and cur == fun1(stack):
cur = stack.pop()
frontier_left = lambda x: x[-1].left
frontier_right = lambda x: x[-1].right
next(stack_large, frontier_right, frontier_left)
res = []
for _ in range(k):
if not stack_large or stack_small and abs_dis(stack_small[-1]) <= abs_dis(stack_large[-1]):
res.append(stack_small[-1].val)
next(stack_small, frontier_left, frontier_right)
else:
res.append(stack_large[-1].val)
next(stack_large, frontier_right, frontier_left)
return res
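# Hedged smoke test (illustrative, not part of the original file): TreeNode
# is redefined here because the definition above is commented out; the tree
# and arguments follow the example in the problem statement, and Solution
# resolves to the last class defined above.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(4)
    root.left = TreeNode(2)
    root.right = TreeNode(5)
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(3)
    print(Solution().closestKValues(root, 3.714286, 2))  # expected: [4, 3]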
|
[
"[email protected]"
] | |
269c20a755b2e945ab30e91074fb5b1c3c6610fc
|
c380976b7c59dadaccabacf6b541124c967d2b5a
|
/.history/src/data/data_20191019130008.py
|
488e9d3809c5c14aace59c9e423def076851d17e
|
[
"MIT"
] |
permissive
|
bkraft4257/kaggle_titanic
|
b83603563b4a3c995b631e8142fe72e1730a0e2e
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
refs/heads/master
| 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,144 |
py
|
import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
def __init__(self, filename: Union[str, Path], drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.extract_raw()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={'age':'age_known'})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"Rev.": "Mr.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
    def __init__(self, raw_data, adult_age_threshold_min=13, drop_columns=None):
        """Transform extracted raw data.

        Arguments:
            raw_data {ExtractData} -- container holding the extracted raw dataframe.
            adult_age_threshold_min -- minimum age at which a passenger counts as an adult.
            drop_columns -- columns in the dataframe that should be dropped.
        """
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.drop_columns = drop_columns
        self.raw = raw_data
        self.adult_age_threshold_min = adult_age_threshold_min
self.Xy = self.raw.Xy_raw.copy()
self.extract_title()
self.extract_last_name()
self.extract_cabin_number()
self.extract_cabin_prefix()
self.estimate_age()
self.calc_is_child()
def calc_is_child(self):
self.Xy['is_child'] = self.Xy.age < self.adult_age_threshold_min
    def extract_cabin_number(self):
        # note: the number is parsed from the ticket column
        self.Xy['cabin_number'] = self.Xy.ticket.str.extract(r'(\d+)$')
    def extract_cabin_prefix(self):
        # note: the prefix is parsed from the ticket column
        self.Xy['cabin_prefix'] = self.Xy.ticket.str.extract(r'^(.+) ')
def extract_title(self):
"""[summary]
"""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
    def clean(self):
        """Clean data to remove missing data and "unnecessary" features."""
        self.Xy = self.Xy.drop(self.drop_columns, axis=1)
def estimate_age(self, Xy_age_estimate=None, groupby_columns=['sex','title']):
"""[summary]
Keyword Arguments:
groupby {list} -- [description] (default: {['sex','title']})
"""
if Xy_age_estimate is None:
Xy_age_estimate = self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
Xy_age_estimate = Xy_age_estimate.rename(columns ={'age_known':'age_estimate'})
out_df = self.Xy.reset_index().merge(Xy_age_estimate, on=groupby_columns)
self.Xy['age'] = self.Xy['age_known'].fillna(out_df['age_estimate'])
self.Xy_age_estimate = Xy_age_estimate
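# Hedged usage sketch ("train.csv" is an assumed filename with the Kaggle
# Titanic columns):
# raw = ExtractData("train.csv")
# transformed = TransformData(raw)
# transformed.Xy[["age", "age_known", "title", "is_child"]].head()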
|
[
"[email protected]"
] | |
2b3a6a3b8a0cd3fafbbbf5f6845e94e7ea145772
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba2526.pngMap.py
|
dbdb9cdfd109add3c261a76f235b40924fc08b33
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,468 |
py
|
ba2526.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111100000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011100000000000000000000000000000000',
'00001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000',
'00000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000',
'00001110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001110000000000000000000000000000',
'00000110000001001111011000000000000000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000',
'10001110000011001111111000000000000000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000',
'11111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000111000000000000000000000000',
'11111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000111000000000000000000000000',
'11111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000011111111010000011100011010',
'11111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111110',
'11111111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111110',
'11111111111111111111111111110100000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111110',
'11111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000001011111111111111111111111110',
'11111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111',
'11111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111',
'11111111111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111',
'11111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111',
'11111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111',
'11111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111110',
'11111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111110',
'11111111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111',
'11111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111000011',
'11111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111100000',
'11111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111110000',
'00111111111111111111111111000000000000000000000000000000000000000000000000000000000000000010111111111111111111111111111111110000',
'00111111111111100000000111000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111110000',
'00001111111110100000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111110000',
'00101111111111000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111100',
'00001111111101000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111110000',
'00000111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000110111111111111111110100000',
'00000011111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111110000000000',
'00000011111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001100000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000',
'00000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000111001000000011111000000000000',
'00000000000000000000000000001000110000000000000000000000000000000000000000000000000000000000000000111111111100111111110000000000',
'00001110000000000000000001111111110000000000000000000000000000000000000000000000000000000000000010111111111111111111111100000000',
'00011111110000000000000000111111110000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000001',
'00011111111100000000001111111111110000000000000000000000000000000000000000000000000000000000000000111111111111111111111110001011',
'00011111111100000000011111111111110000000000000000000000000000000000000000000000000000000000000100111111111111111111111111011111',
'00001111111111000000011111111111110000000000000000000000000000000000000000000000000010011111111111111111111111111111111111111110',
'00001111111111100000111111111111100000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111110',
'00001011111111000000111111111111110000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111',
'00001011111111100000111111111111000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111',
'00000011111111110001111111111111000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111',
'00000010111111111001111111111111000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111110',
'00000000111111111111111111111000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111',
'00000000111111111111111111111000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111',
'00000000001111111111111111110000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111',
'01000000001111111111111111111101111111000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111000000000000000000000000000000001011111111111111111111111111111111111111111111111',
'00111111111111111111111111111111111111111111111111100000000011100000000000000111111111111111111111111111111111111111111111111111',
]
|
[
"[email protected]"
] | |
30e15a1b111b3db9ba288c9d3dc8e0e6c1a8ff63
|
077c91b9d5cb1a6a724da47067483c622ce64be6
|
/load_balancer_fuzzer_mcs/intermcs_19_/interactive_replay_config.py
|
c579a012fb2bdbbe402547c7e4eedc9f0915ee81
|
[] |
no_license
|
Spencerx/experiments
|
0edd16398725f6fd9365ddbb1b773942e4878369
|
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
|
refs/heads/master
| 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,099 |
py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose --unthreaded-sh misc.ip_loadbalancer --ip=123.123.1.3 --servers=123.123.2.3,123.123.1.3 sts.util.socket_mux.pox_monkeypatcher openflow.discovery openflow.of_01 --address=__address__ --port=__port__', label='c1', address='127.0.0.1', cwd='dart_pox')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=True,
kill_controllers_on_exit=True)
control_flow = InteractiveReplayer(simulation_config, "experiments/load_balancer_fuzzer_mcs/intermcs_19_/mcs.trace.notimeouts")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'check_for_ofp_error'
# Bug signature: "ERROR_SENT"
|
[
"[email protected]"
] | |
85696669f20f3e9e72ae887dfea4980d60f2d30c
|
cf7c928d6066da1ce15d2793dcf04315dda9b9ed
|
/Jungol/Lv1_LCoder_Python/pyc0_리스트3/Main_JO_924_리스트3_자가진단4.py
|
5d2a061396cfe3ecaa9addc21c493d77e9562c33
|
[] |
no_license
|
refresh6724/APS
|
a261b3da8f53de7ff5ed687f21bb1392046c98e5
|
945e0af114033d05d571011e9dbf18f2e9375166
|
refs/heads/master
| 2022-02-01T23:31:42.679631 | 2021-12-31T14:16:04 | 2021-12-31T14:16:04 | 251,617,280 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
# Read a natural number no greater than 100. Initialize the first term
# to 100 and the second term to the input number; from then on, each
# term is the term two back minus the previous term. Print the sequence
# until a negative number appears.
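# For example, with input 7 the sequence is 100, 7, 93, -86, so the
# program prints: 100 7 93 -86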
a = 100
b = int(input())
seq = [a, b]
c = a - b
while c >= 0:
    seq.append(c)
    a = b
    b = c
    c = a - b
seq.append(c)
print(*seq, sep=" ")
|
[
"[email protected]"
] | |
c4a06e2b61bcf307acb87e92eaa2c9ff5afa4e5a
|
ed12e1905d71e2ff8ff01f39e8d2ebd2e8ccda1f
|
/Chapter 18/spiralDraw.py
|
7f3bdce70ef5205cb0a9f9c14537bcdd93946f70
|
[] |
no_license
|
afettouhi/AutomatetheBoringStuffwithPython-py38
|
ac18c28a78c0fe9b4b3afd900f668a50a92203db
|
f8cfc1761257983280039246d3fa3ebe65ec84cb
|
refs/heads/master
| 2022-12-01T00:44:09.828472 | 2020-08-01T04:47:46 | 2020-08-01T04:47:46 | 281,289,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 572 |
py
|
import pyautogui
import time
print('5 seconds till it starts')
pyautogui.LOG_SCREENSHOTS = True
pyautogui.LOG_SCREENSHOTS_LIMIT = 100
time.sleep(5)
pyautogui.click() # click to put drawing program in focus
distance = 300
shrink = 20
while distance > 0:
pyautogui.dragRel(distance, 0, duration=0.1) # move right
distance = distance - shrink
pyautogui.dragRel(0, distance, duration=0.1) # move down
pyautogui.dragRel(-distance, 0, duration=0.1) # move left
distance = distance - shrink
pyautogui.dragRel(0, -distance, duration=0.1) # move up
|
[
"[email protected]"
] | |
36106fd4f837ef5a89f665a1d2bd3c2438f6df1f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_nighties.py
|
f7282804ffcdbc15e6ef297e404c1b07eea351a2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 226 |
py
|
# class header
class _NIGHTIES():
    def __init__(self):
        self.name = "NIGHTIES"
        self.definitions = ["nightie"]
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['nightie']
|
[
"[email protected]"
] | |
57e2a50254badcb57543d4facb1a0485fdcb2a11
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/generated_clients/apis/tpu/v1beta1/resources.py
|
a1b11eb5bd917f4daa240b150019a41f2041e4d1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 |
NOASSERTION
| 2022-10-29T20:49:13 | 2021-02-02T05:47:30 |
Python
|
UTF-8
|
Python
| false | false | 1,722 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""
import enum
BASE_URL = 'https://tpu.googleapis.com/v1beta1/'
DOCS_URL = 'https://cloud.google.com/tpu/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
[u'projectsId'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}',
},
[u'name'],
True
)
PROJECTS_LOCATIONS_OPERATIONS = (
'projects.locations.operations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/operations/'
'{operationsId}',
},
[u'name'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
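# Illustrative lookups implied by the definitions above (not part of the
# generated file):
#   Collections.PROJECTS.path == 'projects/{projectsId}'
#   Collections.PROJECTS_LOCATIONS.flat_paths[''] ==
#       'projects/{projectsId}/locations/{locationsId}'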
|
[
"[email protected]"
] | |
ca76d2d1c807f66ca7556dc77a4ddb73eab2cd23
|
f8360e6eef89f9b78365a73e7abacf87db1d880a
|
/models/hospitality/ambulance.py
|
bdf8c525a7757a874f200ac727dccadb8dc04407
|
[] |
no_license
|
Trilokan/sivappu
|
022c6b5997213db8c8994429bf5e482f42b8464d
|
110c95851a970f051a50bed6ee72be542ca91efe
|
refs/heads/master
| 2020-05-04T20:44:42.555829 | 2019-04-29T12:34:08 | 2019-04-29T12:34:08 | 179,449,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,961 |
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from datetime import datetime
PROGRESS_INFO = [("draft", "Draft"),
("confirmed", "Confirmed"),
("driver_intimated", "Driver to Intimated"),
("done", "Done"),
("cancel", "Cancel")]
CANCEL_INFO = [("cancel", "Cancel")]
CURRENT_DATE = datetime.now().strftime("%Y-%m-%d")
CURRENT_TIME = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
INDIA_TIME = datetime.now().strftime("%d-%m-%Y %H:%M:%S")
# Ambulance
class Ambulance(models.Model):
_name = "arc.ambulance"
_inherit = "mail.thread"
date = fields.Date(string="Date", default=CURRENT_DATE, required=True)
name = fields.Char(string="Name", readonly=True)
driver_id = fields.Many2one(comodel_name="arc.person", string="Driver", required=True)
patient_id = fields.Many2one(comodel_name="arc.person", string="Patient", required=True)
source_address = fields.Text(string="Address")
source_landmark = fields.Char(string="Landmark")
source_contact = fields.Char(string="Contact 1")
source_contact_2 = fields.Char(string="Contact 2")
destination_address = fields.Text(string="Address")
destination_landmark = fields.Char(string="Landmark")
destination_contact = fields.Char(string="Contact 1")
destination_contact_2 = fields.Char(string="Contact 2")
progress = fields.Selection(selection=PROGRESS_INFO, string="Progress", default="draft")
is_cancel = fields.Selection(selection=CANCEL_INFO, string="Is Cancel")
distance = fields.Float(string="Distance(KM)", default=0.0, required=True)
charges_km = fields.Float(string="Charges (per KM)", default=0.0, required=True)
others = fields.Float(string="Others", default=0.0, required=True)
total_amount = fields.Float(string="Total", default=0.0, required=True)
writter = fields.Text(string="Writter", track_visibility="always")
@api.multi
def trigger_confirm(self):
writter = "Ambulance confirmed by {0} on {1}".format(self.env.user.name, INDIA_TIME)
self.write({"progress": "confirmed", "writter": writter})
    @api.multi
    def trigger_inform_driver(self):
        writter = "Ambulance informed to {0} by {1} on {2}".format(self.driver_id.name, self.env.user.name, INDIA_TIME)
        self.write({"progress": "driver_intimated", "writter": writter})
    @api.multi
    def trigger_done(self):
        writter = "{0} shifted by {1} on {2}".format(self.patient_id.name, self.driver_id.name, INDIA_TIME)
        self.write({"progress": "done", "writter": writter})
    @api.multi
    def trigger_cancel(self):
        writter = "Ambulance cancelled by {0} on {1}".format(self.env.user.name, INDIA_TIME)
        self.write({"progress": "cancel", "writter": writter})
@api.model
def create(self, vals):
vals["name"] = self.env["ir.sequence"].next_by_code(self._name)
return super(Ambulance, self).create(vals)
|
[
"[email protected]"
] | |
e76e790450bcd46e5b5dcd70d7a0b61000286552
|
4b7e282fe480415f5d52c0fc0429f144156190fe
|
/examples/campaign_management/validate_text_ad.py
|
c417044271af2550cbaf807dc27cd05064965c41
|
[
"Apache-2.0"
] |
permissive
|
Z2Xsoft/google-ads-python
|
c4750357bb19da91bb3b6bf2fa84bef9d2df36d3
|
1779d52a0446c8afb2437b0a9e103dcb849f5590
|
refs/heads/main
| 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 |
Apache-2.0
| 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null |
UTF-8
|
Python
| false | false | 4,625 |
py
|
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows use of the validateOnly header for an expanded text ad.
No objects will be created, but exceptions will still be thrown.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, ad_group_id):
ad_group_ad_operation = client.get_type("AdGroupAdOperation")
ad_group_ad = ad_group_ad_operation.create
ad_group_service = client.get_service("AdGroupService")
ad_group_ad.ad_group = ad_group_service.ad_group_path(
customer_id, ad_group_id
)
ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED
# Create an expanded text ad.
ad_group_ad.ad.expanded_text_ad.description = "Luxury Cruise to Mars"
ad_group_ad.ad.expanded_text_ad.headline_part1 = (
"Visit the Red Planet in style."
)
# Adds a headline that will trigger a policy violation to demonstrate error
# handling.
ad_group_ad.ad.expanded_text_ad.headline_part2 = (
"Low-gravity fun for everyone!!"
)
ad_group_ad.ad.final_urls.append("http://www.example.com/")
ad_group_ad_service = client.get_service("AdGroupAdService")
# Attempt the mutate with validate_only=True.
try:
request = client.get_type("MutateAdGroupAdsRequest")
request.customer_id = customer_id
request.operations.append(ad_group_ad_operation)
request.partial_failure = False
request.validate_only = True
response = ad_group_ad_service.mutate_ad_group_ads(request=request)
print('"Expanded text ad validated successfully.')
except GoogleAdsException as ex:
# This will be hit if there is a validation error from the server.
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}".'
)
print(
"There may have been validation error(s) while adding expanded "
"text ad."
)
policy_error_enum = client.get_type(
"PolicyFindingErrorEnum"
).PolicyFindingError.POLICY_FINDING
count = 1
for error in ex.failure.errors:
# Note: Policy violation errors are returned as PolicyFindingErrors.
# For additional details, see
# https://developers.google.com/google-ads/api/docs/policy-exemption/overview
if error.error_code.policy_finding_error == policy_error_enum:
if error.details.policy_finding_details:
details = (
error.details.policy_finding_details.policy_topic_entries
)
for entry in details:
print(f"{count}) Policy topic entry: \n{entry}\n")
count += 1
else:
print(
f"\tNon-policy finding error with message "
f'"{error.message}".'
)
if error.location:
for (
field_path_element
) in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description="Shows how to use the ValidateOnly header."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a", "--ad_group_id", type=str, required=True, help="The Ad Group ID."
)
args = parser.parse_args()
main(googleads_client, args.customer_id, args.ad_group_id)
|
[
"[email protected]"
] | |
0fba4f05eb71350e2e99e4e969652cb301f906c5
|
ae002b7c03eacb4081c9234fa07c5201ec703941
|
/tasklock/tests.py
|
c58ea9334ed9a1e9d4effe8b6fd9aa83638aeaf1
|
[] |
no_license
|
zbyte64/django-tasklock
|
53ea4ab4454bcece54ec1835166fb2bb5998c7ae
|
75f848fde78f488457be54304027f82d12f48c25
|
refs/heads/master
| 2021-01-18T17:25:24.749657 | 2011-10-15T05:09:15 | 2011-10-15T05:09:15 | 2,580,617 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 412 |
py
|
from django.test import TestCase
from models import TaskLock
import tasks
class TaskLockTest(TestCase):
def testPing(self):
task_lock = TaskLock.objects.schedule_task('thekey', 'celery.ping')
self.assertEqual(TaskLock.objects.all().count(), 1)
task_lock.ready()
def testCleanup(self):
#TODO load in data for it to cleanup
tasks.cleanup_finished_task_locks()
|
[
"[email protected]"
] | |
1c7dc028dc165fd5bc7b49df4beb82a4a9f66004
|
9ecfa2dfa544dc77c8adc92f414f506846823e23
|
/scripts/roficalc/roficalc
|
350faf4ad31b9a22935d23c2cee0fb2102ef0e4a
|
[
"MIT"
] |
permissive
|
grimpy/rofi-scripts
|
164eab6de2174acf33eec2d2410fd6d43df22900
|
5cee30e9b0ad5fddcd0c5cea12ce6eb14bd86bdc
|
refs/heads/master
| 2021-04-06T11:11:13.828931 | 2018-03-10T20:22:04 | 2018-03-10T20:22:04 | 124,650,044 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 539 |
#!/usr/bin/env python3
import sys
from sys import argv
# allow users to use math function like sqrt sin and so on easily
from math import *
if len(argv) == 1: # scriptname
print("Enter math expression to evaluate")
else:
expr = ("".join(argv[1:])).strip()
try:
result = str(eval(expr))
    except Exception:
print('Invalid expression: {}'.format(expr))
sys.exit(0)
width = max(len(expr), len(result)) + 3
print("{0: >{1}}".format(expr, width))
print('=')
print("{0: >{1}}".format(result, width))
|
[
"[email protected]"
] | ||
b299b2e5a9e1daa9ba514883cc28f69082507351
|
694559acfaf08a145989ca1a4fa95e6a94b2abaa
|
/流量分析与处理/Scapy/TCP_Rest.py
|
04d38e6aa528bb611c60cd4c57eb8c75fef718d1
|
[] |
no_license
|
Founderbn2014/Python_Network
|
64d01e19aca84986eca48b85a222c62a338e1dff
|
e89bbbd54bdee5b13c9ffca8d2ea128ee4ecac6a
|
refs/heads/master
| 2023-05-19T06:46:52.559372 | 2020-11-23T14:01:16 | 2020-11-23T14:01:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,500 |
py
|
#!/usr/bin/env python3
# -*- encoding = utf-8 -*-
# I wrote this code while learning; it is just for fun!
# My QQ: 1945962391
# Comments and discussion are welcome; let's learn and improve together!
from scapy.all import *
from Tools.Scapy_IFACE import scapy_iface
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)  # silence Scapy runtime errors
def tcp_monitor_callback(pkt):
    # Main task: for each captured packet, send TCP RST segments to reset the session
source_mac = pkt[Ether].fields['src']
destination_mac = pkt[Ether].fields['dst']
source_ip = pkt[IP].fields['src']
destination_ip = pkt[IP].fields['dst']
source_port = pkt[TCP].fields['sport']
destination_port = pkt[TCP].fields['dport']
seq_sn = pkt[TCP].fields['seq']
ack_sn = pkt[TCP].fields['ack']
a = Ether(src=source_mac, dst=destination_mac) / IP(src=source_ip, dst=destination_ip) / TCP(dport=destination_port,
sport=source_port,
flags=4, seq=seq_sn)
b = Ether(src=destination_mac, dst=source_mac) / IP(src=destination_ip, dst=source_ip) / TCP(dport=source_port,
sport=destination_port,
flags=4, seq=ack_sn)
    sendp(a,
          iface=global_if,  # the iface parameter cannot be used on Windows
          verbose=False)
    sendp(b,
          iface=global_if,  # the iface parameter cannot be used on Windows
          verbose=False)
def tcp_reset(src_ip, dst_ip, dst_port, ifname, src_port=None):
    # Main task: sniff packets matching the filter, then reset each matched session via tcp_monitor_callback
global global_if
global_if = scapy_iface(ifname)
if src_port is None:
match = "src host " + src_ip + " and dst host " + dst_ip + " and dst port " + dst_port
else:
match = "src host " + src_ip + " and dst host " + dst_ip + " and src port " + src_port + " and dst port " + dst_port
print("开始匹配异常流量" + match)
sniff(prn=tcp_monitor_callback,
filter=match,
iface=global_if,
store=0)
if __name__ == "__main__":
    # works with both the Linux and Windows interpreters
tcp_reset('192.168.98.29', '192.168.98.66', '22', 'ens33')
|
[
"[email protected]"
] | |
91fd6ec6a59587d512d5275184795b8b3b4b41a5
|
3f969c2c4ebdf3a9226ae624a84292ba63210147
|
/dataAnalysis/calGoalInferAndPlot.py
|
c2426a723d7884988f562ceb16cccb5b9c0b3ac1
|
[] |
no_license
|
chengshaozhe/commitmentObstacles
|
390cb7f4c3b7ea79fdb17c1b20ff656bdba6c009
|
f77f9de11c7798984cc07f5cf34c81f0ffba410d
|
refs/heads/master
| 2021-06-25T13:52:08.880415 | 2021-06-22T06:31:07 | 2021-06-22T06:31:07 | 233,626,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,473 |
py
|
import pandas as pd
import os
import sys
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__), '..')))
import glob
DIRNAME = os.path.dirname(__file__)
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
import numpy as np
import pickle
from scipy.stats import ttest_ind, entropy, mannwhitneyu, ranksums
from scipy.interpolate import interp1d
from dataAnalysis import calculateSE, calculateAvoidCommitmnetZone
# from machinePolicy.onlineVIWithObstacle import RunVI
from dataAnalysis import *
class SoftmaxPolicy:
def __init__(self, softmaxBeta):
self.softmaxBeta = softmaxBeta
def __call__(self, QDict, playerGrid, targetGrid, obstacles):
actionDict = QDict[(playerGrid, targetGrid)]
actionValues = list(actionDict.values())
softmaxProbabilityList = calculateSoftmaxProbability(actionValues, self.softmaxBeta)
softMaxActionDict = dict(zip(actionDict.keys(), softmaxProbabilityList))
return softMaxActionDict
class BasePolicy:
def __init__(self, Q_dict, softmaxBeta):
self.Q_dict = Q_dict
self.softmaxBeta = softmaxBeta
def __call__(self, playerGrid, target1, target2):
actionDict = self.Q_dict[(playerGrid, tuple(sorted((target1, target2))))]
actionValues = list(actionDict.values())
softmaxProbabilityList = calculateSoftmaxProbability(actionValues, self.softmaxBeta)
softMaxActionDict = dict(zip(actionDict.keys(), softmaxProbabilityList))
return softMaxActionDict
def calInformationGain(baseProb, conditionProb):
return entropy(baseProb) - entropy(conditionProb)
# class CalculateActionInformation:
# def __init__(self, initPrior, goalPolicy, basePolicy):
# self.initPrior = initPrior
# self.goalPolicy = goalPolicy
# self.basePolicy = basePolicy
# def __call__(self, trajectory, aimAction, target1, target2):
# trajectory = list(map(tuple, trajectory))
# targets = list([target1, target2])
# expectedInfoList = []
# cumulatedInfoList = []
# priorList = self.initPrior
# for index, (playerGrid, action) in enumerate(zip(trajectory, aimAction)):
# likelihoodList = [self.goalPolicy(playerGrid, goal).get(action) for goal in targets]
# posteriorUnnormalized = [prior * likelihood for prior, likelihood in zip(priorList, likelihoodList)]
# evidence = sum(posteriorUnnormalized)
# posteriorList = [posterior / evidence for posterior in posteriorUnnormalized]
# prior = posteriorList
# goalProbList = [list(self.goalPolicy(playerGrid, goal).values()) for goal in targets]
# baseProb = list(self.basePolicy(playerGrid, target1, target2).values())
# # expectedInfo = sum([goalPosterior * KL(goalProb, baseProb) for goalPosterior, goalProb in zip(posteriorList, goalProbList)])
# expectedInfo = sum([goalPosterior * calInformationGain(baseProb, goalProb) for goalPosterior, goalProb in zip(posteriorList, goalProbList)])
# expectedInfoList.append(expectedInfo)
# cumulatedInfo = sum(expectedInfoList)
# cumulatedInfoList.append(cumulatedInfo)
# return cumulatedInfoList
class CalculateActionInformation:
def __init__(self, initPrior, goalPolicy, basePolicy):
self.initPrior = initPrior
self.goalPolicy = goalPolicy
self.basePolicy = basePolicy
def __call__(self, trajectory, aimAction, target1, target2):
trajectory = list(map(tuple, trajectory))
goalPosteriorList = []
        priorGoal = self.initPrior[0]
goal = trajectory[-1]
targets = list([target1, target2])
noGoal = [target for target in targets if target != goal][0]
expectedInfoList = []
cumulatedInfoList = []
for playerGrid, action in zip(trajectory, aimAction):
likelihoodGoal = self.goalPolicy(playerGrid, goal).get(action)
likelihoodNogoal = self.goalPolicy(playerGrid, noGoal).get(action)
posteriorGoal = (priorGoal * likelihoodGoal) / ((priorGoal * likelihoodGoal) + (1 - priorGoal) * likelihoodNogoal)
priorGoal = posteriorGoal
goalProb = list(self.goalPolicy(playerGrid, goal).values())
baseProb = list(self.basePolicy(playerGrid, target1, target2).values())
# expectedInfo = posteriorGoal * KL(goalProb, baseProb)
expectedInfo = posteriorGoal * calInformationGain(baseProb, goalProb)
expectedInfoList.append(expectedInfo)
cumulatedInfo = sum(expectedInfoList)
cumulatedInfoList.append(cumulatedInfo)
return cumulatedInfoList
class GoalInference:
def __init__(self, initPrior, goalPolicy, runVI):
self.initPrior = initPrior
self.goalPolicy = goalPolicy
self.runVI = runVI
def __call__(self, trajectory, aimAction, target1, target2, obstacles):
trajectory = list(map(tuple, trajectory))
goalPosteriorList = []
        priorGoal = self.initPrior[0]
goal = trajectory[-1]
targets = list([target1, target2])
noGoal = [target for target in targets if target != goal][0]
QDictGoal = self.runVI(goal, obstacles)
QDictNoGoal = self.runVI(noGoal, obstacles)
for playerGrid, action in zip(trajectory, aimAction):
likelihoodGoal = self.goalPolicy(QDictGoal, playerGrid, goal, obstacles).get(action)
likelihoodB = self.goalPolicy(QDictNoGoal, playerGrid, noGoal, obstacles).get(action)
posteriorGoal = (priorGoal * likelihoodGoal) / ((priorGoal * likelihoodGoal) + (1 - priorGoal) * likelihoodB)
goalPosteriorList.append(posteriorGoal)
priorGoal = posteriorGoal
        goalPosteriorList.insert(0, self.initPrior[0])
return goalPosteriorList
def getSoftmaxGoalPolicy(Q_dict, playerGrid, target, softmaxBeta):
actionDict = Q_dict[(playerGrid, target)]
actionValues = list(actionDict.values())
softmaxProbabilityList = calculateSoftmaxProbability(actionValues, softmaxBeta)
softMaxActionDict = dict(zip(actionDict.keys(), softmaxProbabilityList))
return softMaxActionDict
class InferPosterior:
def __init__(self, softmaxBeta, runVI):
self.softmaxBeta = softmaxBeta
self.runVI = runVI
self.initPrior = [0.5, 0.5]
def __call__(self, trajectory, aimAction, target1, target2, obstacles):
trajectory = list(map(tuple, trajectory))
priorList = self.initPrior
posteriorsList = [priorList]
_, _, transitionTableA, rewardA, _, V_goalA, Q_dictA, _ = self.runVI(target1, obstacles)
_, _, transitionTableB, rewardB, _, V_goalB, Q_dictB, _ = self.runVI(target2, obstacles)
goalQDicts = [Q_dictA, Q_dictB]
targets = [target1, target2]
for playerGrid, action in zip(trajectory, aimAction):
goalPolicies = [getSoftmaxGoalPolicy(Q_dict, playerGrid, goal, self.softmaxBeta) for Q_dict, goal in zip(goalQDicts, targets)]
likelihoodList = [goalPolicies[goalIndex].get(action) for goalIndex, goal in enumerate(targets)]
posteriorUnnormalized = [prior * likelihood for prior, likelihood in zip(priorList, likelihoodList)]
evidence = sum(posteriorUnnormalized)
posteriors = [posterior / evidence for posterior in posteriorUnnormalized]
posteriorsList.append(posteriors)
priorList = posteriors
return posteriorsList
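# Worked example of the Bayes update above (illustrative numbers): with
# priors [0.5, 0.5] and action likelihoods [0.8, 0.2] under the two goals,
# the unnormalized posteriors are [0.4, 0.1], the evidence is 0.5, and the
# normalized posteriors become [0.8, 0.2].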
def calPosteriorByInterpolation(goalPosteriorList, xInterpolation):
x = np.divide(np.arange(len(goalPosteriorList) + 1), len(goalPosteriorList))
goalPosteriorList.append(1)
y = np.array(goalPosteriorList)
f = interp1d(x, y, kind='nearest')
goalPosterior = f(xInterpolation)
return goalPosterior
def calPosteriorByChosenSteps(goalPosteriorList, xnew):
goalPosterior = np.array(goalPosteriorList)[xnew]
return goalPosterior
def calInfo(expectedInfoList):
x = np.divide(np.arange(len(expectedInfoList)), len(expectedInfoList) - 1)
y = np.array(expectedInfoList)
f = interp1d(x, y, kind='nearest')
xnew = np.linspace(0., 1., 30)
goalPosterior = f(xnew)
return goalPosterior
class CalFirstIntentionStep:
def __init__(self, inferThreshold):
self.inferThreshold = inferThreshold
def __call__(self, goalPosteriorList):
for index, goalPosteriori in enumerate(goalPosteriorList):
if goalPosteriori > self.inferThreshold:
return index + 1
break
return len(goalPosteriorList)
class CalFirstIntentionStepRatio:
def __init__(self, calFirstIntentionStep):
self.calFirstIntentionStep = calFirstIntentionStep
def __call__(self, goalPosteriorList):
firstIntentionStep = self.calFirstIntentionStep(goalPosteriorList)
firstIntentionStepRatio = firstIntentionStep / len(goalPosteriorList)
return firstIntentionStepRatio
def isDecisionStepInZone(trajectory, target1, target2, decisionSteps):
trajectory = list(map(tuple, trajectory))[:decisionSteps + 1]
initPlayerGrid = trajectory[0]
zone = calculateAvoidCommitmnetZone(initPlayerGrid, target1, target2)
isStepInZone = [step in zone for step in trajectory[1:]]
return np.all(isStepInZone)
def calGoalPosteriorFromAll(posteriors, trajectory, target1, target2):
trajectory = list(map(tuple, trajectory))
goalIndex = None
if trajectory[-1] == target1:
goalIndex = 0
elif trajectory[-1] == target2:
goalIndex = 1
else:
print("trajectory no goal reach! ")
print(trajectory, target1, target2)
goalPosteriorList = [posterior[goalIndex] for posterior in posteriors]
return goalPosteriorList
if __name__ == '__main__':
from machinePolicy.showIntentionModel import RunVI, GetShowIntentionPolices
gridSize = 15
noise = 0.067
gamma = 0.9
goalReward = 30
actionSpace = [(0, -1), (0, 1), (-1, 0), (1, 0)]
noiseActionSpace = [(0, -1), (0, 1), (-1, 0), (1, 0), (1, 1), (1, -1), (-1, -1), (-1, 1)]
runVI = RunVI(gridSize, actionSpace, noiseActionSpace, noise, gamma, goalReward)
softmaxBeta = 2.5
inferPosterior = InferPosterior(softmaxBeta, runVI)
resultsPath = os.path.join(os.path.join(DIRNAME, '..'), 'results')
participants = ['human', 'RL']
# participants = ['human', 'intentionModel/threshold0.5infoScale11']
# participants = ['intentionModel/threshold0.3infoScale11', 'intentionModel/threshold0.3infoScale8']
# participants = ['human', 'intentionModelChosen/threshold0.07infoScale8.5']
# participants = ['human', 'intentionModel/threshold0.1infoScale7softmaxBetaInfer3']
# participants = ['intentionModelChosen/threshold0.07infoScale8.5', 'intentionModel/threshold0.07infoScale8.5']
# participants = ['human']
# participants = ['human', 'intentionModel/threshold0.1infoScale7softmaxBetaInfer3', 'RL']
# participants = ['human']
# participants = ['test/human', 'test/intention', 'test/RL']
# decisionStep = 2
for decisionStep in [6]: # , 4, 2, 1, 0]:
statsList = []
semList = []
statDFList = []
for participant in participants:
dataPath = os.path.join(resultsPath, participant)
df = pd.concat(map(pd.read_csv, glob.glob(os.path.join(dataPath, '*.csv'))), sort=False)
nubOfSubj = len(df["name"].unique())
print(participant, nubOfSubj)
df = df[(df['targetDiff'] == '0') | (df['targetDiff'] == 0)]
df = df[(df['conditionName'] == 'expCondition1') | (df['conditionName'] == 'expCondition2')]
df = df[(df['decisionSteps'] == decisionStep)]
df["trajLength"] = df.apply(lambda x: len(eval(x['trajectory'])), axis=1)
df['isValidTraj'] = df.apply(lambda x: isValidTraj(eval(x['trajectory']), eval(x['target1']), eval(x['target2'])), axis=1)
df = df[df['isValidTraj'] == 1]
chosenSteps = 16
df = df[(df["trajLength"] > chosenSteps)]
# df = df[(df["trajLength"] > 14) & (df["trajLength"] < 25)]
# df['goalPosterior'] = df.apply(lambda x: goalInfernce(eval(x['trajectory']), eval(x['aimAction']), eval(x['target1']), eval(x['target2']), eval(x['obstacles'])), axis=1)
# df['goalPosteriorList'] = df.apply(lambda x: goalInfernce(eval(x['trajectory']), eval(x['aimAction']), eval(x['target1']), eval(x['target2']), eval(x['obstacles'])), axis=1)
df['posteriors'] = df.apply(lambda x: inferPosterior(eval(x['trajectory']), eval(x['aimAction']), eval(x['target1']), eval(x['target2']), eval(x['obstacles'])), axis=1)
# df.to_csv("humanPosterior.csv")
df['goalPosteriorList'] = df.apply(lambda x: calGoalPosteriorFromAll(x['posteriors'], eval(x['trajectory']), eval(x['target1']), eval(x['target2'])), axis=1)
# interpolation
# xnew = np.linspace(0., 1., 15)
# df['goalPosterior'] = df.apply(lambda x: calPosteriorByInterpolation(x['goalPosteriorList'], xnew), axis=1)
xnew = np.array(list(range(chosenSteps + 1)))
df['goalPosterior'] = df.apply(lambda x: calPosteriorByChosenSteps(x['goalPosteriorList'], xnew), axis=1)
# df['goalPosterior'] = df.apply(lambda x: np.round(np.array(x['goalPosterior']) * 100), axis=1)
goalPosterior = np.array(df['goalPosterior'].tolist())
goalPosteriorMean = np.mean(goalPosterior, axis=0)
goalPosteriorStd = np.divide(np.std(goalPosterior, axis=0, ddof=1), np.sqrt(len(goalPosterior)))
statsList.append(goalPosteriorMean)
semList.append(goalPosteriorStd)
def arrMean(df, colnames):
arr = np.array(df[colnames].tolist())
return np.mean(arr, axis=0)
grouped = pd.DataFrame(df.groupby('name').apply(arrMean, 'goalPosterior'))
statArr = np.round(np.array(grouped.iloc[:, 0].tolist()).T, 1)
statDFList.append(statArr)
pvalus = np.array([ttest_ind(statDFList[0][i], statDFList[1][i])[1] for i in range(statDFList[0].shape[0])])
sigArea = np.where(pvalus < 0.05)[0]
print(sigArea)
# lables = ['Humans', 'Intention Model', 'RL']
lables = ['Humans', 'MEU Agent']
lineWidth = 1
# xnew = np.array(list(range(1, 16)))
fig, ax = plt.subplots()
plt.rcParams['figure.dpi'] = 200
colorList = [(0.8392156862745098, 0.15294117647058825, 0.1568627450980392), # red
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765), # blue
(0.6, 0.6, 0.6)] # grey
for i in range(len(statsList)):
plt.plot(xnew, statsList[i], label=lables[i], linewidth=lineWidth, color=colorList[i])
# plt.errorbar(xnew, statsList[i], yerr=ci95, label=lables[i])
from scipy.stats import t, norm
alpha = 0.05
# ci95t = semList[i] * t.ppf(1 - alpha / 2, degreeOfFreedom)
t_ci = 2.086 # two-tailed 95% z_ci = 1.96
ci95 = t_ci * semList[i]
plt.fill(np.concatenate([xnew, xnew[::-1]]), np.concatenate([statsList[i] - ci95, (statsList[i] + ci95)[::-1]]), color=colorList[i], alpha=.2)
# sns.regplot(xnew, statsList[i], data=ans.loc[ans.dataset == "III"], scatter_kws={"s": 80},robust=True, ci=95)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# sig area line
# xnewSig = xnew[sigArea]
# ySig = [stats[sigArea] for stats in statsList]
# for sigLine in [xnewSig[0], xnewSig[-1]]:
# plt.plot([sigLine] * 10, np.linspace(0.5, 1., 10), color='black', linewidth=2, linestyle="--")
plt.legend(loc='best', fontsize=12)
plt.xlabel("Agent's steps over time", fontsize=14, color='black')
plt.ylabel('Posterior probability of goal-reached', fontsize=14, color='black')
plt.ylim((0.47, 1.05))
plt.xticks(fontsize=12, color='black')
plt.yticks(fontsize=12, color='black')
plt.rcParams['svg.fonttype'] = 'none'
plt.savefig('/Users/chengshaozhe/Downloads/exp2bStep{}.svg'.format(str(decisionStep)), dpi=600, format='svg')
# plt.title('Inferred Goal Through Time', fontsize=fontSize, color='black')
plt.show()
|
[
"[email protected]"
] | |
aa8e32b68deea78cb2fba849a9d3e19ff132cca3
|
e45cf89071f4c625fca52dd8e549d243a79da6a5
|
/tests/multithread/ctrl_thread_0.py
|
3ceaa17030418dd537e258633a5afa07dca9300a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
EddieBurning/PyCoRAM
|
cb78f6cebcca001a5a8ed3e868d87b11cdfb7af4
|
8eaa9a2d417a57611d78058b732ebcd86ee09759
|
refs/heads/master
| 2020-03-07T18:06:13.877471 | 2016-11-21T09:03:46 | 2016-11-21T09:03:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
def ctrl_thread():
print("thread 0")
ram = CoramMemory(0, 32, 128)
channel = CoramChannel(0, 32)
addr = 0
sum = 0
for i in range(4):
ram.write(0, addr, 128)
channel.write(addr)
sum = channel.read()
addr += 512
print('thread0 sum=', sum)
ctrl_thread()
|
[
"[email protected]"
] | |
82a1e89adc5a862bba569ee647f117869509b4ea
|
82f3c228f1b913ed4f37d6ab651eb2c0a9ce7221
|
/Configurations/UserConfigs/2018_MCOnly/EWKZNuNuConfig.py
|
a9b99a9014f6db73b747f831a3d4fc3d243ac35d
|
[] |
no_license
|
samhiggie/ReweightScheme
|
7f388f639d02753e19eca30c8b0920ca6addb6e0
|
b2e4449e8d77d244048047a79e7dd8df6b2e35f7
|
refs/heads/master
| 2020-09-11T19:35:53.094218 | 2019-11-12T22:42:32 | 2019-11-12T22:42:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,772 |
py
|
import ROOT
from Configurations.Weights.CrossSectionWeightingModule.CrossSectionWeight import crossSectionWeight
from Configurations.Weights.MuIDIsoReweightingModule.MuIDIsoWeight import muIDIsoWeight_2018 as muIDIsoWeight
from Configurations.Weights.MuTrackingWeightModule.MuTrackingWeight import muTrackingWeight_2018 as muTrackingWeight
from Configurations.Weights.PileupWeightingModule.PileupWeight import pileupWeight_2018 as pileupWeight
from Configurations.Weights.TauFakeRateWeightModule.TauFakeRateWeight import tauFakeRateWeight_2018 as tauFakeRateWeight
from Configurations.Weights.TauIDModule.TauIDWeight import tauIDWeight_2018 as tauIDWeight
from Configurations.Weights.TriggerSFModule.TriggerWeight import triggerWeight_2018 as triggerWeight
from Configurations.Weights.ZPTReweightingModule.ZPTWeight import ZPTWeight_2018 as ZPTWeight
from Configurations.Weights.bTaggingWeightModule.bTaggingWeight import bTaggingWeight
from Configurations.ConfigDefinition import ReweightConfiguration
EWKConfiguration = ReweightConfiguration()
EWKConfiguration.name = "EWKZNuNu"
EWKConfiguration.inputFile = "/data/aloeliger/SMHTT_Selected_2018_MCOnly_Deep/EWKZNuNu.root"
crossSectionWeight.sample = 'EWKZNuNu'
crossSectionWeight.year = '2018'
totalEventsFile = ROOT.TFile.Open("/data/aloeliger/SMHTT_Selected_2018_MCOnly_Deep/EWKZNuNu.root")
crossSectionWeight.totalEvents = totalEventsFile.eventCount.GetBinContent(2)
totalEventsFile.Close()
pileupWeight.year = '2018'
pileupWeight.sample = 'EWKZNuNu'
pileupWeight.InitPileupWeightings(pileupWeight)
EWKConfiguration.listOfWeights = [
crossSectionWeight,
muIDIsoWeight,
muTrackingWeight,
pileupWeight,
tauFakeRateWeight,
tauIDWeight,
triggerWeight,
ZPTWeight,
bTaggingWeight,
]
|
[
"[email protected]"
] | |
af8953741c5d9fb31421e8d4289a5a1c21177f09
|
45758bad12c09ead188ee1cb7f121dab4dd5eeca
|
/sandbox/urls.py
|
8eb5830f3f7270427a8d6f13d16ec3d99fe39019
|
[
"MIT"
] |
permissive
|
sveetch/django-icomoon
|
35dd55962a4423b8930dbcb884ed2a8aa2d4ef67
|
327b70e5509811db7b46f2baa8d301a49e626167
|
refs/heads/master
| 2021-12-28T21:55:05.563118 | 2021-12-22T00:17:53 | 2021-12-22T00:17:53 | 42,755,203 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 602 |
py
|
"""
Sandbox URL Configuration
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path('', include('icomoon.urls', namespace='icomoon')),
]
# This is only needed when using runserver with demo settings
if settings.DEBUG:
urlpatterns = (
urlpatterns
+ staticfiles_urlpatterns()
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
)
|
[
"[email protected]"
] | |
9113b4f9227eef4b96bbe40c1c53de88cfa930b6
|
95267d92a6438665cf7848de229bea691d537f81
|
/EGE_27/dosroc2.py
|
8c5c6833ef5acd30d1a39dd7040ab57e4f8b5fdc
|
[
"MIT"
] |
permissive
|
webkadiz/olympiad-problems
|
2874eb1846c59778e70bcdc9550b3484bc3aa9cc
|
b3a8a3e83d0930947a89ec42e86e3058f464ea40
|
refs/heads/master
| 2022-11-15T23:48:39.854546 | 2022-11-04T12:55:48 | 2022-11-04T12:55:48 | 228,297,097 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 586 |
py
|
n = int(input())
m1 = m2 = m171 = m172 = 0
L = R = 0
for i in range(n):
x = int(input())
if x % 17 == 0 and x % 2 == 0 and x + m2 > L + R:
L = x; R = m2
if x % 17 == 0 and x % 2 == 1 and x + m1 > L + R:
L = x; R = m1
if x % 2 == 0 and x + m172 > L + R:
L = x; R = m172
if x % 2 == 1 and x + m171 > L + R:
L = x; R = m171
if x % 17 == 0 and x % 2 == 0 and x > m172:
m172 = x
elif x % 17 == 0 and x > m171:
m171 = x
elif x % 2 == 0 and x > m2:
m2 = x
elif x > m1:
m1 = x
print(L, R)
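# Cross-check sketch (not in the original submission): the pair-update rules
# above appear to target pairs with an even sum where at least one element is
# a multiple of 17. A brute-force O(n^2) reference over a list `nums` can be
# used to validate the streaming solution on small inputs:
def brute_force(nums):
    best = (0, 0)
    for i in range(len(nums)):
        for j in range(i + 1, len(nums)):
            a, b = nums[i], nums[j]
            if (a + b) % 2 == 0 and (a % 17 == 0 or b % 17 == 0):
                if a + b > best[0] + best[1]:
                    best = (a, b)
    return best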
|
[
"[email protected]"
] | |
544506ebe00126ce63deae7f44ed381b16b2cf45
|
a06e021bf984c2ff8d1236fb4ff14f99c46fb538
|
/venv/lib/python3.5/io.py
|
c337c43b1f600ff920e4c96a51869e4055062100
|
[
"MIT"
] |
permissive
|
luzfcb/danibraz
|
c148336d7139664f9e3ac2fe5c1676f82d9bb6af
|
bb1070fdcd31cf4911956c81c50e792fa43fa373
|
refs/heads/master
| 2020-09-14T23:15:51.570941 | 2017-11-29T19:27:30 | 2017-11-29T19:27:30 | 94,474,781 | 0 | 0 | null | 2017-06-15T20:07:41 | 2017-06-15T20:07:41 | null |
UTF-8
|
Python
| false | false | 56 |
py
|
/home/eliaspai/.pyenv/versions/3.5.3/lib/python3.5/io.py
|
[
"[email protected]"
] | |
14bf2f67d2e6064de9cf34ea6f8fee72bf41afdf
|
9b5cbd04b771b6fc4c3a6a1715622d7a0d579f0f
|
/src/app/main.py
|
6a11dd25c388335e9288137d16e89f922ce7b0a1
|
[] |
no_license
|
OneTesseractInMultiverse/fuzzy-guacamole
|
7b091661b09de7859a7620dfb74e9cf371b5e223
|
141bef131c25684e48670ede76f4404291f42f9a
|
refs/heads/main
| 2023-03-09T21:52:44.325106 | 2021-02-25T02:03:02 | 2021-02-25T02:03:02 | 342,083,471 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,710 |
py
|
from fastapi import FastAPI
from .routers import sample, application
from starlette.middleware.cors import CORSMiddleware
from .database import SessionLocal, engine
from starlette.requests import Request
from starlette.responses import Response
from . import models
models.Base.metadata.create_all(bind=engine)
# -----------------------------------------------------------------------------
# APPLICATION OBJECT
# -----------------------------------------------------------------------------
app = FastAPI(
title="Example Repo",
description="An identity management microservice written in Python and Cloud Native",
version="1.0.0",
openapi_url="/api/openapi.json",
docs_url="/api/docs",
redoc_url=None
)
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
response = Response("Cannot establish connection with persistence provider", status_code=500)
try:
request.state.db = SessionLocal()
response = await call_next(request)
finally:
request.state.db.close()
return response
# -----------------------------------------------------------------------------
# CORS RULES
# -----------------------------------------------------------------------------
origins = [
"*"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# -----------------------------------------------------------------------------
# ADD ROUTERS
# -----------------------------------------------------------------------------
app.include_router(sample.router, prefix="/api/v1")
app.include_router(application.router, prefix="/api/v1")
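# -----------------------------------------------------------------------------
# USAGE SKETCH (hypothetical -- route path and handler are not part of this app)
# -----------------------------------------------------------------------------
# Handlers reach the per-request session that db_session_middleware attached:
#
# @app.get("/api/v1/ping")
# def ping(request: Request):
#     db = request.state.db  # SQLAlchemy session opened by the middleware
#     return {"status": "ok"}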
|
[
"[email protected]"
] | |
67f7b7355c99f3dcd4073f6246ec09ea455407a3
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/VanderPlas17Python/D_Chapter3/A_Installingand/index.py
|
5764989f53230bdcbd9f37fd9ad573b50f12c66f
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,710 |
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# CHAPTER 3
# Data Manipulation with Pandas
#
#
#
#
# In the previous chapter, we dove into detail on NumPy and its ndarray object, which
# provides efficient storage and manipulation of dense typed arrays in Python. Here
# we’ll build on this knowledge by looking in detail at the data structures provided by
# the Pandas library. Pandas is a newer package built on top of NumPy, and provides an
# efficient implementation of a DataFrame. DataFrames are essentially multidimen‐
# sional arrays with attached row and column labels, and often with heterogeneous
# types and/or missing data. As well as offering a convenient storage interface for
# labeled data, Pandas implements a number of powerful data operations familiar to
# users of both database frameworks and spreadsheet programs.
# As we saw, NumPy’s ndarray data structure provides essential features for the type of
# clean, well-organized data typically seen in numerical computing tasks. While it
# serves this purpose very well, its limitations become clear when we need more flexi‐
# bility (attaching labels to data, working with missing data, etc.) and when attempting
# operations that do not map well to element-wise broadcasting (groupings, pivots,
# etc.), each of which is an important piece of analyzing the less structured data avail‐
# able in many forms in the world around us. Pandas, and in particular its Series and
# DataFrame objects, builds on the NumPy array structure and provides efficient access
# to these sorts of “data munging” tasks that occupy much of a data scientist’s time.
# In this chapter, we will focus on the mechanics of using Series, DataFrame, and
# related structures effectively. We will use examples drawn from real datasets where
# appropriate, but these examples are not necessarily the focus.
#
# Installing and Using Pandas
# Installing Pandas on your system requires NumPy to be installed, and if you’re build‐
# ing the library from source, requires the appropriate tools to compile the C and
#
#
# 97
#
# Cython sources on which Pandas is built. Details on this installation can be found in
# the Pandas documentation. If you followed the advice outlined in the preface and
# used the Anaconda stack, you already have Pandas installed.
# Once Pandas is installed, you can import it and check the version:
# In[1]: import pandas
# pandas.__version__
# Out[1]: '0.18.1'
#
# Just as we generally import NumPy under the alias np, we will import Pandas under
# the alias pd:
# In[2]: import pandas as pd
# This import convention will be used throughout the remainder of this book.
#
#
# Reminder About Built-In Documentation
# As you read through this chapter, don’t forget that IPython gives you the ability to
# quickly explore the contents of a package (by using the tab-completion feature) as
# well as the documentation of various functions (using the ? character). (Refer back to
# “Help and Documentation in IPython” on page 3 if you need a refresher on this.)
# For example, to display all the contents of the pandas namespace, you can type this:
# In [3]: pd.<TAB>
# And to display the built-in Pandas documentation, you can use this:
# In [4]: pd?
# More detailed documentation, along with tutorials and other resources, can be found
# at http://pandas.pydata.org/.
#
#
#
# Introducing Pandas Objects
# At the very basic level, Pandas objects can be thought of as enhanced versions of
# NumPy structured arrays in which the rows and columns are identified with labels
# rather than simple integer indices. As we will see during the course of this chapter,
# Pandas provides a host of useful tools, methods, and functionality on top of the basic
# data structures, but nearly everything that follows will require an understanding of
# what these structures are. Thus, before we go any further, let’s introduce these three
# fundamental Pandas data structures: the Series, DataFrame, and Index.
# We will start our code sessions with the standard NumPy and Pandas imports:
# In[1]: import numpy as np
# import pandas as pd
#
#
# 98 | Chapter 3: Data Manipulation with Pandas
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Installing and Using Pandas",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Installingand(HierNode):
def __init__(self):
super().__init__("Installing and Using Pandas")
self.add(Content())
# eof
|
[
"[email protected]"
] | |
c5a37467b8d6e7b6fd1b0eddf40d765701fd7025
|
10e19b5cfd59208c1b754fea38c34cc1fb14fdbe
|
/desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/project/file1.py
|
7f592a8354bb01e56e129eb05907aa597c5a3c21
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
sarvex/hue
|
780d28d032edd810d04e83f588617d1630ec2bef
|
6e75f0c4da2f3231e19c57bdedd57fb5a935670d
|
refs/heads/master
| 2023-08-15T21:39:16.171556 | 2023-05-01T08:37:43 | 2023-05-01T08:37:43 | 32,574,366 | 0 | 0 |
Apache-2.0
| 2023-09-14T16:55:28 | 2015-03-20T09:18:18 |
Python
|
UTF-8
|
Python
| false | false | 207 |
py
|
# -*- coding: utf-8 -*-
# file1.py for tests
from gettext import gettext as _
def foo():
# TRANSLATOR: This will be a translator coment,
# that will include several lines
print _('bar')
|
[
"[email protected]"
] | |
436fadf058f83c73cbe9654035f8c721ee01dd1e
|
ee70ae3bc47a885b5c372f3de0077c7f7b61ad41
|
/application/machinelearning/demo/demo_tree.py
|
bb45527540f31f4e878927a6c029ca86d486b79a
|
[] |
no_license
|
plutoese/mars
|
e2518631c36772812c70af4aa52de10dd5f1d6a7
|
28f6ded1275e47c83f2f3bad5d0c7063d51c779f
|
refs/heads/master
| 2021-01-10T04:37:18.245424 | 2016-01-27T02:48:25 | 2016-01-27T02:48:25 | 47,901,707 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
# coding=UTF-8
from sklearn import tree
from sklearn.datasets import load_iris
from sklearn.externals.six import StringIO  # removed in newer scikit-learn; io.StringIO is the modern equivalent
from graphviz import Source
iris = load_iris()
clf = tree.DecisionTreeClassifier()
print(iris.data)
print(iris.target)
clf = clf.fit(iris.data, iris.target)
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
print((dot_data.getvalue()))
'''
src = Source(dot_data.getvalue())
print(type(src))
src.render('test-output/holy-grenade.gv', view=True)'''
'''
fr = open('d:/data/lenses.txt')
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
lensesLabels = ['age','prescript','astigmatic','tearRate']
lenses_data = [item[0:4] for item in lenses]
lenses_target = [item[4] for item in lenses]
print(lenses_data)
print(lenses_target)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(lenses_data, lenses_target)'''
|
[
"[email protected]"
] | |
c475df4a12601af3051abf2c9415ab99487f6153
|
2d96050f870d26703d7e1ff8f1c472592c70ecf7
|
/accounts/models.py
|
9c1e1d0ee1b76ddb440d56310587d76850cbe521
|
[] |
no_license
|
Pythonian/tweetme
|
92b211028fc683f515b98df8a29afe61e25bd9d6
|
5858b5977ff1bfbf8ee4de03d059f90defa1e3d1
|
refs/heads/master
| 2023-08-05T05:28:28.796794 | 2020-07-03T07:42:02 | 2020-07-03T07:42:02 | 276,833,816 | 0 | 0 | null | 2021-09-22T19:21:14 | 2020-07-03T07:14:34 |
Python
|
UTF-8
|
Python
| false | false | 2,826 |
py
|
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.urls import reverse_lazy
class UserProfileManager(models.Manager):
use_for_related_fields = True
def all(self):
qs = self.get_queryset().all()
try:
if self.instance:
qs = qs.exclude(user=self.instance)
        except Exception:  # no related instance (e.g. manager accessed from the class)
            pass
return qs
def toggle_follow(self, user, to_toggle_user):
user_profile, created = UserProfile.objects.get_or_create(user=user) # (user_obj, true)
if to_toggle_user in user_profile.following.all():
user_profile.following.remove(to_toggle_user)
added = False
else:
user_profile.following.add(to_toggle_user)
added = True
return added
def is_following(self, user, followed_by_user):
user_profile, created = UserProfile.objects.get_or_create(user=user)
if created:
return False
if followed_by_user in user_profile.following.all():
return True
return False
def recommended(self, user, limit_to=10):
print(user)
profile = user.profile
        following = profile.get_following()  # excludes the user themself
qs = self.get_queryset().exclude(user__in=following).exclude(id=profile.id).order_by("?")[:limit_to]
return qs
class UserProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='profile', on_delete=models.CASCADE) # user.profile
following = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='followed_by')
# user.profile.following -- users i follow
# user.followed_by -- users that follow me -- reverse relationship
objects = UserProfileManager() # UserProfile.objects.all()
# abc = UserProfileManager() # UserProfile.abc.all()
def __str__(self):
return str(self.following.all().count())
def get_following(self):
users = self.following.all() # User.objects.all().exclude(username=self.user.username)
return users.exclude(username=self.user.username)
def get_follow_url(self):
return reverse_lazy("profiles:follow", kwargs={"username":self.user.username})
def get_absolute_url(self):
return reverse_lazy("profiles:detail", kwargs={"username":self.user.username})
# cfe = User.objects.first()
# User.objects.get_or_create() # (user_obj, true/false)
# cfe.save()
def post_save_user_receiver(sender, instance, created, *args, **kwargs):
if created:
new_profile = UserProfile.objects.get_or_create(user=instance)
# celery + redis
# deferred task
post_save.connect(post_save_user_receiver, sender=settings.AUTH_USER_MODEL)
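# Usage sketch (hypothetical view code, not part of this module):
#
# added = UserProfile.objects.toggle_follow(request.user, other_user)  # True if now following
# UserProfile.objects.is_following(request.user, other_user)           # -> True / False
# suggestions = UserProfile.objects.recommended(request.user, limit_to=5)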
|
[
"[email protected]"
] | |
c5dd090b720a4344c3a53761aba525e6b8b59d02
|
11eb83864d41f71dabc92fd5fa04cefa158c09ee
|
/heatpump/ui_helper.py
|
b42aa6acb5b14b2df2c0631b5a36b4dccc4bce52
|
[
"Apache-2.0"
] |
permissive
|
alanmitchell/heat-pump-calc
|
9d6dd5a3288861865d2b20e323da631b56a7bff7
|
fb0b5bfb4dc84d002b431eac7373d24920f11dc3
|
refs/heads/master
| 2023-08-10T12:41:24.400653 | 2023-07-03T21:49:12 | 2023-07-03T21:49:12 | 123,494,123 | 9 | 1 |
Apache-2.0
| 2023-07-25T18:48:51 | 2018-03-01T21:24:46 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 14,587 |
py
|
"""Functions that create lists of Dash Input and State objects,
and convert the values from the components associated with those objects
into variables suitable for passing to energy models. Only inputs that
are used in the Energy Model are addressed here.
"""
import numbers
import numpy as np
from dash.dependencies import Input, State
from heatpump.constants import ELECTRIC_ID
from . import library as lib
from .utils import is_null
input_info = [
('bldg_name', 'Building Name', 'null-ok'),
('notes', 'Notes', 'null-ok'),
('city_id', 'City'),
('elec_input', 'Type of Electric Rate input', 'extra'),
('utility_id', 'Utility', 'null-ok,extra'),
('elec_rate_ez', 'Electric Rate', 'null-ok,float,greater-than-zero,extra'),
('pce_ez', 'PCE assistance', 'null-ok,null-to-zero,float,extra'),
('customer_chg_ez', 'Electric Utility Customer Charge', 'null-ok,null-to-zero,float,extra'),
('blk1_kwh', 'Electric Block 1 kWh limit', 'null-ok,int,greater-than-zero,extra'),
('blk2_kwh', 'Electric Block 2 kWh limit', 'null-ok,int,greater-than-zero,extra'),
('blk3_kwh', 'Electric Block 3 kWh limit', 'null-ok,int,greater-than-zero,extra'),
('blk4_kwh', 'Electric Block 4 kWh limit', 'null-ok,int,greater-than-zero,extra'),
('blk1_rate', 'Electric Block 1 rate', 'null-ok,float,extra'),
('blk2_rate', 'Electric Block 2 rate', 'null-ok,float,extra'),
('blk3_rate', 'Electric Block 3 rate', 'null-ok,float,extra'),
('blk4_rate', 'Electric Block 4 rate', 'null-ok,float,extra'),
('demand_chg_adv', 'Electric Demand Charge', 'null-ok,null-to-zero,float,extra'),
('pce_adv', 'PCE assistance', 'null-ok,null-to-zero,float,extra'),
('customer_chg_adv', 'Electric Customer Charge', 'null-ok,null-to-zero,float,extra'),
('no_pce_chks', 'Ignore PCE', 'extra'),
('co2_lbs_per_kwh', 'CO2 per kWh of extra electricity generation'),
('bldg_type', 'Building Type', 'extra'),
('commun_all_pce', 'Community Building PCE Used Up', 'extra'),
('bldg_floor_area', 'Building Floor Area', 'float,greater-than-zero'),
('garage_stall_count', 'Garage Size'),
('garage_heated_by_hp', 'Is Garage Heated by Heat Pump'),
('wall_type', 'Wall Construction Type', 'extra'),
('exist_heat_fuel_id', 'Heating Fuel Type'),
('end_uses_chks', 'End Uses using Heating Fuel', 'extra'),
('occupant_count', 'Number of Occupants', 'null-ok,null-to-zero,int,greater-than-zero'),
('exist_unit_fuel_cost', 'Heating Fuel Price', 'null-ok,float,greater-than-zero'),
('heat_effic', 'Heating System Efficiency', 'extra'),
('heat_effic_slider', 'Custom-entered Heating System Efficiency', 'extra'),
('aux_elec', 'Auxiliary Electric Use', 'extra'),
('exist_fuel_use', 'Existing Heating Fuel Use', 'null-ok,float,greater-than-zero'),
('elec_uses', 'Electric Uses Included in Annual Use Entry'),
('elec_use_jan', 'January Electric Use', 'null-ok,float,greater-than-zero'),
('elec_use_may', 'May Electric Use', 'null-ok,float,greater-than-zero'),
('indoor_heat_setpoint', 'Heating Thermostat'),
('hp_zones', 'Number of Heat Pump Zones', 'extra'),
('hp_selection', 'Heat Pump Selection Method', 'extra'),
('hp_manuf_id', 'Heat Pump Manufacturer', 'null-ok,extra'), # needed to get a callback to fire
('hp_model_id', 'Heat Pump Model', 'null-ok'),
('capital_cost', 'Installed Heat Pump Cost', 'float'),
('rebate_dol', 'Heat Pump Rebate', 'null-ok,null-to-zero,float'),
('pct_financed', '% of Heat Pump Financed'),
('loan_term', 'Length/Term of Loan'),
('loan_interest', 'Loan Interest Rate'),
('low_temp_cutoff', 'Low Temperature Cutoff of Heat Pump'),
('off_months_chks', 'Months when Heat Pump is Off'),
('pct_exposed_to_hp', 'Percent of Home Open to Heat Pump'),
('bedroom_temp_tolerance', 'Bedroom Temperature Tolerance'),
('doors_open_to_adjacent', 'Doors Open to Adjacent Spaces'),
('sales_tax', 'Sales Tax'),
('inflation_rate', 'Inflation Rate'),
('fuel_esc_rate', 'Fuel Price Escalation Rate'),
('elec_esc_rate', 'Electricity Price Escalation Rate'),
('discount_rate', 'Discount Rate'),
('hp_life', 'Heat Pump Life'),
('op_cost_chg', 'Operating Cost Change', 'null-ok,null-to-zero,float'),
]
# Make a list of Input objects and a list of State objects for each of
# the Inputs that can affect the calculation.
input_objects = []
state_objects = []
for info in input_info:
var_name = info[0]
input_objects.append(Input(var_name, 'value'))
state_objects.append(State(var_name, 'value'))
def calc_input_objects():
"""Return a set of Input objects that can be used in a callback
for the above inputs.
"""
return input_objects
def calc_state_objects():
"""Returns a list of State objects that can be used in a callback
for the above inputs.
"""
return state_objects
# Default dictionary of all possible input checks and conversions.
# All checks and conversions are assumed to be not applied in the default case.
check_conversion_codes = ('null-ok', 'null-to-zero', 'float', 'int', 'greater-than-zero','extra')
check_conversion = dict(zip(check_conversion_codes, [False] * len(check_conversion_codes)))
def inputs_to_vars(input_vals):
"""This routine returns a list of input error messages and a dictionary of
variables and associated values. To create the dictionary of variables and
values, conversions that are listed in the input_info list above are applied.
"""
# Separate the variables that are the final variables that are used in the
# calculation, 'vars', from those that are secondary and will be used to
# create main variables, 'extras'
vars = {}
extras = {}
# Start a list of error messages
errors = []
# Make the dictionaries of main variables and extra variables, doing the
# requested checks and conversions.
for info, val in zip(input_info, input_vals):
# The third info item may or may not be present so use a wildcard
# item in the tuple unpacking.
var, desc, *other = info
# Start assuming no checks and conversions are present and then
# override by those present in the third element of the info tuple.
cc = check_conversion.copy() # all False to start with.
if len(other):
for item in other[0].split(','):
cc[item.strip()] = True
if is_null(val):
if cc['null-ok']:
# Only other check / conversion is null to zero if the value is
# None
if cc['null-to-zero']:
val = 0
else:
errors.append(f'The {desc} must be entered.')
else:
if cc['float']:
try:
if isinstance(val, str):
# remove any commas before converting.
val = val.replace(',', '')
val = float(val)
except:
errors.append(f'{desc} must be a number.')
elif cc['int']:
try:
val = int(val)
except:
errors.append(f'{desc} must be an integer number.')
if cc['greater-than-zero']:
if not isinstance(val, numbers.Number) or val <= 0:
errors.append(f'{desc} must be greater than zero.')
if cc['extra']:
extras[var] = val
else:
vars[var] = val
if len(errors):
# Because of errors, no point in going further.
return errors, vars, extras
# convert percentage values to fractions
vars['discount_rate'] /= 100.
vars['elec_esc_rate'] /= 100.
vars['fuel_esc_rate'] /= 100.
vars['inflation_rate'] /= 100.
vars['loan_interest'] /= 100.
vars['pct_financed'] /= 100.
vars['sales_tax'] /= 100.
vars['pct_exposed_to_hp'] /= 100.
# ------------------- Some Other Input Checks -------------------------
# If existing heating fuel type is electric, no fuel price needs to be entered,
# but otherwise, a fuel price is required.
if vars['exist_heat_fuel_id'] != ELECTRIC_ID and is_null(vars['exist_unit_fuel_cost']):
errors.append('The Heating Fuel Price per Unit must be entered.')
return errors, vars, extras
# January and May electric is required if the Heating Fuel is not Electric
if vars['exist_heat_fuel_id'] != ELECTRIC_ID and is_null(vars['elec_use_jan']):
errors.append('The January Electricity use must be entered.')
return errors, vars, extras
if vars['exist_heat_fuel_id'] != ELECTRIC_ID and is_null(vars['elec_use_may']):
errors.append('The May Electricity use must be entered.')
return errors, vars, extras
if extras['hp_selection'] == 'advanced':
if is_null(vars['hp_model_id']):
errors.append('You must Select a Heat Pump Model.')
return errors, vars, extras
else:
# simple selection. Substitute a Generic ID for the Heat Pump Model
# based on the number of zones served. Use negative numbers for generic
# IDs.
vars['hp_model_id'] = -extras['hp_zones']
if (vars['loan_term'] > vars['hp_life']) and (vars['pct_financed'] > 0):
errors.append('The Term of the Loan cannot be longer than the Life of the Heat Pump. The Heat Pump Life can be changed in the Advanced Economic Inputs section.')
return errors, vars, extras
# --------------------- Electric Utility Rates ----------------------------
# Create a utility object from the electric utility inputs.
# To prepare for manual entry of the utility parameters, if that
# is selected, make a real utility object from the first one listed
# for the community and set fields to default values.
city = lib.city_from_id(vars['city_id'])
utility = lib.util_from_id(city.ElecUtilities[0][1]).copy()
utility.at['Name'] = 'Custom'
utility.at['IsCommercial'] = False
utility.at['DemandCharge'] = np.NaN
# Blocks, PCE, Customer Charge will be set below if this object
# is used.
if extras['elec_input'] == 'util':
if is_null(extras['utility_id']):
errors.append('You must select an Electric Utility for this City.')
return errors, vars, extras
else:
utility = lib.util_from_id(extras['utility_id'])
elif extras['elec_input'] == 'ez':
if is_null(extras['elec_rate_ez']):
errors.append('You must enter an Electric Rate for this City.')
return errors, vars, extras
else:
# just one block
utility.at['Blocks'] = [(np.NaN, extras['elec_rate_ez'])]
utility.at['PCE'] = extras['pce_ez']
utility.at['CustomerChg'] = extras['customer_chg_ez']
else:
# Advanced utility rate input
# Need to check block limits and rates to see if they are in
# the correct format.
# make a list of limits and a list of block rates
limits = [extras[f'blk{i}_kwh'] for i in range(1, 5)]
rates = [extras[f'blk{i}_rate'] for i in range(1, 5)]
# there must be a None at the last block
last_ix = None
for i in range(4):
if is_null(limits[i]):
last_ix = i
break
if last_ix is None:
errors.append('The Last Electric Rate Block kWh limit must be empty.')
return errors, vars, extras
# Now check that all limits prior to the None are filled out
for i in range(last_ix):
val = limits[i]
if is_null(val):
errors.append(f'The Electric Rate Block {i+1} must have a kWh value.')
return errors, vars, extras
# Check that there are rates for all the blocks through the last
for i in range(last_ix + 1):
val = rates[i]
if is_null(val):
errors.append(f'The Electric Rate Block {i+1} must have a rate.')
return errors, vars, extras
# Blocks are good, so add them to the utility
blocks = []
for i in range(last_ix + 1):
kwh = limits[i] if i != last_ix else np.nan
rate = rates[i]
blocks.append( (kwh, rate) )
utility.at['Blocks'] = blocks
utility.at['PCE'] = extras['pce_adv']
utility.at['CustomerChg'] = extras['customer_chg_adv']
utility.at['DemandCharge'] = extras['demand_chg_adv']
vars['utility'] = utility
# To account for whether this building type gets PCE and to account
# for the "ignore PCE" checkbox, set the 'pce_limit' variable.
if 'no_pce' in extras['no_pce_chks']:
# ignore PCE is selected
pce_limit = 0.0
else:
if extras['bldg_type'] == 'res':
pce_limit = 750.0
elif extras['bldg_type'] == 'comm':
# no PCE for commercial buildings
pce_limit = 0.0
elif extras['bldg_type'] == 'commun':
if extras['commun_all_pce']:
# all of the community building PCE is used up for
# this community
pce_limit = 0.0
else:
# assume enough community building PCE left to supply the
# heat pump. Thus, no limit on PCE for this building.
pce_limit = np.inf
# save the PCE limit variable
vars['pce_limit'] = pce_limit
# -------------- Other Variables needing Processing --------------------
# Wall Type translates to Insulation Level
wall_to_insul = {'2x4': 1, '2x6': 2, 'better': 3}
vars['insul_level'] = wall_to_insul[extras['wall_type']]
# Possible manual entry of heating efficiency
if extras['heat_effic'] == 'manual':
vars['exist_heat_effic'] = extras['heat_effic_slider'] / 100.
else:
vars['exist_heat_effic'] = extras['heat_effic'] / 100.
# Auxiliary Electricity Use of Heating System, kWh / MMBtu of heat
# delivered.
aux_to_kwh = {'no-fan': 0.0, 'boiler': 4.0, 'toyo': 3.0,
'furnace-effic': 6.25, 'furnace-std': 12.5}
vars['exist_kwh_per_mmbtu'] = aux_to_kwh[extras['aux_elec']]
# Other End Uses using Heating Fuel
vars['includes_dhw'] = 'dhw' in extras['end_uses_chks']
vars['includes_dryer'] = 'drying' in extras['end_uses_chks']
vars['includes_cooking'] = 'cooking' in extras['end_uses_chks']
return errors, vars, extras
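# Wiring sketch (hypothetical component ids; a Dash app object is assumed): the
# helper lists above plug straight into a callback, and inputs_to_vars() then
# validates/converts the raw component values:
#
# @app.callback(Output('results', 'children'), calc_input_objects())
# def run_calc(*input_vals):
#     errors, vars, extras = inputs_to_vars(input_vals)
#     if errors:
#         return format_errors(errors)   # hypothetical helper
#     ...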
|
[
"[email protected]"
] | |
c293ce9bd29a97a2c3b61fcadaa79834d734aff1
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/DaVinciDev_v39r1/Phys/StrippingSelections/python/StrippingSelections/StrippingRD/StrippingTau23MuLines.py
|
e41b622cb595dc6fb534a355d58d740cfbacc24d
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,410 |
py
|
'''
Module for construction of tau -->MuMuMu stripping selections and lines
Exported symbols (use python help!):
-
'''
__author__ = ['Jon Harrison', 'Paul Seyfert', 'Marcin Chrzaszcz']
__date__ = '05/05/2015'
__version__ = '$Revision: 3.0 $'
__all__ = ('Tau23MuLinesConf',
           'default_config',
           'makeTau23Mu',
           'makeDs23PiTIS',
           'makeDs23Pi',
           'makeDs2PhiPi',
           'makeTau25Mu',
           'makeTau2pmm'
           )
from Gaudi.Configuration import *
from Configurables import FilterDesktop, CombineParticles
from PhysSelPython.Wrappers import Selection, DataOnDemand
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
from GaudiKernel.PhysicalConstants import c_light
default_config = {
'NAME' : 'Tau23Mu',
'WGs' : ['RD'],
'BUILDERTYPE' : 'Tau23MuLinesConf',
'CONFIG' : {
'TauPrescale' :1.,
'TauPostscale' :1.,
'Ds23PiTISPrescale' :0.0,
'Ds23PiPrescale' :0.0,
'Ds2PhiPiPrescale' :1.,
'Tau25Prescale' :1.,
'Tau2PMuMuPrescale' :1.,
'TrackGhostProb' :0.45
},
'STREAMS' : { 'Leptonic' : ['StrippingTau23MuTau23MuLine','StrippingTau23MuDs2PhiPiLine','StrippingTau23MuTau2PMuMuLine','StrippingTau23MuDs23PiLine','StrippingTau23MuTau25MuLine']}
}
class Tau23MuLinesConf(LineBuilder) :
"""
Builder
"""
__configuration_keys__ = ( 'TauPrescale',
'TauPostscale',#universal for all lines
'Ds23PiTISPrescale',
'Ds23PiPrescale',
'Ds2PhiPiPrescale',
'Tau25Prescale',
'Tau2PMuMuPrescale',
'TrackGhostProb'
)
def __init__(self,
name = 'Tau23Mu',
config = None) :
LineBuilder.__init__(self, name, config)
#checkConfig(Bs2MuMuLinesConf.__configuration_keys__,config)
tau_name=name+'Tau23Mu'
ds23PiTIS_name = name+'Ds23PiTIS'
ds23Pi_name=name+'Ds23Pi'
ds2PhiPi_name=name+'Ds2PhiPi'
tau25_name=name+'Tau25Mu'
tau2pmm_name=name+'Tau2PMuMu'
self.selTau23Mu = makeTau23Mu(tau_name,config)
#self.selDs23PiTIS = makeDs23PiTIS(self,ds23PiTIS_name)
self.selDs23Pi = makeDs23Pi(ds23Pi_name,config)
self.selDs2PhiPi = makeDs2PhiPi(ds2PhiPi_name,config)
self.selTau25Mu = makeTau25Mu(tau25_name,config)
self.selTau2PMuMu = makeTau2pmm(tau2pmm_name,config)
self.tau23MuLine = StrippingLine(tau_name+"Line",
prescale = config['TauPrescale'],
postscale = config['TauPostscale'],
MDSTFlag = True,
RequiredRawEvents = ["Calo"],
algos = [ self.selTau23Mu ],
RelatedInfoTools = [{ 'Type' : 'RelInfoConeVariables', 'ConeAngle' : 1.,
'Variables' : ['CONEANGLE', 'CONEMULT', 'CONEPT', 'CONEPTASYM'],
'Location':'ConeIsoInfo',
'DaughterLocations':{'[tau+ -> ^mu+ mu+ mu-]CC' : 'MuonConeVarInfo1', '[tau+ -> mu+ ^mu+ mu-]CC' : 'MuonConeVarInfo2', '[tau+ -> mu+ mu+ ^mu-]CC' : 'MuonConeVarInfo3'}
},
{'Type': 'RelInfoVertexIsolation',
'Location':'VtxIsoInfo' },
{ 'Type': 'RelInfoTrackIsolationBDT',
'Variables' : 0,
'Location':'TrackIsoInfo',
'DaughterLocations':{'[tau+ -> ^mu+ mu+ mu-]CC' : 'MuonTrackIsoBDTInfo1', '[tau+ -> mu+ ^mu+ mu-]CC' : 'MuonTrackIsoBDTInfo2', '[tau+ -> mu+ mu+ ^mu-]CC' : 'MuonTrackIsoBDTInfo3'}
}
]
)
#self.ds23PiTISLine = StrippingLine(ds23PiTIS_name+"Line",
# prescale = config['Ds23PiTISPrescale'],
# postscale = config['TauPostscale'],
# algos = [ self.selDs23PiTIS ]
# )
self.ds23PiLine = StrippingLine(ds23Pi_name+"Line",
prescale = config['Ds23PiPrescale'],
postscale = config['TauPostscale'],
MDSTFlag = True,
RequiredRawEvents = [ ],
algos = [ self.selDs23Pi ]
)
self.ds2PhiPiLine = StrippingLine(ds2PhiPi_name+"Line",
prescale = config['Ds2PhiPiPrescale'],
postscale = config['TauPostscale'],
MDSTFlag = True,
RequiredRawEvents = ["Calo"],
algos = [ self.selDs2PhiPi ],
RelatedInfoTools = [{ 'Type' : 'RelInfoConeVariables', 'ConeAngle' : 1.,
'Variables' : ['CONEANGLE', 'CONEMULT', 'CONEPT', 'CONEPTASYM'],
'Location':'ConeIsoInfo',
'DaughterLocations':{'[D_s+ -> ^pi+ mu+ mu-]CC' : 'PionConeVarInfo', '[D_s+ -> pi+ ^mu+ mu-]CC' : 'MuonConeVarInfo1', '[D_s+ -> pi+ mu+ ^mu-]CC' : 'MuonConeVarInfo2'}
},
{'Type': 'RelInfoVertexIsolation',
'Location':'VtxIsoInfo' },
{ 'Type': 'RelInfoTrackIsolationBDT',
'Variables' : 0,
'Location':'TrackIsoInfo',
'DaughterLocations':{'[D_s+ -> ^pi+ mu+ mu-]CC' : 'PionTrackIsoBDTInfo', '[D_s+ -> pi+ ^mu+ mu-]CC' : 'MuonTrackIsoBDTInfo1', '[D_s+ -> pi+ mu+ ^mu-]CC' : 'MuonTrackIsoBDT_mu_2'}
}
]
)
self.tau25MuLine = StrippingLine(tau25_name+"Line",
prescale = config['Tau25Prescale'],
postscale = config['TauPostscale'],
MDSTFlag = True,
RequiredRawEvents = [ ],
algos = [ self.selTau25Mu ]
)
self.tau2PMuMuLine = StrippingLine(tau2pmm_name+"Line",
prescale = config['Tau2PMuMuPrescale'],
postscale = config['TauPostscale'],
MDSTFlag = True,
RequiredRawEvents = ["Calo"],
algos = [ self.selTau2PMuMu ] ,
RelatedInfoTools = [{ 'Type' : 'RelInfoConeVariables', 'ConeAngle' : 1.,
'Variables' : ['CONEANGLE', 'CONEMULT', 'CONEPT', 'CONEPTASYM'],
'Location':'ConeIsoInfo',
'DaughterLocations':{"[[ tau+ -> ^p+ mu+ mu- ]CC, [ tau+ -> ^p~- mu+ mu+ ]CC, [ Lambda_c+ -> ^p+ mu+ mu- ]CC, [ Lambda_c+ -> ^p~- mu+ mu+ ]CC]" : 'ProtonConeVarInfo', "[[ tau+ -> p+ ^mu+ mu- ]CC, [ tau+ -> p~- ^mu+ mu+ ]CC, [ Lambda_c+ -> p+ ^mu+ mu- ]CC, [ Lambda_c+ -> p~- ^mu+ mu+ ]CC]" : 'MuonConeVarInfo1', "[[ tau+ -> p+ mu+ ^mu- ]CC, [ tau+ -> p~- mu+ ^mu+ ]CC, [ Lambda_c+ -> p+ mu+ ^mu- ]CC, [ Lambda_c+ -> p~- mu+ ^mu+ ]CC]" : 'MuonConeVarInfo2'}
},
{'Type': 'RelInfoVertexIsolation',
'Location':'VtxIsoInfo' },
{ 'Type': 'RelInfoTrackIsolationBDT',
'Variables' : 0,
'Location':'TrackIsoInfo',
'DaughterLocations':{"[[ tau+ -> ^p+ mu+ mu- ]CC, [ tau+ -> ^p~- mu+ mu+ ]CC, [ Lambda_c+ -> ^p+ mu+ mu- ]CC, [ Lambda_c+ -> ^p~- mu+ mu+ ]CC]" : 'ProtonTrackIsoBDTInfo', "[[ tau+ -> p+ ^mu+ mu- ]CC, [ tau+ -> p~- ^mu+ mu+ ]CC, [ Lambda_c+ -> p+ ^mu+ mu- ]CC, [ Lambda_c+ -> p~- ^mu+ mu+ ]CC]" : 'MuonTrackIsoBDTInfo1', "[[ tau+ -> p+ mu+ ^mu- ]CC, [ tau+ -> p~- mu+ ^mu+ ]CC, [ Lambda_c+ -> p+ mu+ ^mu- ]CC, [ Lambda_c+ -> p~- mu+ ^mu+ ]CC]" : 'MuonTrackIsoBDTInfo2'}
}
]
)
self.registerLine(self.tau23MuLine)
#self.registerLine(self.ds23PiTISLine)
self.registerLine(self.ds23PiLine)
self.registerLine(self.ds2PhiPiLine)
self.registerLine(self.tau25MuLine)
self.registerLine(self.tau2PMuMuLine)
def makeTau23Mu(name, config):
"""
Please contact Johannes Albrecht if you think of prescaling this line!
Arguments:
name : name of the Selection.
"""
    Tau2MuMuMu = CombineParticles("Combine"+name)
Tau2MuMuMu.DecayDescriptor = " [ tau+ -> mu+ mu+ mu- ]cc"
Tau2MuMuMu.DaughtersCuts = { "mu+" : " ( PT > 300 * MeV ) & ( TRGHOSTPROB < %(TrackGhostProb)s ) & ( TRCHI2DOF < 3 ) "\
"& ( BPVIPCHI2 () > 9 ) " % config}
Tau2MuMuMu.CombinationCut = "(ADAMASS('tau+')<400*MeV)"
Tau2MuMuMu.MotherCut = """
( VFASPF(VCHI2) < 15 ) &
( (BPVLTIME () * c_light) > 100 * micrometer ) &
( BPVIPCHI2() < 225 )
"""
_stdLooseMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
return Selection (name,
Algorithm = Tau2MuMuMu,
RequiredSelections = [ _stdLooseMuons ])
def makeDs23Pi(name, config):
"""
Please contact Johannes Albrecht if you think of prescaling this line!
Arguments:
name : name of the Selection.
"""
    Ds2PiPiPi = CombineParticles("Combine"+name)
Ds2PiPiPi.DecayDescriptor = " [ D_s+ -> pi+ pi+ pi- ]cc "
Ds2PiPiPi.DaughtersCuts = { "pi+" : " ( PT > 300 * MeV ) & ( TRGHOSTPROB < %(TrackGhostProb)s ) & ( TRCHI2DOF < 3 ) & ( BPVIPCHI2 () > 9 ) " % config}
Ds2PiPiPi.CombinationCut = "(ADAMASS('D_s+')<80*MeV)"
Ds2PiPiPi.MotherCut = """
( VFASPF(VCHI2) < 15 ) &
( (BPVLTIME () * c_light) > 100 * micrometer ) &
( BPVIPCHI2() < 225 )
"""
_stdLoosePions = DataOnDemand(Location = "Phys/StdLoosePions/Particles")
return Selection (name,
Algorithm = Ds2PiPiPi,
RequiredSelections = [ _stdLoosePions ])
def makeDs23PiTIS(self, name, config):
"""
Please contact Johannes Albrecht if you think of prescaling this line!
Arguments:
name : name of the Selection.
"""
def makeTISTOS( name, _input, _trigger ) :
from Configurables import TisTosParticleTagger
_tisTosFilter = TisTosParticleTagger( name + "Tagger" )
_tisTosFilter.TisTosSpecs = { _trigger : 0 }
#_tisTosFilter.ProjectTracksToCalo = False
#_tisTosFilter.CaloClustForCharged = False
#_tisTosFilter.CaloClustForNeutral = False
#_tisTosFilter.TOSFrac = { 4:0.0, 5:0.0 }
return Selection( name
, Algorithm = _tisTosFilter
, RequiredSelections = [ _input ]
)
self.combDs2pipipi=makeDs23Pi(name, config)
self.selDs23PiHlt1TIS = makeTISTOS( self.name() + "Ds23PiHlt1TIS"
, self.combDs2pipipi#makeDs23Pi#self.combPiPiPi
, "Hlt1.*Decision%TIS"
)
self.selDs23PiHlt2TIS = makeTISTOS( self.name() + "Ds23PiHlt2TIS"
, self.selDs23PiHlt1TIS
, "Hlt2.*Decision%TIS"
)
return self.selDs23PiHlt2TIS
# return Selection (name,
# Algorithm = Ds2PiPiPiTIS,
# RequiredSelections = [ Ds2PiPiPi ])
def makeDs2PhiPi(name, config):
"""
Please contact Johannes Albrecht if you think of prescaling this line!
Arguments:
name : name of the Selection.
"""
    Ds2PhiPi = CombineParticles("Combine"+name)
Ds2PhiPi.DecayDescriptor = " [ D_s+ -> pi+ mu+ mu- ]cc "
Ds2PhiPi.DaughtersCuts = { "pi+" : " ( PT > 300 * MeV ) & ( TRGHOSTPROB < %(TrackGhostProb)s ) & ( TRCHI2DOF < 3 ) & ( BPVIPCHI2 () > 9 ) " % config,
"mu+" : " ( PT > 300 * MeV ) & ( TRGHOSTPROB < %(TrackGhostProb)s ) & ( TRCHI2DOF < 3 ) & ( BPVIPCHI2 () > 9 ) " % config}
Ds2PhiPi.CombinationCut = "(ADAMASS('D_s+')<250*MeV) & in_range ( 970 * MeV , AM23 , 1070 * MeV )"
Ds2PhiPi.MotherCut = """
( VFASPF(VCHI2) < 15 ) &
( (BPVLTIME () * c_light) >100 * micrometer ) &
( BPVIPCHI2() < 225 )
"""
_stdLoosePions = DataOnDemand(Location = "Phys/StdLoosePions/Particles")
_stdLooseMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
return Selection (name,
Algorithm = Ds2PhiPi,
RequiredSelections = [ _stdLooseMuons, _stdLoosePions ])
def makeTau25Mu(name, config):
"""
Please contact Johannes Albrecht if you think of prescaling this line!
Arguments:
name : name of the Selection.
"""
    Tau2MuMuMuMuMu = CombineParticles("Combine"+name)
Tau2MuMuMuMuMu.DecayDescriptor = " [ tau+ -> mu+ mu+ mu+ mu- mu-]cc"
Tau2MuMuMuMuMu.DaughtersCuts = { "mu+" : " ( PT > 300 * MeV ) & ( TRGHOSTPROB < %(TrackGhostProb)s ) & ( TRCHI2DOF < 3 ) & ( BPVIPCHI2 () > 9 ) " % config }
Tau2MuMuMuMuMu.CombinationCut = "(ADAMASS('tau+')<400*MeV)"
Tau2MuMuMuMuMu.MotherCut = """
( VFASPF(VCHI2) < 30 ) &
( (BPVLTIME () * c_light) > 100 * micrometer ) &
( BPVIPCHI2() < 225 )
"""
_stdLooseMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
return Selection (name,
Algorithm = Tau2MuMuMuMuMu,
RequiredSelections = [ _stdLooseMuons ])
def makeTau2pmm(name, config):
"""
Please contact Jon Harrison if you think of prescaling this line!
Arguments:
name : name of the Selection.
"""
    Tau2PMuMu = CombineParticles("Combine"+name)
Tau2PMuMu.DecayDescriptors = [" [ tau+ -> p+ mu+ mu- ]cc"," [ tau+ -> p~- mu+ mu+ ]cc",
" [ Lambda_c+ -> p+ mu+ mu- ]cc"," [ Lambda_c+ -> p~- mu+ mu+ ]cc" ]
Tau2PMuMu.DaughtersCuts = { "mu+" : " ( PT > 300 * MeV ) & ( TRCHI2DOF < 3 ) & ( BPVIPCHI2 () > 9 ) "\
"& ( PIDmu > -5 ) & ( (PIDmu - PIDK) > 0 ) & ( TRGHOSTPROB < %(TrackGhostProb)s )"% config,
"p+" : " ( PT > 300 * MeV ) & ( TRCHI2DOF < 3 ) & ( BPVIPCHI2 () > 9 ) "\
"& (PIDp>10) & ( TRGHOSTPROB < %(TrackGhostProb)s )" % config}
Tau2PMuMu.CombinationCut = "( (ADAMASS('tau+')<150*MeV) | (ADAMASS('Lambda_c+')<150*MeV) )"
Tau2PMuMu.MotherCut = """
( VFASPF(VCHI2) < 15 ) &
( (BPVLTIME () * c_light) > 100 * micrometer ) &
( BPVIPCHI2() < 225 )
"""
_stdLooseMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
_stdLooseProtons = DataOnDemand(Location = "Phys/StdLooseProtons/Particles")
return Selection (name,
Algorithm = Tau2PMuMu,
RequiredSelections = [ _stdLooseMuons, _stdLooseProtons ])
|
[
"[email protected]"
] | |
fbf85905651a6049958aa7abf3d844158023081a
|
2ec97b62d7edf0f2e257622e0027f12bfdb3651a
|
/custom_components/covid19_nswhealth_tests/sensor.py
|
72a601647fc62554cd94058c27c79948cacd0602
|
[
"Unlicense"
] |
permissive
|
Mirec511/HomeAssistantConfig
|
6837ed67ef73600410b6e07b82ef641930762429
|
200bca51e91ef3c844f6f2ddc004a7e82da3a04e
|
refs/heads/master
| 2023-02-08T00:03:38.825829 | 2021-01-02T00:42:42 | 2021-01-02T00:42:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,094 |
py
|
"""Sensor platform for NSW Air Quality"""
import datetime
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import pandas as pd
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
__version__ = "0.0.1"
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = []
DEFAULT_UOM = "Tests"
# DEFAULT_SCAN_INTERVAL = timedelta(hours=1)
# SCAN_INTERVAL = timedelta(hours=1)
ICON = "mdi:biohazard"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=300)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_NAME): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
"Setup Platform"
add_entities([NSWHSensor(name=config[CONF_NAME])])
class NSWHSensor(Entity):
def __init__(self, name: str):
self._state = None
self._name = name
self._attributes = {}
@property
def name(self):
return "covid_19_nswh_local_tests"
@property
def state(self):
return self._state
@property
def icon(self):
return ICON
@property
def state_attributes(self):
return self._attributes
@property
def unit_of_measurement(self):
return DEFAULT_UOM
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
url = "https://data.nsw.gov.au/data/dataset/60616720-3c60-4c52-b499-751f31e3b132/resource/945c6204-272a-4cad-8e33-dde791f5059a/download/pcr_testing_table1_location.csv"
df = pd.read_csv(url, parse_dates=[0])
df_hneh = df[df.lhd_2010_code == "X800"]
df_hneh_count = len(df_hneh)
df_hneh_earliest = df_hneh["test_date"].min().strftime("%a %d %b")
df_hneh_latest = df_hneh["test_date"].max().strftime("%a %d %b")
df_trc = df[df.lga_code19 == 17310.0]
df_trc_count = len(df_trc)
df_trc_earliest = df_trc["test_date"].min().strftime("%a %d %b")
df_trc_latest = df_trc["test_date"].max().strftime("%a %d %b")
df_tamw = df[df.postcode == 2340.0]
df_tamw_count = len(df_tamw)
df_tamw_earliest = df_tamw["test_date"].min().strftime("%a %d %b")
df_tamw_latest = df_tamw["test_date"].max().strftime("%a %d %b")
self._attributes = {}
self._state = 0
self._attributes["hneh"] = df_hneh_count
self._attributes["hneh_dates"] = (
"1st: " + str(df_hneh_earliest) + " - Last: " + str(df_hneh_latest)
)
self._attributes["trc"] = df_trc_count
self._attributes["trc_dates"] = (
"1st: " + str(df_trc_earliest) + " - Last: " + str(df_trc_latest)
)
self._attributes["tamw"] = df_tamw_count
self._attributes["tamw_dates"] = (
"1st: " + str(df_tamw_earliest) + " - Last: " + str(df_tamw_latest)
)
self._attributes[ATTR_ATTRIBUTION] = "Data provided by NSW Health"
self._state = df_tamw_count
|
[
"[email protected]"
] | |
b42d07f5043047e9017805411b5a44416a02059c
|
115022e4e1e2e78e99a73a87e8172efb16faecd7
|
/accounts/admin.py
|
85fa978e83658b9d3797c78642601e6e407a2d7a
|
[] |
no_license
|
navill/toy_project
|
a043865c3c40c3ceb3e07c7662347df225f62203
|
7d2ea837cfc4543d219c2096ab8f156f77d118b7
|
refs/heads/master
| 2022-12-11T04:09:39.845515 | 2020-02-14T02:56:40 | 2020-02-14T02:56:40 | 228,669,658 | 1 | 0 | null | 2022-12-08T03:19:23 | 2019-12-17T17:38:48 |
Python
|
UTF-8
|
Python
| false | false | 437 |
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import User
#
# class CustomUserAdmin(UserAdmin):
# add_form = CustomUserCreationForm
# form = CustomUserChangeForm
# model = User
# list_display = ['email', 'first_name', 'last_name']
#
#
# admin.site.register(User, CustomUserAdmin)
admin.site.register(User)
|
[
"[email protected]"
] | |
f70949e09e7699a52a5ae572af19486fb8b2dc86
|
b5ca0a2ce47fdb4306bbdffcb995eb7e6eac1b23
|
/Python/Strings/Alphabet Rangoli/Alphabet_Rangoli.py
|
26cb874f6a5802822cd69e78986885dcdb2c30df
|
[] |
no_license
|
rsoemardja/HackerRank
|
ac257a66c3649534197b223b8ab55011d84fb9e1
|
97d28d648a85a16fbe6a5d6ae72ff6503a063ffc
|
refs/heads/master
| 2022-04-14T22:46:03.412359 | 2020-04-03T07:44:04 | 2020-04-03T07:44:04 | 217,687,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 556 |
py
|
import string

def print_rangoli(size):
    """Print an alphabet rangoli of the given size."""
    alphabet = string.ascii_lowercase
    # top half (excluding the middle row)
    for i in range(size - 1, 0, -1):
        row = ["-"] * (size * 2 - 1)
        for j in range(0, size - i):
            row[size - 1 - j] = alphabet[j + i]
            row[size - 1 + j] = alphabet[j + i]
        print("-".join(row))
    # middle row and bottom half
    for i in range(0, size):
        row = ["-"] * (size * 2 - 1)
        for j in range(0, size - i):
            row[size - 1 - j] = alphabet[j + i]
            row[size - 1 + j] = alphabet[j + i]
        print("-".join(row))

if __name__ == '__main__':
    n = int(input())
    print_rangoli(n)
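# Expected output for n = 3 (a quick sanity check):
#
# ----c----
# --c-b-c--
# c-b-a-b-c
# --c-b-c--
# ----c----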
|
[
"[email protected]"
] | |
5d796f0381a5ebddca1568879724619b4be56403
|
573a2fa5094d510a9d44a361366a67efda5a3e8a
|
/blender/arm/logicnode/variable_scene.py
|
2aa08fdf4e4328d2cf637bf86b1a6abf32e6ccd9
|
[
"GPL-2.0-only",
"Zlib"
] |
permissive
|
kirumon/armory
|
ca7126a47172abe676da9e36ff13052a008bc812
|
4df19ef970ba76d6f99d0b07b44048e2e148e4ff
|
refs/heads/master
| 2020-04-19T22:51:41.810157 | 2019-01-30T16:45:54 | 2019-01-30T16:45:54 | 168,481,317 | 1 | 0 |
Zlib
| 2019-01-31T07:21:29 | 2019-01-31T07:21:28 | null |
UTF-8
|
Python
| false | false | 569 |
py
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SceneNode(Node, ArmLogicTreeNode):
'''Scene node'''
bl_idname = 'LNSceneNode'
bl_label = 'Scene'
bl_icon = 'QUESTION'
property0: StringProperty(name='', default='')
def init(self, context):
self.outputs.new('NodeSocketShader', 'Scene')
def draw_buttons(self, context, layout):
layout.prop_search(self, 'property0', bpy.data, 'scenes', icon='NONE', text='')
add_node(SceneNode, category='Variable')
|
[
"[email protected]"
] | |
9e94ec20208d9e3ca2e8fc55036b9d896fa09936
|
206c10808b6224f7d8236e27cc555e723af695d9
|
/tomodachi/envelope/protobuf_base.py
|
8a43ed27bf914590e789c8bd1487d3173828be56
|
[
"MIT"
] |
permissive
|
xdmiodz/tomodachi
|
3280209ae49100ec902e3b15c323b38e7480cdd3
|
7ca998a421dd724df5967d5baa0cf79f5112b79b
|
refs/heads/master
| 2023-03-15T19:22:16.381212 | 2023-01-20T07:34:48 | 2023-01-20T07:34:48 | 200,020,833 | 0 | 2 |
MIT
| 2023-03-08T00:00:01 | 2019-08-01T09:30:22 |
Python
|
UTF-8
|
Python
| false | false | 3,768 |
py
|
import base64
import logging
import time
import uuid
import zlib
from typing import Any, Dict, Tuple, Union
from tomodachi.envelope.proto_build.protobuf.sns_sqs_message_pb2 import SNSSQSMessage
PROTOCOL_VERSION = "tomodachi-protobuf-base--1.0.0"
class ProtobufBase(object):
@classmethod
def validate(cls, **kwargs: Any) -> None:
if "proto_class" not in kwargs:
raise Exception("No proto_class defined")
if kwargs.get("proto_class", None).__class__.__name__ != "GeneratedProtocolMessageType":
raise Exception("proto_class is not a GeneratedProtocolMessageType")
@classmethod
async def build_message(cls, service: Any, topic: str, data: Any, **kwargs: Any) -> str:
message_data = data.SerializeToString()
data_encoding = "proto"
if len(message_data) > 60000:
message_data = zlib.compress(data.SerializeToString())
data_encoding = "gzip_proto"
message = SNSSQSMessage()
message.service.name = getattr(service, "name", None)
message.service.uuid = getattr(service, "uuid", None)
message.metadata.message_uuid = "{}.{}".format(getattr(service, "uuid", ""), str(uuid.uuid4()))
message.metadata.protocol_version = PROTOCOL_VERSION
message.metadata.timestamp = time.time()
message.metadata.topic = topic
message.metadata.data_encoding = data_encoding
message.data = message_data
return base64.b64encode(message.SerializeToString()).decode("ascii")
@classmethod
async def parse_message(
cls, payload: str, proto_class: Any = None, validator: Any = None, **kwargs: Any
) -> Union[Dict, Tuple]:
message = SNSSQSMessage()
message.ParseFromString(base64.b64decode(payload))
message_uuid = message.metadata.message_uuid
timestamp = message.metadata.timestamp
raw_data = None
obj = None
if not proto_class:
raw_data = message.data
else:
obj = proto_class()
if message.metadata.data_encoding == "proto":
obj.ParseFromString(message.data)
elif message.metadata.data_encoding == "base64": # deprecated
obj.ParseFromString(base64.b64decode(message.data))
elif message.metadata.data_encoding == "gzip_proto":
obj.ParseFromString(zlib.decompress(message.data))
elif message.metadata.data_encoding == "base64_gzip_proto": # deprecated
obj.ParseFromString(zlib.decompress(base64.b64decode(message.data)))
elif message.metadata.data_encoding == "raw":
raw_data = message.data
if validator is not None:
try:
if hasattr(validator, "__func__"):
# for static functions
validator.__func__(obj)
else:
# for non-static functions
validator(obj)
except Exception as e:
logging.getLogger("envelope.protobuf_base").warning(e.__str__())
raise e
return (
{
"service": {"name": message.service.name, "uuid": message.service.uuid},
"metadata": {
"message_uuid": message.metadata.message_uuid,
"protocol_version": message.metadata.protocol_version,
"timestamp": message.metadata.timestamp,
"topic": message.metadata.topic,
"data_encoding": message.metadata.data_encoding,
},
"data": raw_data if raw_data is not None else obj,
},
message_uuid,
timestamp,
)
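# Round-trip sketch (names below are assumptions -- a generated protobuf class
# `MyProto` and a service object with `name`/`uuid` attributes):
#
# payload = await ProtobufBase.build_message(service, "my-topic", my_proto_obj)
# parsed, message_uuid, timestamp = await ProtobufBase.parse_message(
#     payload, proto_class=MyProto)
# parsed["data"]  # -> MyProto instance decoded from the envelope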
|
[
"[email protected]"
] | |
8153fbc21d01a5e302697a944d2f268e1cb21908
|
41a4eeaf62a36d7c57ad55393996787bb55ba6b7
|
/venv/lib/python3.7/site-packages/kubernetes/client/models/v1_env_var.py
|
b756905e43471ee3b9b8e089d217f9efdd3b5ef4
|
[] |
no_license
|
jicowan/group-operator
|
c7a20ff03584da9ace19489bc3d27b9fb22a066c
|
bac6e51aef0d9836679621e3ce7e55f4c1ead402
|
refs/heads/master
| 2021-07-14T11:45:30.062219 | 2019-09-26T15:26:52 | 2019-09-26T15:26:52 | 209,454,861 | 10 | 4 | null | 2021-07-01T17:23:07 | 2019-09-19T03:29:54 |
Python
|
UTF-8
|
Python
| false | false | 5,399 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1EnvVar(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'value': 'str',
'value_from': 'V1EnvVarSource'
}
attribute_map = {
'name': 'name',
'value': 'value',
'value_from': 'valueFrom'
}
def __init__(self, name=None, value=None, value_from=None):
"""
V1EnvVar - a model defined in Swagger
"""
self._name = None
self._value = None
self._value_from = None
self.discriminator = None
self.name = name
if value is not None:
self.value = value
if value_from is not None:
self.value_from = value_from
@property
def name(self):
"""
Gets the name of this V1EnvVar.
Name of the environment variable. Must be a C_IDENTIFIER.
:return: The name of this V1EnvVar.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1EnvVar.
Name of the environment variable. Must be a C_IDENTIFIER.
:param name: The name of this V1EnvVar.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def value(self):
"""
Gets the value of this V1EnvVar.
Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".
:return: The value of this V1EnvVar.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this V1EnvVar.
Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".
:param value: The value of this V1EnvVar.
:type: str
"""
self._value = value
@property
def value_from(self):
"""
Gets the value_from of this V1EnvVar.
Source for the environment variable's value. Cannot be used if value is not empty.
:return: The value_from of this V1EnvVar.
:rtype: V1EnvVarSource
"""
return self._value_from
@value_from.setter
def value_from(self, value_from):
"""
Sets the value_from of this V1EnvVar.
Source for the environment variable's value. Cannot be used if value is not empty.
:param value_from: The value_from of this V1EnvVar.
:type: V1EnvVarSource
"""
self._value_from = value_from
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1EnvVar):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
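# Minimal usage sketch (hypothetical values; relies on this module's imports,
# e.g. six.iteritems and pprint.pformat, defined above this excerpt):
if __name__ == '__main__':
    env = V1EnvVar(name='APP_MODE', value='production')
    # to_dict() walks swagger_types and recursively serializes nested models;
    # expected output is roughly:
    # {'name': 'APP_MODE', 'value': 'production', 'value_from': None}
    print(env.to_dict())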
|
[
"[email protected]"
] | |
33abb6dd9defeaef05ef48f283b9bbc6035dd8e9
|
e71fa62123b2b8f7c1a22acb1babeb6631a4549b
|
/examples/inheritance1.py
|
70ebab52d7eed62adfcc64973549ae62dd19fed5
|
[
"BSD-2-Clause"
] |
permissive
|
timgates42/XlsxWriter
|
40480b6b834f28c4a7b6fc490657e558b0a466e5
|
7ad2541c5f12b70be471b447ab709c451618ab59
|
refs/heads/main
| 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 |
NOASSERTION
| 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null |
UTF-8
|
Python
| false | false | 1,766 |
py
|
##############################################################################
#
# Example of how to subclass the Workbook and Worksheet objects. We also
# override the default worksheet.write() method to show how that is done.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2022, John McNamara, [email protected]
#
from xlsxwriter.workbook import Workbook
from xlsxwriter.worksheet import Worksheet
from xlsxwriter.worksheet import convert_cell_args
class MyWorksheet(Worksheet):
"""
Subclass of the XlsxWriter Worksheet class to override the default
write() method.
"""
@convert_cell_args
def write(self, row, col, *args):
data = args[0]
# Reverse strings to demonstrate the overridden method.
if isinstance(data, str):
data = data[::-1]
return self.write_string(row, col, data)
else:
# Call the parent version of write() as usual for other data.
return super(MyWorksheet, self).write(row, col, *args)
class MyWorkbook(Workbook):
"""
Subclass of the XlsxWriter Workbook class to override the default
Worksheet class with our custom class.
"""
def add_worksheet(self, name=None):
# Overwrite add_worksheet() to create a MyWorksheet object.
worksheet = super(MyWorkbook, self).add_worksheet(name, MyWorksheet)
return worksheet
# Create a new MyWorkbook object.
workbook = MyWorkbook('inheritance1.xlsx')
# The code from now on will be the same as a normal "Workbook" program.
worksheet = workbook.add_worksheet()
# Write some data to test the subclassing.
worksheet.write('A1', 'Hello')
worksheet.write('A2', 'World')
worksheet.write('A3', 123)
worksheet.write('A4', 345)
workbook.close()
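# Note: with the overridden write() above, the string cells are stored
# reversed (A1 holds 'olleH', A2 holds 'dlroW'), while the numbers in A3/A4
# pass through the parent write() unchanged.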
|
[
"[email protected]"
] | |
db6569501a941b99f6394e71bd64953e8ddb415a
|
993cf64df4795e7912a7f9157bd8bf02aa985506
|
/past_work/SWEA_D2/1946 docdecode.py
|
99fd7ef2d7f3d2cd0488565545486aecda7eebfd
|
[] |
no_license
|
jiwonjulietyoon/Algorithm
|
b541e630c5b01b47cc05b538970d2b73d452baf5
|
a11be16f4700e7e55382d4dcfd88d534a232f024
|
refs/heads/master
| 2020-04-24T01:54:05.200538 | 2019-11-09T03:56:47 | 2019-11-09T03:56:47 | 171,616,523 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 879 |
py
|
# 1946. Simple decompression (unpack the run-length encoded document)
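# First attempt (left unfinished), kept for reference: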
# for T in range(int(input())):
# doc = {}
# N = int(input())
# for _ in N:
# k, v = input().split()
# v = int(v)
# doc[k] = v
# print(f"#{T+1}")
# for k in doc:
# for
for T in range(int(input())):
N = int(input())
print(f"#{T+1}")
cnt = 0
for _ in range(N):
ch, n = input().split()
n = int(n)
for _ in range(n):
print(ch, end='')
cnt += 1
if not cnt%10:
print()
print()
for T in range(int(input())):
doc = []
N = int(input())
for _ in range(N):
ch, n = input().split()
n = int(n)
doc.extend([ch] * n)
print(f"#{T+1}")
cnt = 0
for x in doc:
print(x, end='')
cnt += 1
if not cnt % 10:
print()
print()
|
[
"[email protected]"
] | |
d09335d99fce5581c09dbb4944074926cd84937b
|
74983098c5de53007bde6052a631845c781b5ba8
|
/camelback/camelback15/camelback.py
|
6809a547ce3f5ca4a2973a8aad74b2abcb3e8e5e
|
[] |
no_license
|
numairmansur/Experiments
|
94ccdd60f4c2cf538fab41556ac72405656c9d77
|
592f39916461c7a9f7d400fa26f849043d1377ed
|
refs/heads/master
| 2021-04-29T12:39:16.845074 | 2017-02-15T07:36:47 | 2017-02-15T07:36:47 | 78,043,284 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 700 |
py
|
import numpy as np
import sys
import math
import time
import csv
from hpolib.benchmarks.synthetic_functions import Camelback # Change this
from time import gmtime, strftime
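# Objective wrapper in the main(job_id, params) style used by Spearmint-like
# optimizers (an assumption based on the signature; note the Python 2 print
# statements). It evaluates the Camelback synthetic benchmark and appends
# each result to a CSV log.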
def main(job_id, params):
print '!!! Entered Main !!!'
print 'Anything printed here will end up in the output directory for job #:', str(job_id)
print params
f = Camelback() # Change this
res = f.objective_function([params['x'], params['y']]) # CHANGE THIS
print res
# /home/mansurm/Experiments/...../run1.csv
with open('/home/mansurm/Experiments/camelback/run15.csv','a') as csvfile: # CHANGE THIS
writer = csv.writer(csvfile, delimiter=',')
writer.writerow([res['main'][0]])
return res['main'][0]
|
[
"[email protected]"
] | |
31ff8aaa5ba4c0ea5c9cd51140a9cb65ed640375
|
78ed228ff9262eaca44fe5badab05f512433eea8
|
/transcrypt/development/automated_tests/transcrypt/iterators_and_generators/__init__.py
|
9da8cb24e88386f5ae95b46235edb89a0fc66840
|
[
"Apache-2.0"
] |
permissive
|
khlumzeemee/Transcrypt
|
74af14f3175d1ce1d4debdfc5b346214d2597105
|
6a8abee3648daa0f36b509993ba54e14e8e9cf9b
|
refs/heads/master
| 2021-01-12T16:57:15.609336 | 2016-10-14T17:22:29 | 2016-10-14T17:22:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,910 |
py
|
class Iterable:
def __init__ (self, i):
self.aList = range (0, 50, i)
def __iter__ (self):
return Iterator (self)
class Iterator:
def __init__ (self, iterable):
self.iterable = iterable
self.index = -1
def __next__ (self): # Should be auto-wrapped in a next (self) by the compiler
self.index += 1
if self.index > 5:
raise StopIteration ()
return self.iterable.aList [self.index]
def __iter__ (self):
return self
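# For example, list (Iterable (7)) yields the first six multiples of 7:
# [0, 7, 14, 21, 28, 35], after which __next__ raises StopIteration.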
def exhaustableGenerator (i):
for i in range (5):
yield 2 * i
def run (autoTester):
    exhaustableGenExp = (a * a * a for a in [10, 20, 30]) # Currently still converted to an iterator over a list comprehension, so it must also be iterable
    # It therefore becomes py_iter (aList).
    # A list already has an __iter__, which it will return; that is a __PyIterator__
    # That __PyIterator__ will already have a __next__, to which py_iter first adds a next
    # So exhaustableGenExp is an iterator with both a next and a __next__
    # If we call iter on that, py_iter is called again, on an object that already has a next and a __next__
    # For this reason py_iter needs a recursion prevention check
iterables = [Iterable (7), exhaustableGenerator (5), [i * 3 for i in range (5)], exhaustableGenExp]
for iterable in iterables:
autoTester.check ('[1]')
iterator = iter (iterable)
try:
while True:
autoTester.check (next (iterator))
except Exception as exception:
autoTester.check (exception.__class__.__name__)
autoTester.check ('[2]')
iterator = iter (iterable)
try:
while True:
autoTester.check (next (iterator))
except Exception as exception:
autoTester.check (exception.__class__.__name__)
for iterable in iterables:
autoTester.check ('[3]')
for n in iterable:
autoTester.check (n)
autoTester.check ('[4]')
for n in iterable:
autoTester.check (n)
|
[
"[email protected]"
] | |
3a2bb166b63a640f43091117e69e7b8199f98ea0
|
3be86a9093167acf4cb92a0b70c7087996f8f8e1
|
/0013_roman_to_integer.py
|
1322ef20494da2c1ea575461524a1a41cf40a8cd
|
[] |
no_license
|
zimingding/leetcode
|
28d25fc1e62612752c511b52af9ff77f7b7a7da7
|
c43b22146465a78a58e3cc3945228431ed94388a
|
refs/heads/master
| 2020-05-24T00:10:59.893207 | 2019-11-11T20:21:38 | 2019-11-11T20:21:38 | 187,010,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,284 |
py
|
class Solution:
def romanToInt(self, s: str) -> int:
r = 0
i = 0
while i < len(s):
c = s[i]
if c == 'I':
if i+1 < len(s) and s[i+1] == 'V':
r += 4
i += 1
elif i+1 < len(s) and s[i+1] == 'X':
r += 9
i += 1
else:
r += 1
if c == 'V':
r += 5
if c == 'X':
if i+1 < len(s) and s[i + 1] == 'L':
r += 40
i += 1
elif i+1 < len(s) and s[i + 1] == 'C':
r += 90
i += 1
else:
r += 10
if c == 'L':
r += 50
if c == 'C':
if i+1 < len(s) and s[i + 1] == 'D':
r += 400
i += 1
elif i+1 < len(s) and s[i + 1] == 'M':
r += 900
i += 1
else:
r += 100
if c == 'D':
r += 500
if c == 'M':
r += 1000
i += 1
return r
print(Solution().romanToInt('MCMXCIV'))
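# A more compact alternative (illustrative sketch, not part of the original
# submission): map each symbol to its value and subtract whenever a smaller
# value precedes a larger one, which covers IV/IX/XL/XC/CD/CM uniformly.
def roman_to_int_alt(s: str) -> int:
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for cur, nxt in zip(s, s[1:] + ' '):  # pair each symbol with its successor
        v = values[cur]
        total += -v if values.get(nxt, 0) > v else v
    return total

assert roman_to_int_alt('MCMXCIV') == 1994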
|
[
"[email protected]"
] | |
a0ca7b7d5578f7ed4c1ad33fdabb84e8158a0362
|
98b9e0a180d65bde6e799a0ef503409ce09ad9bd
|
/PythonFun/pyFun/Type_List.py
|
912a66f52b9aff6b8a3e6b5674fcb68014099e35
|
[] |
no_license
|
LizhangX/DojoAssignments
|
7fae9ed3098d1a131f2a2b093ded95a4de70a5cb
|
b56ccdf41fe5d43a4d5f340e4f21aaf632d7b7d6
|
refs/heads/master
| 2023-02-04T14:27:09.234685 | 2019-08-23T22:18:04 | 2019-08-23T22:18:04 | 94,444,518 | 0 | 1 | null | 2023-01-31T21:55:04 | 2017-06-15T13:55:14 |
TSQL
|
UTF-8
|
Python
| false | false | 948 |
py
|
# Write a program that takes a list and prints a message for each element in the list, based on that element's data type.
def TypeList(arr):
string = ""
sum = 0
for i in arr:
if type(i) == str:
string = string + " " + i
elif type(i) == float or type(i) == int:
sum += i
if string != "" and sum != 0:
print "\"The array you entered is of mixed type\""
print "\"String:{}\"".format(string)
print "\"Sum: {}\"".format(sum)
elif string != "":
print "\"The array you entered is of string type\""
print "\"String:{}\"".format(string)
    elif sum != 0:  # any all-numeric list (the original type(sum) == int check silently skipped all-float input)
print "\"The array you entered is of integer type\""
print "\"Sum: {}\"".format(sum)
l = ['magical unicorns',19,'hello',98.98,'world']
m = [2,3,1,7,4,12]
n = ['magical','unicorns']
TypeList(l)
TypeList(m)
TypeList(n)
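# Expected output for l above (Python 2), as a sanity check:
#   "The array you entered is of mixed type"
#   "String: magical unicorns hello world"
#   "Sum: 117.98"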
|
[
"[email protected]"
] | |
c02e9b9aafa6aeabec084409401e9a732d8f1f9a
|
edfd1db2b48d4d225bc58be32fbe372a43415112
|
/team-task/airflow1.9/dags/redtuxedo/rwidjojo2_5.py
|
fda6c03c2023cabe0f8455728727512ee85cda92
|
[] |
no_license
|
rwidjojo/airflow-training
|
ed83cb9e97ca85ef06de1426f2f41014881a1f22
|
ac82040d8ddc3859df5576eee08d397e824016f1
|
refs/heads/main
| 2023-08-12T21:01:17.672059 | 2021-01-04T09:17:48 | 2021-01-04T09:17:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 928 |
py
|
# Instructions
# Define a function that uses the Python logger to log a
# parameter passed in from PythonOperator
import datetime
import logging
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
owner = 'rwidjojo' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
def write_to_log(**kwargs):
myText = kwargs["log_to_write"]
logging.info(myText)
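# PythonOperator passes each op_kwargs entry through to the callable as a
# keyword argument, which is how kwargs["log_to_write"] above gets its value.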
dag = DAG(
f'{owner}.lesson2.excercise5',
default_args=default_args,
)
greet_task = PythonOperator(
task_id="say_hello",
python_callable=write_to_log,
op_kwargs={'log_to_write': f'Hi {owner} greeting from airflow'},
dag=dag,
)
bye_task = PythonOperator(
task_id="bye_hello",
python_callable=write_to_log,
op_kwargs={'log_to_write': 'Good bye'},
dag=dag,
)
greet_task >> bye_task
|
[
"[email protected]"
] | |
5501985b53b77f2d646b0cf9178da0185097aea6
|
5b88dcd27a0edb0c4de5cf523848d7f033a82bec
|
/Documents/Django/Examen_Django_1_3/Nueva carpeta/M5_SitioMedico-01_JuanArancibia/M5_SitioMedico/urls.py
|
06031cc279720ccf5b8a0c5f1c42dd2a0fb8458b
|
[
"MIT"
] |
permissive
|
JuanArancibiaF/M5_SitioMedico
|
e63440c8edc0a491c6c134c88e8a43cb3283195a
|
ea9cb6a35f9b2e57741f6384d3b8c0081b7bb677
|
refs/heads/master
| 2023-03-03T15:38:13.107832 | 2021-02-12T16:53:14 | 2021-02-12T16:53:14 | 337,381,870 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 852 |
py
|
"""M5_SitioMedico URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app1.urls')),
path('app_forms/', include('app_forms.urls')),
]
|
[
"[email protected]"
] | |
73df0c0ec8d34d04112fe3dd73871d6063d5dc44
|
172e5fcd35072b576380c4258f0ca3e0d8883b35
|
/datasets/helmet/filter.py
|
7d5e9c7548d9e929d3180d46f8d3e6a3c965bfe2
|
[] |
no_license
|
fanqie03/classifier_pytorch
|
e1951578fb0eeab8983bf80be710d250be64b8f9
|
21bb2e3c2ca01333080668dce928b48d4e0e6d59
|
refs/heads/master
| 2023-05-14T13:36:08.211691 | 2020-01-03T08:02:09 | 2020-01-03T08:02:09 | 202,356,398 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
import shutil
import os
import argparse
from pathlib import Path
from PIL import Image
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--source_dir', default='/home/cmf/datasets/extract_data/gongdi/person')
parser.add_argument('--target_dir', default='/home/cmf/datasets/extract_data/gongdi/person_filter')
parser.add_argument('--min_size', default=30)
args = parser.parse_args()
return args
def split(array, split_num):
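    # Splits `array` into `split_num` roughly equal consecutive chunks.
    # (Defined here but not used by main() below.)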
total_len = len(array)
part_num = len(array) / split_num
arrays = []
for i in range(split_num):
start = int(i*part_num)
end = int((i+1)*part_num) if int((i+1)*part_num) < total_len else total_len
arrays.append(array[start: end])
return arrays
def main():
args = get_args()
source_dir = Path(args.source_dir)
    target_dir = Path(args.target_dir)
    target_dir.mkdir(parents=True, exist_ok=True)  # ensure the destination exists before copying
    source_files = [f for f in source_dir.rglob('*') if f.is_file()]  # rglob('*') also yields directories
    folder_name = source_dir.name
    for i, file in tqdm(enumerate(source_files)):
        image = Image.open(file)
if image.width < args.min_size or image.height < args.min_size:
continue
del image
dst = target_dir / file.name
shutil.copy(file, dst)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f1f1b0a578bbed74ac77a1c88f51c2b536e74150
|
d6150d04ec161dbdac33e9be23648ad4f258a1a7
|
/tensorflow/examples/saved_model/integration_tests/use_mnist_cnn.py
|
9e1ca33029a928e8f6a8bad1f068d9e56e71be54
|
[
"Apache-2.0"
] |
permissive
|
aweers/tensorflow
|
bf0f5c6c6a6384a044a5c081dd1e8efe89c0349e
|
640726310112e1ad708faef66f751fe5d70ec102
|
refs/heads/master
| 2020-04-24T23:56:25.910880 | 2019-03-09T15:12:37 | 2019-03-09T15:12:37 | 172,361,635 | 0 | 0 |
Apache-2.0
| 2019-03-09T15:12:38 | 2019-02-24T16:31:37 |
C++
|
UTF-8
|
Python
| false | false | 4,964 |
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imports a convolutional feature extractor for MNIST in SavedModel format.
This program picks up the SavedModel written by export_mnist_cnn.py and
uses the feature extractor contained in it to do classification on either
classic MNIST (digits) or Fashion MNIST (thumbnails of apparel). Optionally,
it trains the feature extractor further as part of the new classifier.
As expected, that makes training slower; it does not help much on the
original training dataset, but it helps a lot for transfer to the other dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.examples.saved_model.integration_tests import mnist_util
from tensorflow.examples.saved_model.integration_tests import util
from tensorflow.python.saved_model import load as svmd_load
tf.saved_model.load = svmd_load.load
FLAGS = flags.FLAGS
flags.DEFINE_string(
'export_dir', None,
'Directory of exported SavedModel.')
flags.DEFINE_integer(
'epochs', 5,
'Number of epochs to train.')
flags.DEFINE_bool(
'retrain', False,
'If set, the imported SavedModel is trained further.')
flags.DEFINE_float(
'dropout_rate', None,
'If set, dropout rate passed to the SavedModel.')
flags.DEFINE_float(
'regularization_loss_multiplier', None,
'If set, multiplier for the regularization losses in the SavedModel.')
flags.DEFINE_bool(
'use_fashion_mnist', False,
'Use Fashion MNIST (products) instead of the real MNIST (digits). '
'With this, --retrain gains a lot.')
flags.DEFINE_bool(
'fast_test_mode', False,
'Shortcut training for running in unit tests.')
def make_classifier(feature_extractor, l2_strength=0.01, dropout_rate=0.5):
"""Returns a Keras Model to classify MNIST using feature_extractor."""
regularizer = lambda: tf.keras.regularizers.l2(l2_strength)
net = inp = tf.keras.Input(mnist_util.INPUT_SHAPE)
net = feature_extractor(net)
net = tf.keras.layers.Dropout(dropout_rate)(net)
net = tf.keras.layers.Dense(mnist_util.NUM_CLASSES, activation='softmax',
kernel_regularizer=regularizer())(net)
return tf.keras.Model(inputs=inp, outputs=net)
def scale_regularization_losses(obj, multiplier):
"""Scales obj.regularization_losses by multiplier if not None."""
if multiplier is None: return
def _scale_one_loss(l): # Separate def avoids lambda capture of loop var.
f = tf.function(lambda: tf.multiply(multiplier, l()))
_ = f.get_concrete_function()
return f
obj.regularization_losses = [_scale_one_loss(l)
for l in obj.regularization_losses]
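# Note: calling get_concrete_function() above traces each scaled loss eagerly,
# so the multiplier is baked in when the losses are later collected from the
# restored object (intent inferred from the surrounding code).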
def main(argv):
del argv
# Load a pre-trained feature extractor and wrap it for use in Keras.
obj = tf.saved_model.load(FLAGS.export_dir)
scale_regularization_losses(obj, FLAGS.regularization_loss_multiplier)
arguments = {}
if FLAGS.dropout_rate is not None:
arguments['dropout_rate'] = FLAGS.dropout_rate
feature_extractor = util.CustomLayer(obj, output_shape=[128],
trainable=FLAGS.retrain,
arguments=arguments)
# Build a classifier with it.
model = make_classifier(feature_extractor)
# Train the classifier (possibly on a different dataset).
(x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
use_fashion_mnist=FLAGS.use_fashion_mnist,
fake_tiny_data=FLAGS.fast_test_mode)
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(),
metrics=['accuracy'],
# TODO(arnoegw): Remove after investigating huge allocs.
run_eagerly=True)
print('Training on %s with %d trainable and %d untrainable variables.' %
('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
len(model.trainable_variables), len(model.non_trainable_variables)))
model.fit(x_train, y_train,
batch_size=128,
epochs=FLAGS.epochs,
steps_per_epoch=3,
verbose=1,
validation_data=(x_test, y_test))
if __name__ == '__main__':
# tf.enable_v2_behavior()
app.run(main)
|
[
"[email protected]"
] | |
2429e6f4e1bf8f3af9f8ad79ed51e9307b1be38e
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/CJ_16_2/16_2_1_kieronb_digits.py
|
88aa02a8dfc600e147bcbfb888a04aa403e5cd60
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 1,483 |
py
|
#!/usr/bin/python
import sys
#import logging
#logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
nums = ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE'];
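# Each pass below keys on a letter unique among the remaining number words:
# Z, W, U, X, G occur only in ZERO, TWO, FOUR, SIX, EIGHT; once those words
# are removed, O, R, F, V, N uniquely identify ONE, THREE, FIVE, SEVEN, NINE.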
def solve(s):
digits = []
while 'Z' in s:
digits.append(0)
s = remove_word(s, nums[0])
while 'W' in s:
digits.append(2)
s = remove_word(s, nums[2])
while 'U' in s:
digits.append(4)
s = remove_word(s, nums[4])
while 'X' in s:
digits.append(6)
s = remove_word(s, nums[6])
while 'G' in s:
digits.append(8)
s = remove_word(s, nums[8])
while 'O' in s:
digits.append(1)
s = remove_word(s, nums[1])
while 'R' in s:
digits.append(3)
s = remove_word(s, nums[3])
while 'F' in s:
digits.append(5)
s = remove_word(s, nums[5])
while 'V' in s:
digits.append(7)
s = remove_word(s, nums[7])
while 'N' in s:
digits.append(9)
s = remove_word(s, nums[9])
return ''.join(sorted([str(i) for i in digits]))
def remove_word(s, w):
for c in w:
s = remove_char(s, c)
return s
def remove_char(s, c):
if not c in s:
return s
i = s.index(c)
return s[:i] + s[i+1:]
first = True
n = 0
for line in sys.stdin:
if first:
first = False
else:
n = n + 1
ans = solve(line)
print("Case #" + str(n) + ": " + ans)
|
[
"[[email protected]]"
] | |
420215bd2efecae47efd82097ef50ae4aeb87a1c
|
b3084604bb27ff87149bfc49c16a8a5e6ea5582c
|
/flsp-mrp/models/Productionmsg.py
|
7cf757957caa793f22d4c429c2a337555ca17ede
|
[] |
no_license
|
odoo-smg/firstlight
|
9fe308fb876e80a11ebfda40a442983c9a85ae3e
|
4a82cd5cfd1898c6da860cb68dff3a14e037bbad
|
refs/heads/master
| 2022-10-09T10:10:36.108190 | 2022-09-28T16:06:30 | 2022-09-28T16:06:30 | 235,829,864 | 3 | 2 | null | 2022-03-17T19:26:27 | 2020-01-23T15:56:48 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,009 |
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class Produtionflspmsg(models.TransientModel):
_name = 'flspmrp.productionflspmsg'
_description = "Wizard: Message on Production"
@api.model
def default_get(self, fields):
res = super(Produtionflspmsg, self).default_get(fields)
production_order = self.env['mrp.production']
production_id = self.env.context.get('default_production_id') or self.env.context.get('active_id')
if production_id:
production_order = self.env['mrp.production'].browse(production_id)
if production_order.exists():
if 'product_id' in fields:
res['product_id'] = production_order.product_id.id
if 'bom_id' in fields:
res['bom_id'] = production_order.bom_id.id
return res
product_id = fields.Many2one('product.product', string='Product', readonly=True)
bom_id = fields.Many2one('mrp.bom', string='Bill of Material', readonly=True)
|
[
"[email protected]"
] | |
fadfcdfc5a65325f9e65158cb8d2183527c560d5
|
4380a4029bac26f205ed925026914dce9e96fff0
|
/slyr/parser/object.py
|
7edcb40861627e48a2518d8bf6be2b450d2eba7e
|
[] |
no_license
|
deepVector/slyr
|
6b327f835994c8f20f0614eb6c772b90aa2d8536
|
5d532ac3eec0e00c5883bf873d30c6b18a4edf30
|
refs/heads/master
| 2020-12-03T10:24:39.660904 | 2019-04-08T00:48:03 | 2019-04-08T00:48:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 779 |
py
|
#!/usr/bin/env python
"""
Base class for persistent objects
"""
from typing import List
class Object:
"""
Base class for objects which can be read from a stream
"""
@staticmethod
def guid() -> str:
"""
Returns the object's GUID
"""
return ''
@staticmethod
def compatible_versions():
"""
Returns the list of compatible object versions, or None to skip
ESRI version bytes
"""
return [1]
def read(self, stream, version):
"""
Reads the object from the given stream
"""
pass
def children(self) -> List['slyr.parser.Object']:
"""
Returns a list of all child objects referenced by this object
"""
return []
|
[
"[email protected]"
] | |
6c61a8655ed737b3ded276848231cfa8a07a9bb0
|
2bf56904829ab9d5e5aa49a50aeceaef620df643
|
/tests/test_collector.py
|
41a03d3fd613b6b8f6b685f983d856ad12323a3e
|
[
"MIT"
] |
permissive
|
OCHA-DAP/hdx-scraper-unosat-flood-portal
|
501f53d43ead4fc46312fc46229c43c034787ed0
|
80b0bcd404993e4bd1dae442f794c9f86b6d5328
|
refs/heads/master
| 2021-12-25T06:00:02.327571 | 2021-12-22T20:07:36 | 2021-12-22T20:07:36 | 37,866,121 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,278 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# system
import os
import sys
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(dir, 'scripts'))
# testing
import mock
import unittest
from mock import patch
# program
import config.load as Config
import config.database as DB
import utilities.db as Database
import unosat_flood_portal_collect.collect as Collect
#
# Global variables.
#
TEST_DATA = 'test_flood_portal_output.json'
class CheckCollectorFunctions(unittest.TestCase):
'''Unit tests checking if the collector is working as expected.'''
def test_wrapper_doesnt_fail(self):
assert Collect.Main() != False
def test_fetch_data_function(self):
assert Collect.FetchData(url='http://localhost:8080') == False
def test_processing_works(self):
data = Collect.DownloadAndProcessData()
assert type(data) == list
def test_clean_table_fails(self):
assert Collect.CleanTable('foo') == False
class CheckPatches(unittest.TestCase):
'''Unit tests that check if the patches are doing what they are supposed to do.'''
def test_read_all_records_works(self):
d = Database.ReadAllRecords('unprocessed_data')
assert type(d) == list
assert Database.ReadAllRecords('unprocessed_data') != False
|
[
"[email protected]"
] | |
8178cc34b56a6a03048dac573f71a04c78628aa0
|
7cc9cb8bfa749cb011170299ca780f8e8d140b54
|
/api/endpoints/login.py
|
11e94f14912aefe873931f2ae736874d028c5596
|
[
"Apache-2.0"
] |
permissive
|
AssiaHalloul/FRDP
|
75ef66168782d854494a79ef220f60d3792784b7
|
e799b7e9d858c613ee7d7cce992dddee2eafca5f
|
refs/heads/main
| 2023-07-28T04:09:27.358335 | 2021-09-17T03:11:13 | 2021-09-17T03:11:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,915 |
py
|
from datetime import timedelta
from typing import Any
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
import crud
import models
from api import deps
from core import security
from core.config import settings
from core.security import get_password_hash
from crud.crud_user import CRUDUser
from schemas.msg import Msg
from schemas.token import Token
from schemas.user import User
from utils import generate_password_reset_token, verify_password_reset_token
router = APIRouter()
user1 = CRUDUser(User)
@router.post("/login/access-token", response_model=Token)
def login_access_token(
db: Session = Depends(deps.get_db), form_data: OAuth2PasswordRequestForm = Depends()
) -> Any:
"""
OAuth2 compatible token login, get an access token for future requests
"""
user = user1.authenticate(db, email=form_data.username, password=form_data.password)
if not user:
raise HTTPException(status_code=400, detail="Incorrect email or password")
elif not user1.is_active(user):
raise HTTPException(status_code=400, detail="Inactive user")
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
return {
"access_token": security.create_access_token(
user.id, expires_delta=access_token_expires
),
"token_type": "bearer",
}
@router.post("/login/test-token", response_model=User)
def Check_Session(
current_user: models.user.User = Depends(deps.get_current_user),
) -> Any:
"""
Test if a user is logged in by checking if a valid access token is in the header
"""
return current_user
@router.post(
"/password-recovery/{email}",
response_model=Msg,
status_code=200,
response_description="Success",
)
def recover_password(email: str, db: Session = Depends(deps.get_db)) -> Any:
"""
Password Recovery
"""
user = user1.get_by_email(db, email=email)
if not user:
raise HTTPException(
status_code=422,
detail="The user with this username does not exist in the system.",
)
return {"msg": generate_password_reset_token(email=email)}
@router.post("/reset-password/", response_model=Msg)
def reset_password(
token: str = Body(...),
new_password: str = Body(...),
db: Session = Depends(deps.get_db),
) -> Any:
"""
Reset your password
"""
email = verify_password_reset_token(token)
if not email:
raise HTTPException(status_code=400, detail="Invalid token")
user = crud.user.get_by_email(db, email=email)
if not user:
raise HTTPException(status_code=404, detail="User not found")
hashed_password = get_password_hash(new_password)
user.hashed_password = hashed_password
db.add(user)
db.commit()
return {"msg": "Password updated successfully!"}
|
[
"[email protected]"
] | |
55d298950c7d7ca21de4fa38f72dc8b508087211
|
93db4b48741ff4ab0a3895813a6c7543e01821ea
|
/leetcode/Python/884_decoded_str.py
|
436b4076dfc4d56b8920181fbd41bd1e380db873
|
[] |
no_license
|
shubham14/Coding_Contest_solutions
|
f884c458d3316bdafc6f1b1a52cf3e962c58bc47
|
1b67497f35b892c25e3d9600214fa37a738ffd40
|
refs/heads/master
| 2021-06-22T13:34:10.581101 | 2019-10-09T02:56:01 | 2019-10-09T02:56:01 | 131,326,516 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,125 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 19:42:57 2018
@author: Shubham
"""
import math
class Solution:
def maxArea(self, A, B, C, D, E, F, G, H):
areaSum = (C-A)*(D-B) + (G-E)*(H-F)
if(E>=C or A>=G or B>=H or F>=D):
return areaSum
bX = max(A, E)
bY = max(B, F)
tX = min(C, G)
tY = min(D, H)
areaIntersect = (tX-bX)*(tY-bY);
return areaSum - areaIntersect
def minSquare(self, n):
s = [i*i for i in range(1, int(math.sqrt(n)) + 1)]
l = 0
currentLevel = [0]
while True:
nextLevel = []
for a in currentLevel:
for b in s:
if a + b == n:
return l + 1
if a + b < n:
nextLevel.append(a + b)
currentLevel = list(set(nextLevel))
l += 1
def rev_str(self, str1, st, end):
while st <= end:
str1[st], str1[end] = str1[end], str1[st]
st += 1
end -= 1
return str1
def rev_sent(self, sent):
start = 0
end = 0
sent = list(sent)
for i, ele in enumerate(sent):
if ele == ' ' and start != end:
end = i - 1
sent = self.rev_str(sent, start, end)
start = i + 1
sent = self.rev_str(sent, start, len(sent)-1)
return sent
class MinStack:
    def __init__(self):
        self.min_stack = []
        self.top_idx = -1  # was self.top, which shadowed the top() method below
        self.minEle = 1000000
        self.next_minEle = 999999
    def push(self, x):
        self.min_stack.append(x)
        self.top_idx += 1
        if x < self.minEle:
            self.next_minEle = self.minEle
            self.minEle = x
    def pop(self):
        if self.min_stack[self.top_idx] == self.minEle:
            self.minEle = self.next_minEle
        self.min_stack.pop()
        self.top_idx -= 1
    def top(self):
        return self.min_stack[self.top_idx]
    def getMin(self):
        # Note: next_minEle remembers only one earlier minimum, so popping two
        # successive minima can leave getMin() stale.
        return self.minEle
|
[
"[email protected]"
] | |
86f71192aab9f52f5661559bb3f89b77d1024de2
|
aea8fea216234fd48269e4a1830b345c52d85de2
|
/fhir/resources/tests/test_patient.py
|
ef1efd5ded649615fb1e081d1f6330bcd7bf3f8a
|
[
"BSD-3-Clause"
] |
permissive
|
mmabey/fhir.resources
|
67fce95c6b35bfdc3cbbc8036e02c962a6a7340c
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
refs/heads/master
| 2023-04-12T15:50:30.104992 | 2020-04-11T17:21:36 | 2020-04-11T17:21:36 | 269,712,884 | 0 | 0 |
NOASSERTION
| 2020-06-05T17:03:04 | 2020-06-05T17:03:04 | null |
UTF-8
|
Python
| false | false | 29,150 |
py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Patient
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import patient
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class PatientTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("Patient", js["resourceType"])
return patient.Patient(js)
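    # Each testPatientN below follows the same round-trip pattern: load a JSON
    # fixture, assert on its fields, serialize with as_json(), re-parse the
    # result, and run the same assertions again.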
def testPatient1(self):
inst = self.instantiate_from("patient-example-xds.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient1(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient1(inst2)
def implPatient1(self, inst):
self.assertTrue(inst.active)
self.assertEqual(force_bytes(inst.address[0].city), force_bytes("Metropolis"))
self.assertEqual(force_bytes(inst.address[0].country), force_bytes("USA"))
self.assertEqual(
force_bytes(inst.address[0].line[0]), force_bytes("100 Main St")
)
self.assertEqual(force_bytes(inst.address[0].postalCode), force_bytes("44130"))
self.assertEqual(force_bytes(inst.address[0].state), force_bytes("Il"))
self.assertEqual(inst.birthDate.date, FHIRDate("1956-05-27").date)
self.assertEqual(inst.birthDate.as_json(), "1956-05-27")
self.assertEqual(force_bytes(inst.gender), force_bytes("male"))
self.assertEqual(force_bytes(inst.id), force_bytes("xds"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("urn:oid:1.2.3.4.5")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("MR")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("usual"))
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("89765a87b")
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Doe"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("John"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient2(self):
inst = self.instantiate_from("patient-example-f001-pieter.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient2(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient2(inst2)
def implPatient2(self, inst):
self.assertTrue(inst.active)
self.assertEqual(force_bytes(inst.address[0].city), force_bytes("Amsterdam"))
self.assertEqual(force_bytes(inst.address[0].country), force_bytes("NLD"))
self.assertEqual(
force_bytes(inst.address[0].line[0]), force_bytes("Van Egmondkade 23")
)
self.assertEqual(
force_bytes(inst.address[0].postalCode), force_bytes("1024 RJ")
)
self.assertEqual(force_bytes(inst.address[0].use), force_bytes("home"))
self.assertEqual(inst.birthDate.date, FHIRDate("1944-11-17").date)
self.assertEqual(inst.birthDate.as_json(), "1944-11-17")
self.assertEqual(
force_bytes(inst.communication[0].language.coding[0].code),
force_bytes("nl"),
)
self.assertEqual(
force_bytes(inst.communication[0].language.coding[0].display),
force_bytes("Dutch"),
)
self.assertEqual(
force_bytes(inst.communication[0].language.coding[0].system),
force_bytes("urn:ietf:bcp:47"),
)
self.assertEqual(
force_bytes(inst.communication[0].language.text), force_bytes("Nederlands")
)
self.assertTrue(inst.communication[0].preferred)
self.assertEqual(force_bytes(inst.contact[0].name.family), force_bytes("Abels"))
self.assertEqual(
force_bytes(inst.contact[0].name.given[0]), force_bytes("Sarah")
)
self.assertEqual(force_bytes(inst.contact[0].name.use), force_bytes("usual"))
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].code),
force_bytes("C"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0131"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("phone")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].use), force_bytes("mobile")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value), force_bytes("0690383372")
)
self.assertFalse(inst.deceasedBoolean)
self.assertEqual(force_bytes(inst.gender), force_bytes("male"))
self.assertEqual(force_bytes(inst.id), force_bytes("f001"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("urn:oid:2.16.840.1.113883.2.4.6.3"),
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("usual"))
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("738472983")
)
self.assertEqual(
force_bytes(inst.identifier[1].system),
force_bytes("urn:oid:2.16.840.1.113883.2.4.6.3"),
)
self.assertEqual(force_bytes(inst.identifier[1].use), force_bytes("usual"))
self.assertEqual(
force_bytes(inst.maritalStatus.coding[0].code), force_bytes("M")
)
self.assertEqual(
force_bytes(inst.maritalStatus.coding[0].display), force_bytes("Married")
)
self.assertEqual(
force_bytes(inst.maritalStatus.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-MaritalStatus"),
)
self.assertEqual(force_bytes(inst.maritalStatus.text), force_bytes("Getrouwd"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertTrue(inst.multipleBirthBoolean)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("van de Heuvel"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Pieter"))
self.assertEqual(force_bytes(inst.name[0].suffix[0]), force_bytes("MSc"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("usual"))
self.assertEqual(force_bytes(inst.telecom[0].system), force_bytes("phone"))
self.assertEqual(force_bytes(inst.telecom[0].use), force_bytes("mobile"))
self.assertEqual(force_bytes(inst.telecom[0].value), force_bytes("0648352638"))
self.assertEqual(force_bytes(inst.telecom[1].system), force_bytes("email"))
self.assertEqual(force_bytes(inst.telecom[1].use), force_bytes("home"))
self.assertEqual(
force_bytes(inst.telecom[1].value), force_bytes("[email protected]")
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient3(self):
inst = self.instantiate_from("patient-example-d.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient3(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient3(inst2)
def implPatient3(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.birthDate.date, FHIRDate("1982-08-02").date)
self.assertEqual(inst.birthDate.as_json(), "1982-08-02")
self.assertTrue(inst.deceasedBoolean)
self.assertEqual(force_bytes(inst.gender), force_bytes("female"))
self.assertEqual(force_bytes(inst.id), force_bytes("pat4"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("urn:oid:0.1.2.3.4.5.6.7"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("MR")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("usual"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123458"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Notsowell"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Sandy"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("official"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient4(self):
inst = self.instantiate_from("patient-example-infant-twin-1.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient4(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient4(inst2)
def implPatient4(self, inst):
self.assertEqual(inst.birthDate.date, FHIRDate("2017-05-15").date)
self.assertEqual(inst.birthDate.as_json(), "2017-05-15")
self.assertEqual(
force_bytes(inst.contact[0].name.family), force_bytes("Organa")
)
self.assertEqual(
force_bytes(inst.contact[0].name.given[0]), force_bytes("Leia")
)
self.assertEqual(force_bytes(inst.contact[0].name.use), force_bytes("maiden"))
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].code),
force_bytes("72705000"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].display),
force_bytes("Mother"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[1].code),
force_bytes("N"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[1].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0131"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[2].code),
force_bytes("MTH"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[2].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-RoleCode"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("phone")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].use), force_bytes("mobile")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value), force_bytes("+31201234567")
)
self.assertEqual(
force_bytes(inst.extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/patient-mothersMaidenName"
),
)
self.assertEqual(
force_bytes(inst.extension[0].valueString), force_bytes("Organa")
)
self.assertEqual(force_bytes(inst.gender), force_bytes("female"))
self.assertEqual(force_bytes(inst.id), force_bytes("infant-twin-1"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://coruscanthealth.org/main-hospital/patient-identifier"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("MR")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("MRN7465737865")
)
self.assertEqual(
force_bytes(inst.identifier[1].system),
force_bytes("http://new-republic.gov/galactic-citizen-identifier"),
)
self.assertEqual(
force_bytes(inst.identifier[1].value), force_bytes("7465737865")
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.multipleBirthInteger, 1)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Solo"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Jaina"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("official"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient5(self):
inst = self.instantiate_from("patient-example-infant-mom.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient5(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient5(inst2)
def implPatient5(self, inst):
self.assertEqual(inst.birthDate.date, FHIRDate("1995-10-12").date)
self.assertEqual(inst.birthDate.as_json(), "1995-10-12")
self.assertEqual(force_bytes(inst.gender), force_bytes("female"))
self.assertEqual(force_bytes(inst.id), force_bytes("infant-mom"))
self.assertEqual(
force_bytes(inst.maritalStatus.coding[0].code), force_bytes("M")
)
self.assertEqual(
force_bytes(inst.maritalStatus.coding[0].display), force_bytes("Married")
)
self.assertEqual(
force_bytes(inst.maritalStatus.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-MaritalStatus"),
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Solo"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Leia"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("official"))
self.assertEqual(force_bytes(inst.name[1].family), force_bytes("Organa"))
self.assertEqual(force_bytes(inst.name[1].given[0]), force_bytes("Leia"))
self.assertEqual(force_bytes(inst.name[1].use), force_bytes("maiden"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient6(self):
inst = self.instantiate_from("patient-example-newborn.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient6(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient6(inst2)
def implPatient6(self, inst):
self.assertEqual(inst.birthDate.date, FHIRDate("2017-09-05").date)
self.assertEqual(inst.birthDate.as_json(), "2017-09-05")
self.assertEqual(
force_bytes(inst.extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/patient-mothersMaidenName"
),
)
self.assertEqual(
force_bytes(inst.extension[0].valueString), force_bytes("Everywoman")
)
self.assertEqual(force_bytes(inst.gender), force_bytes("male"))
self.assertEqual(force_bytes(inst.id), force_bytes("newborn"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.multipleBirthInteger, 2)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient7(self):
inst = self.instantiate_from("patient-example-infant-fetal.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient7(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient7(inst2)
def implPatient7(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].name.family), force_bytes("Organa")
)
self.assertEqual(
force_bytes(inst.contact[0].name.given[0]), force_bytes("Leia")
)
self.assertEqual(force_bytes(inst.contact[0].name.use), force_bytes("maiden"))
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].code),
force_bytes("72705000"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].display),
force_bytes("Mother"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[1].code),
force_bytes("N"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[1].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0131"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[2].code),
force_bytes("MTH"),
)
self.assertEqual(
force_bytes(inst.contact[0].relationship[0].coding[2].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-RoleCode"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("phone")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].use), force_bytes("mobile")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value), force_bytes("+31201234567")
)
self.assertEqual(
force_bytes(inst.extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/patient-mothersMaidenName"
),
)
self.assertEqual(
force_bytes(inst.extension[0].valueString), force_bytes("Organa")
)
self.assertEqual(force_bytes(inst.gender), force_bytes("male"))
self.assertEqual(force_bytes(inst.id), force_bytes("infant-fetal"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://coruscanthealth.org/main-hospital/patient-identifier"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("MR")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("MRN657865757378")
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient8(self):
inst = self.instantiate_from("patient-genetics-example1.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient8(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient8(inst2)
def implPatient8(self, inst):
self.assertTrue(inst.active)
self.assertEqual(
force_bytes(inst.address[0].line[0]), force_bytes("2222 Home Street")
)
self.assertEqual(force_bytes(inst.address[0].use), force_bytes("home"))
self.assertEqual(inst.birthDate.date, FHIRDate("1973-05-31").date)
self.assertEqual(inst.birthDate.as_json(), "1973-05-31")
self.assertEqual(force_bytes(inst.gender), force_bytes("female"))
self.assertEqual(force_bytes(inst.id), force_bytes("genetics-example1"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://hl7.org/fhir/sid/us-ssn"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("SS")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("444222222")
)
self.assertEqual(
inst.meta.lastUpdated.date, FHIRDate("2012-05-29T23:45:32Z").date
)
self.assertEqual(inst.meta.lastUpdated.as_json(), "2012-05-29T23:45:32Z")
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Everywoman"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Eve"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("official"))
self.assertEqual(force_bytes(inst.telecom[0].system), force_bytes("phone"))
self.assertEqual(force_bytes(inst.telecom[0].use), force_bytes("work"))
self.assertEqual(
force_bytes(inst.telecom[0].value), force_bytes("555-555-2003")
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient9(self):
inst = self.instantiate_from("patient-example-b.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient9(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient9(inst2)
def implPatient9(self, inst):
self.assertTrue(inst.active)
self.assertEqual(force_bytes(inst.gender), force_bytes("other"))
self.assertEqual(force_bytes(inst.id), force_bytes("pat2"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("urn:oid:0.1.2.3.4.5.6.7"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("MR")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("usual"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123456"))
self.assertEqual(force_bytes(inst.link[0].type), force_bytes("seealso"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Donald"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Duck"))
self.assertEqual(force_bytes(inst.name[0].given[1]), force_bytes("D"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.photo[0].contentType), force_bytes("image/gif")
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testPatient10(self):
inst = self.instantiate_from("patient-example-c.json")
self.assertIsNotNone(inst, "Must have instantiated a Patient instance")
self.implPatient10(inst)
js = inst.as_json()
self.assertEqual("Patient", js["resourceType"])
inst2 = patient.Patient(js)
self.implPatient10(inst2)
def implPatient10(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.birthDate.date, FHIRDate("1982-01-23").date)
self.assertEqual(inst.birthDate.as_json(), "1982-01-23")
self.assertEqual(
inst.deceasedDateTime.date, FHIRDate("2015-02-14T13:42:00+10:00").date
)
self.assertEqual(inst.deceasedDateTime.as_json(), "2015-02-14T13:42:00+10:00")
self.assertEqual(force_bytes(inst.gender), force_bytes("male"))
self.assertEqual(force_bytes(inst.id), force_bytes("pat3"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("urn:oid:0.1.2.3.4.5.6.7"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("MR")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("usual"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("123457"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.name[0].family), force_bytes("Notsowell"))
self.assertEqual(force_bytes(inst.name[0].given[0]), force_bytes("Simon"))
self.assertEqual(force_bytes(inst.name[0].use), force_bytes("official"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
|
[
"[email protected]"
] | |
ba481ea8f879122af999411632fb73b73261732c
|
10041bd495b9accbc8f81790fbcc581350e2eaad
|
/utils/GoogleSpreadSheet_API.py
|
82aadecda194fdf6563a5f60ab143f43358b0372
|
[
"Apache-2.0"
] |
permissive
|
heboric/ouro
|
58ca64d217a593f168dec11068aed12fe910c0c8
|
5209a9946b39874ed02c2602baf39fd299de609b
|
refs/heads/master
| 2023-04-19T04:47:21.235442 | 2021-05-04T21:58:30 | 2021-05-04T21:58:30 | 364,108,551 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,467 |
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# setup: https://developers.google.com/sheets/api/quickstart/python
# source: https://github.com/gsuitedevs/python-samples/tree/master/sheets
from __future__ import print_function
import os
from oauth2client import file, client, tools
from googleapiclient.discovery import build
from httplib2 import Http
PATH = os.path.dirname(os.path.realpath(__file__))
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
class Sheet(object):
def __init__(self, Spreadsheet_ID, name, service=False):
self.Spreadsheet_ID=Spreadsheet_ID
self.name=name
if not service:
store = file.Storage(os.path.join(PATH,*['token.json']))
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets(os.path.join(PATH,*['credentials.json']), SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
self.SpreadsheetSnippets=SpreadsheetSnippets(service)
self.values=self.get_values()
def get_values(self, range_name='A:ZZZ', majorDimension='ROWS'):
return self.SpreadsheetSnippets.get_values(self.Spreadsheet_ID,'%s!%s'%(self.name,range_name), majorDimension).get('values',[])
def update(self, values, range_name='A:ZZZ', majorDimension='ROWS',value_input_option='RAW'):
return self.SpreadsheetSnippets.update_values(self.Spreadsheet_ID, range_name, values, majorDimension,value_input_option)
def append(self, values, range_name='A:ZZZ', majorDimension='ROWS',value_input_option='RAW'):
return self.SpreadsheetSnippets.append_values(self.Spreadsheet_ID, range_name, values, majorDimension,value_input_option)
def clear(self):
        if not self.values:
            return
        self.update([['' for i in range(len(self.values[0]))] for i in range(len(self.values))])
def convert(self,values=False):
if not values:
values=self.values
header=values[0]
data=[
{
key:row[i]
for i,key in enumerate(header)
if row[i]
}
for row in values[1:]
]
return(data)
def ObjListToSheet(self,objlist):
self.clear()
#//convert object into an 3D-array data=[row][column]
header=[]
for obj in objlist:
for key in obj:
if key not in header:
header.append(key)
array=[
[
obj[key] if key in obj else None
for key in header
]
for obj in objlist
]
        array.insert(0, header)  # Python equivalent of the JS-style unshift
self.update(array)
class SpreadsheetSnippets(object):
def __init__(self, service=False):
if not service:
store = file.Storage(os.path.join(PATH,*['token.json']))
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets(os.path.join(PATH,*['credentials.json']), SCOPES)
creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))
self.service = service
def create(self, title):
service = self.service
# [START sheets_create]
spreadsheet = {
'properties': {
'title': title
}
}
spreadsheet = service.spreadsheets().create(body=spreadsheet,
fields='spreadsheetId').execute()
print('Spreadsheet ID: {0}'.format(spreadsheet.get('spreadsheetId')))
# [END sheets_create]
return spreadsheet.get('spreadsheetId')
def get_values(self, spreadsheet_id, range_name='A:ZZZ', majorDimension='ROWS'):
service = self.service
# [START sheets_get_values]
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=range_name, majorDimension=majorDimension).execute()
# For example, if the spreadsheet data is: `A1=1,B1=2,A2=3,B2=4`,
# then requesting `range=A1:B2,majorDimension=ROWS` will return
# `[[1,2],[3,4]]`,
        # whereas requesting `range=A1:B2,majorDimension=COLUMNS` will return
        # `[[1,3],[2,4]]` instead.
#numRows = len(result.get('values')) if result.get('values')is not None else 0
#print('{0} rows retrieved.'.format(numRows))
# [END sheets_get_values]
return result
def update_values(self, spreadsheet_id, range_name, values,majorDimension='ROWS',value_input_option='RAW'):
service = self.service
#value_input_options
# INPUT_VALUE_OPTION_UNSPECIFIED Default input value. This value must not be used.
# RAW The values the user has entered will not be parsed and will be stored as-is.
# USER_ENTERED The values will be parsed as if the user typed them into the UI. Numbers will stay as numbers, but strings may be converted to numbers, dates, etc. following the same rules that are applied when entering text into a cell via the Google Sheets UI.
body = {
'values': values,
'majorDimension': majorDimension
}
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheet_id, range=range_name,
valueInputOption=value_input_option, body=body).execute()
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
def append_values(self, spreadsheet_id, range_name, values,majorDimension='ROWS',value_input_option='RAW'):
service = self.service
body = {
'values': values,
'majorDimension': majorDimension
}
result = service.spreadsheets().values().append(
spreadsheetId=spreadsheet_id, range=range_name,
valueInputOption=value_input_option, body=body).execute()
print('{0} cells appended.'.format(result \
.get('updates') \
.get('updatedCells')))
# [END sheets_append_values]
return result
def batch_update(self, spreadsheet_id, title, find, replacement):
service = self.service
# [START sheets_batch_update]
requests = []
# Change the spreadsheet's title.
requests.append({
'updateSpreadsheetProperties': {
'properties': {
'title': title
},
'fields': 'title'
}
})
# Find and replace text
requests.append({
'findReplace': {
'find': find,
'replacement': replacement,
'allSheets': True
}
})
# Add additional requests (operations) ...
body = {
'requests': requests
}
response = service.spreadsheets().batchUpdate(
spreadsheetId=spreadsheet_id,
body=body).execute()
find_replace_response = response.get('replies')[1].get('findReplace')
print('{0} replacements made.'.format(
find_replace_response.get('occurrencesChanged')))
# [END sheets_batch_update]
return response
def batch_get_values(self, spreadsheet_id, _range_names):
service = self.service
# [START sheets_batch_get_values]
range_names = [
# Range names ...
]
# [START_EXCLUDE silent]
range_names = _range_names
# [END_EXCLUDE]
result = service.spreadsheets().values().batchGet(
spreadsheetId=spreadsheet_id, ranges=range_names).execute()
print('{0} ranges retrieved.'.format(result.get('valueRanges')))
# [END sheets_batch_get_values]
return result
def batch_update_values(self, spreadsheet_id, range_name,
value_input_option, _values):
service = self.service
# [START sheets_batch_update_values]
values = [
[
# Cell values ...
],
# Additional rows
]
# [START_EXCLUDE silent]
values = _values
# [END_EXCLUDE]
data = [
{
'range': range_name,
'values': values
},
# Additional ranges to update ...
]
body = {
'valueInputOption': value_input_option,
'data': data
}
result = service.spreadsheets().values().batchUpdate(
spreadsheetId=spreadsheet_id, body=body).execute()
print('{0} cells updated.'.format(result.get('updatedCells')))
# [END sheets_batch_update_values]
return result
def pivot_tables(self, spreadsheet_id):
service = self.service
# Create two sheets for our pivot table.
body = {
'requests': [{
'addSheet': {}
}, {
'addSheet': {}
}]
}
batch_update_response = service.spreadsheets() \
.batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()
source_sheet_id = batch_update_response.get('replies')[0] \
.get('addSheet').get('properties').get('sheetId')
target_sheet_id = batch_update_response.get('replies')[1] \
.get('addSheet').get('properties').get('sheetId')
requests = []
# [START sheets_pivot_tables]
requests.append({
'updateCells': {
'rows': {
'values': [
{
'pivotTable': {
'source': {
'sheetId': source_sheet_id,
'startRowIndex': 0,
'startColumnIndex': 0,
'endRowIndex': 101,
'endColumnIndex': 8
},
'rows': [
{
'sourceColumnOffset': 6,
'showTotals': True,
'sortOrder': 'ASCENDING',
},
],
'columns': [
{
'sourceColumnOffset': 3,
'sortOrder': 'ASCENDING',
'showTotals': True,
}
],
'values': [
{
'summarizeFunction': 'COUNTA',
'sourceColumnOffset': 3
}
],
'valueLayout': 'HORIZONTAL'
}
}
]
},
'start': {
'sheetId': target_sheet_id,
'rowIndex': 0,
'columnIndex': 0
},
'fields': 'pivotTable'
}
})
body = {
'requests': requests
}
response = service.spreadsheets() \
.batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()
# [END sheets_pivot_tables]
return response
def conditional_formatting(self, spreadsheet_id):
service = self.service
# [START sheets_conditional_formatting]
my_range = {
'sheetId': 0,
'startRowIndex': 1,
'endRowIndex': 11,
'startColumnIndex': 0,
'endColumnIndex': 4,
}
requests = [{
'addConditionalFormatRule': {
'rule': {
'ranges': [my_range],
'booleanRule': {
'condition': {
'type': 'CUSTOM_FORMULA',
'values': [{
'userEnteredValue':
'=GT($D2,median($D$2:$D$11))'
}]
},
'format': {
'textFormat': {
'foregroundColor': {'red': 0.8}
}
}
}
},
'index': 0
}
}, {
'addConditionalFormatRule': {
'rule': {
'ranges': [my_range],
'booleanRule': {
'condition': {
'type': 'CUSTOM_FORMULA',
'values': [{
'userEnteredValue':
'=LT($D2,median($D$2:$D$11))'
}]
},
'format': {
'backgroundColor': {
'red': 1,
'green': 0.4,
'blue': 0.4
}
}
}
},
'index': 0
}
}]
body = {
'requests': requests
}
response = service.spreadsheets() \
.batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()
print('{0} cells updated.'.format(len(response.get('replies'))))
# [END sheets_conditional_formatting]
return response
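# --- Hedged usage sketch (added; not part of the original sample code) ---
# Assumes a valid credentials.json/token.json pair next to this file; the
# spreadsheet ID and sheet name below are placeholders.
def _demo_sheet_usage():
    sheet = Sheet('YOUR_SPREADSHEET_ID', 'Sheet1')
    # RAW stores values as-is; USER_ENTERED would parse them like manual input.
    sheet.append([['alice', '42']])
    # Re-read and convert rows to dicts keyed by the header row.
    print(sheet.convert(sheet.get_values()))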
|
[
"[email protected]"
] | |
5fdf10d2c3284be8fe2c12dd47d17a980192a24d
|
477a1182c09f276e8d29651222ba72968074fcb8
|
/Fizz Buzz.py
|
1691cf23a3ddb9911ba968b01c0572d305dfe589
|
[] |
no_license
|
a58982284/cookbook
|
900ed8f8f2d1fb4111076074607574645ac07314
|
7490a968e93c85df0d4d9701d0901e2a8c3bdfac
|
refs/heads/master
| 2020-03-29T12:04:08.316281 | 2018-09-22T14:14:44 | 2018-09-22T14:14:44 | 149,883,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 329 |
py
|
def checkio(number: int) -> str:
    if number % 3 == 0 and number % 5 == 0:
        return 'Fizz Buzz'
    elif number % 3 == 0:
        return 'Fizz'
    elif number % 5 == 0:
        return 'Buzz'
    else:
        return str(number)
print(checkio(15))
print(checkio(6))
print(checkio(5))
print(checkio(7))
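# A few sanity checks mirroring the prints above (illustrative addition).
assert checkio(15) == 'Fizz Buzz'
assert checkio(6) == 'Fizz'
assert checkio(5) == 'Buzz'
assert checkio(7) == '7'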
|
[
"[email protected]"
] | |
9b8122ee97bc2012e154cd33cd099e61ed67ab7b
|
de88a649182d42206358e53bba69252e04d8a69f
|
/abc_py/abc168/c.py
|
b616070660aef9a02ea101c6794186568462f0ec
|
[] |
no_license
|
aki-nasu/competition
|
47b05312d9b19dcf62212570d6253ec7a109382d
|
9edb02abb14d896932f08218417d3f10b54f1755
|
refs/heads/master
| 2021-06-22T22:09:14.433407 | 2021-06-06T06:53:42 | 2021-06-06T06:53:42 | 225,662,310 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
import math
# Hands of length a (hour) and b (minute) at time h:m (ABC168 C).
a, b, h, m = map(int, input().split())
ra = h * 30 + m * 0.5  # hour-hand angle in degrees (30 deg/hour + 0.5 deg/min)
rb = m * 6             # minute-hand angle in degrees (6 deg/min)
r = math.radians(abs(ra - rb))  # angle between the hands, i.e. (abs(ra-rb)*pi)/180
# Distance between the hand tips by the law of cosines.
print(math.sqrt(a**2 + b**2 - 2*a*b*math.cos(r)))
|
[
"[email protected]"
] | |
e0f547e3b52a0671a240931d5804ecde3fac717d
|
887f86963badac6d18c7aff8a3c421a104c27553
|
/model/sensibility_baseline/rnn_pytorch.py
|
db7a5646559bf8a8783f66b0834acc51a1435eb4
|
[] |
no_license
|
monperrus/ProgramFix
|
3c63e69325f7f62e188bc39364732b8d01eb0635
|
64f7c943775664147333d1a3e73a3cb10d7d4aa4
|
refs/heads/master
| 2022-04-19T16:30:11.213078 | 2020-04-01T07:27:43 | 2020-04-01T07:27:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,404 |
py
|
import torch
import torch.nn as nn
import torch.autograd as autograd
import config
import os
import more_itertools
from common.problem_util import to_cuda
from common.torch_util import calculate_accuracy_of_code_completion, get_predict_and_target_tokens, \
remove_last_item_in_sequence, reverse_tensor
from common.util import batch_holder, transform_id_to_token, PaddedList, show_process_map, CustomerDataSet
from common import util, torch_util
from sklearn.utils import shuffle
import sys
from seq2seq.models import EncoderRNN
import pandas as pd
from vocabulary.word_vocabulary import Vocabulary
gpu_index = 0
BEGIN, END, UNK = ["<BEGIN>", "<END>", "<UNK>"]
class SensibilityRNNDataset(CustomerDataSet):
def __init__(self,
data_df: pd.DataFrame,
vocabulary: Vocabulary,
set_type: str,
transformer_vocab_slk=None,
no_filter=False,
do_flatten=False,
MAX_LENGTH=500,
use_ast=False,
do_multi_step_sample=False,
id_to_program_dict=None,
no_id_to_program_dict=False):
# super().__init__(data_df, vocabulary, set_type, transform, no_filter)
self.set_type = set_type
self.vocabulary = vocabulary
self.transformer = transformer_vocab_slk
self.is_flatten = do_flatten
self.max_length = MAX_LENGTH
self.use_ast = use_ast
self.transform = False
self.do_multi_step_sample = do_multi_step_sample
if data_df is not None:
if not no_filter:
self.data_df = self.filter_df(data_df)
else:
self.data_df = data_df
self.only_first = do_multi_step_sample
from experiment.experiment_dataset import FlattenRandomIterateRecords
self._samples = [FlattenRandomIterateRecords(row, is_flatten=do_flatten, only_first=do_multi_step_sample)
for i, row in self.data_df.iterrows()]
# c = 0
# for i, (index, row) in self.data_df.iterrows():
# print(i)
# print(row['id'])
self.program_to_position_dict = {row['id']: i for i, (index, row) in enumerate(self.data_df.iterrows())}
if self.transform:
self._samples = show_process_map(self.transform, self._samples)
# for s in self._samples:
# for k, v in s.items():
# print("{}:shape {}".format(k, np.array(v).shape))
def filter_df(self, df):
df = df[df['error_token_id_list'].map(lambda x: x is not None)]
df = df[df['distance'].map(lambda x: x >= 0)]
def iterate_check_max_len(x):
if not self.is_flatten:
for i in x:
if len(i) > self.max_length:
return False
return True
else:
return len(x) < self.max_length
df = df[df['error_token_id_list'].map(iterate_check_max_len)]
return df
def set_only_first(self, only_first):
self.only_first = only_first
def _get_raw_sample(self, row):
# sample = dict(row)
row.select_random_i(only_first=self.only_first)
sample = {}
sample['id'] = row['id']
sample['includes'] = row['includes']
# if not self.is_flatten and self.do_multi_step_sample:
# sample['input_seq'] = row['error_token_id_list'][0]
# sample['input_seq_name'] = row['error_token_name_list'][0][1:-1]
# sample['input_length'] = len(sample['input_seq'])
# elif not self.is_flatten and not self.do_multi_step_sample:
# sample['input_seq'] = row['error_token_id_list']
# sample['input_seq_name'] = [r[1:-1] for r in row['error_token_name_list']]
# sample['input_length'] = [len(ids) for ids in sample['input_seq']]
# else:
sample['input_seq'] = row['error_token_id_list']
sample['input_seq_name'] = row['error_token_name_list'][1:-1]
sample['input_length'] = len(sample['input_seq'])
sample['copy_length'] = sample['input_length']
sample['includes'] = row['includes']
sample['distance'] = row['distance']
sample['forward_target'] = row['error_token_id_list'][1:]
sample['backward_target'] = row['error_token_id_list'][:-1]
return sample
def __getitem__(self, index):
real_position = index
row = self._samples[real_position]
return self._get_raw_sample(row)
def __setitem__(self, key, value):
real_position = key
self._samples[real_position] = value
def __len__(self):
return len(self._samples)
class LSTMModel(nn.Module):
def __init__(self, dictionary_size, embedding_dim, hidden_size, num_layers, batch_size, bidirectional=False, dropout=0):
super(LSTMModel, self).__init__()
self.dictionary_size = dictionary_size
self.embedding_dim = embedding_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_size = batch_size
self.drop = nn.Dropout(dropout)
print('dictionary_size: {}, embedding_dim: {}, hidden_size: {}, num_layers: {}, batch_size: {}, bidirectional: {}, dropout: {}'.format(
dictionary_size, embedding_dim, hidden_size, num_layers, batch_size, bidirectional, dropout))
self.bidirectional_num = 2 if bidirectional else 1
print('before create embedding')
self.word_embeddings = nn.Embedding(num_embeddings=dictionary_size, embedding_dim=embedding_dim, padding_idx=0)
print('before create lstm')
self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=num_layers,
bidirectional=bidirectional, dropout=dropout)
print('before create tag')
self.hidden2tag = nn.Linear(hidden_size * self.bidirectional_num, dictionary_size)
print('before init hidden')
self.hidden = self.init_hidden(self.batch_size)
def init_hidden(self, cur_batch_size):
return (to_cuda(torch.randn(self.num_layers * self.bidirectional_num, cur_batch_size, self.hidden_size)),
to_cuda(torch.randn(self.num_layers * self.bidirectional_num, cur_batch_size, self.hidden_size)))
def forward(self, inputs, token_lengths):
"""
inputs: [batch_size, code_length]
token_lengths: [batch_size, ]
:param inputs:
:return:
"""
self.hidden = self.init_hidden(inputs.shape[0])
# inputs = torch.LongTensor(inputs)
# token_lengths = torch.LongTensor(token_lengths)
cur_batch_size = len(inputs)
_, idx_sort = torch.sort(token_lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
inputs = torch.index_select(inputs, 0, idx_sort)
token_lengths = list(torch.index_select(token_lengths, 0, idx_sort))
print('input_size: ', inputs.size())
embeds = self.word_embeddings(inputs).view(cur_batch_size, -1, self.embedding_dim)
# print('embeds_size: {}, embeds is cuda: {}'.format(embeds.size(), embeds.is_cuda))
embeds = embeds.view(cur_batch_size, -1, self.embedding_dim)
embeds = self.drop(embeds)
# print('embeds_size: {}, embeds is cuda: {}'.format(embeds.size(), embeds.is_cuda))
# print('embeds value: {}'.format(embeds.data))
# print('after embeds token_length: {}'.format(token_lengths))
packed_inputs = torch.nn.utils.rnn.pack_padded_sequence(embeds, token_lengths, batch_first=True)
# print('packed_inputs batch size: ', len(packed_inputs.batch_sizes))
# print('packed_inputs is cuda: {}'.format(packed_inputs.data.is_cuda))
lstm_out, self.hidden = self.lstm(packed_inputs, self.hidden)
unpacked_lstm_out, unpacked_lstm_length = torch.nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True,
padding_value=0)
unpacked_lstm_out = self.drop(unpacked_lstm_out)
dict_output = self.hidden2tag(unpacked_lstm_out)
packed_dict_output = torch.nn.utils.rnn.pack_padded_sequence(dict_output, token_lengths, batch_first=True)
# print('lstm_out batch size: ', len(lstm_out.batch_sizes))
# print('lstm_out is cuda: ', lstm_out.data.is_cuda)
# print('lstm value: {}'.format(lstm_out.data))
# packed_output = nn.utils.rnn.PackedSequence(self.hidden2tag(lstm_out.data).cuda(gpu_index), lstm_out.batch_sizes) # output shape: [batch_size, token_length, dictionary_size]
# print('packed_output batch size: ', len(packed_output.batch_sizes))
# print('packed_output is cuda: ', packed_output.data.is_cuda)
unpacked_out, unpacked_length = torch.nn.utils.rnn.pad_packed_sequence(packed_dict_output, batch_first=True, padding_value=0)
# print('unpacked_out: {}, unpacked_length: {}'.format(unpacked_out.size(), unpacked_length))
        # index_select requires an integer index tensor; idx_unsort from torch.sort is already a LongTensor
        unpacked_out = torch.index_select(unpacked_out, 0, idx_unsort.to(inputs.device))
# print('unsort unpacked_out: {}'.format(unpacked_out.size()))
# print('unsort unpacked_out is cuda: {}'.format(unpacked_out.is_cuda))
return unpacked_out
class SplitRNNModelWarpper(nn.Module):
def __init__(self, vocabulary_size, hidden_size, encoder_params):
super().__init__()
self.hidden_size = hidden_size
self.encoder = EncoderRNN(hidden_size=hidden_size, **encoder_params)
self.output = nn.Linear(hidden_size, vocabulary_size)
def forward(self, input_seq, input_length):
encoder_output, _ = self.encoder(input_seq)
split_encoder_output = remove_last_item_in_sequence(encoder_output, input_length, k=1)
o = self.output(split_encoder_output)
return o
class SensibilityBiRnnModel(nn.Module):
def __init__(self, vocabulary_size, embedding_dim, hidden_size, encoder_params):
super().__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(num_embeddings=vocabulary_size, embedding_dim=embedding_dim, padding_idx=0)
self.forward_rnn = SplitRNNModelWarpper(vocabulary_size, hidden_size, encoder_params)
self.backward_rnn = SplitRNNModelWarpper(vocabulary_size, hidden_size, encoder_params)
def forward(self, input_seq, input_length):
backward_input_seq = reverse_tensor(input_seq, input_length)
embedded_forward_input_seq = self.embedding(input_seq)
embedded_backward_input_seq = self.embedding(backward_input_seq)
forward_output = self.forward_rnn(embedded_forward_input_seq, input_length)
backward_output = self.backward_rnn(embedded_backward_input_seq, input_length)
reversed_backward_output = reverse_tensor(backward_output, input_length-1)
return forward_output, reversed_backward_output
def create_loss_function(ignore_id):
cross_loss = nn.CrossEntropyLoss(ignore_index=ignore_id)
def loss_fn(forward_output, backward_output,
forward_target_seq, backward_target_seq):
forward_loss = cross_loss(forward_output.permute(0, 2, 1), forward_target_seq)
backward_loss = cross_loss(backward_output.permute(0, 2, 1), backward_target_seq)
total_loss = forward_loss + backward_loss
return total_loss
return loss_fn
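# Hedged shape sketch (added for illustration): loss_fn expects logits of shape
# [batch, seq_len, vocab] and target ids of shape [batch, seq_len]; the
# permute(0, 2, 1) above moves the vocab axis to position 1 as required by
# nn.CrossEntropyLoss. All sizes below are placeholders.
def _demo_loss_shapes():
    batch, seq_len, vocab, ignore_id = 2, 5, 11, 0
    loss_fn = create_loss_function(ignore_id)
    forward_output = torch.randn(batch, seq_len, vocab)
    backward_output = torch.randn(batch, seq_len, vocab)
    forward_target = torch.randint(1, vocab, (batch, seq_len))
    backward_target = torch.randint(1, vocab, (batch, seq_len))
    return loss_fn(forward_output, backward_output, forward_target, backward_target)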
def rnn_parse_input_batch_data_fn():
def parse_input_batch_data(batch_data, do_sample=False):
def to_long(x):
return to_cuda(torch.LongTensor(x))
input_seq = to_long(PaddedList(batch_data['input_seq']))
input_length = to_long(batch_data['input_length'])
return input_seq, input_length
return parse_input_batch_data
def rnn_parse_target_batch_data_fn(ignore_id):
def parse_target_batch_data(batch_data, ):
forward_target_seq = to_cuda(torch.LongTensor(PaddedList(batch_data['forward_target'], fill_value=ignore_id)))
backward_target_seq = to_cuda(torch.LongTensor(PaddedList(batch_data['backward_target'], fill_value=ignore_id)))
return forward_target_seq, backward_target_seq
return parse_target_batch_data
def create_output_fn(*args, **kwargs):
pass
|
[
"[email protected]"
] | |
9325ce23a6bcefeac701f51d38d1513df3b719a6
|
814f8b85dd6435b3bb3fdebf2f193912aa145a62
|
/image_segmentation/CycleGAN/__init__.py
|
b7901d30d6a0433aae360a3b18a7d65798dcb49b
|
[
"Apache-2.0"
] |
permissive
|
jacke121/pycharm
|
480df86258ee918de25b76a4156e9e6b9d355df7
|
b9b2963cf0c5028f622f41413f52f1b5cbde28a1
|
refs/heads/master
| 2020-03-18T16:35:25.579992 | 2018-01-01T02:30:58 | 2018-01-01T02:30:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by weihang huang on 17-12-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
root = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + '/'
|
[
"[email protected]"
] | |
389ab210334c512ad944a1569f96a0cfda9fea26
|
3482beb24c0635efcb60391d27c1987b7fb413a5
|
/kvipytools/kvipytools/rename.py
|
cce6f4b4865c27f1c76dac3edfdbb3d77e504d17
|
[] |
no_license
|
rpgplanet/rpgplanet-all
|
ad2e6a00935d2b214ba41b4adced524f1bd443db
|
6d473369cd8263f59ebcbf7f812fd4d34d4d785e
|
refs/heads/master
| 2021-01-13T01:43:58.718833 | 2010-10-28T10:25:01 | 2010-10-28T10:25:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,987 |
py
|
#!/usr/bin/env python
import sys, os
class OptionParser(object):
'''
    parse commandline options separated by the given separator::
        ./rename.py a=b c=d "a a a=b b b" a\\==\\=b
    will result in something like this::
opts = [ ('a', 'b'), ('c', 'd'), ('a a a', 'b b b'), ('a=', '=b') ]
'''
def __init__(self, escape_char='\\', escape_replacement=-1, splitter_char='=', splitter_replacement=-2):
self.escape_char = escape_char
self.escape_replacement = escape_replacement
self.splitter_char = splitter_char
self.splitter_replacement = splitter_replacement
def split_string(self, string):
return [ c for c in string ]
def replace_pair(self, chars, pair, replacement):
'''
go through chars in pairs and if two chars equals given pair
put some special mark instead
'''
escaped_chars = []
hop = False
for i, j in enumerate(chars):
if hop:
hop = False
continue
if i < (len(chars) - 1):
if (j, chars[i+1]) == pair:
hop = True
x = replacement
else:
x = j
else:
x = j
escaped_chars.append(x)
return escaped_chars
def escape_escape(self, chars):
pair = (self.escape_char, self.escape_char)
return self.replace_pair(chars, pair, self.escape_replacement)
def escape_split(self, chars):
pair = (self.escape_char, self.splitter_char)
return self.replace_pair(chars, pair, self.splitter_replacement)
def split_via_equalsign(self, chars, splitter='='):
index = chars.index(splitter)
return (chars[:index], chars[index+1:])
def list_replace_all(self, seq, obj, repl):
for i, elem in enumerate(seq):
if elem == obj:
seq[i] = repl
def __call__(self, opts):
"""
parse options given on cmdline separated by equal sign:
>>> OptionParser()(['a=b', 'x x x=y y y'])
[('a', 'b'), ('x x x', 'y y y')]
"""
parsed_opts = []
for o in opts:
o = self.escape_escape(o)
o = self.escape_split(o)
l, r = self.split_via_equalsign(o)
for i in l, r:
self.list_replace_all(i, self.splitter_replacement, self.splitter_char)
self.list_replace_all(i, self.escape_replacement, self.escape_char)
parsed_opts.append((''.join(l), ''.join(r)))
return parsed_opts
def call_command(cmd, options, verbose=False):
"""
helper function that call shell command for every tuple in options
"""
for patrn, repl in options:
repl = {'patrn': patrn, 'repl': repl,}
command = cmd % repl
        print('running: %s' % command)
if not verbose:
command += '&>/dev/null'
os.system(command)
def rename_files_dirs(options):
"""
rename all dirs and files to new name defined via options
"""
# create dirs first
call_command('''find . -type d | while read f; do mkdir -p "$(echo $f | sed 's/%(patrn)s/%(repl)s/g')"; done''', options)
# than move files
call_command('''find . -type f | while read f; do mv "$f" "$(echo $f | sed 's/%(patrn)s/%(repl)s/g')"; done''', options)
# delete empty dirs
call_command('''find -depth -type d -empty -exec rmdir {} \;''', [(1,1)])
def change_content(options):
"""
take file by file and replace any occurence of pattern with its replacement
"""
call_command('''grep -r -l -- '%(patrn)s' . | tr '\\n' '\\0' | xargs -0 sed -i "s/%(patrn)s/%(repl)s/g"''', options)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parse_options = OptionParser()
    options = parse_options(argv)
rename_files_dirs(options)
change_content(options)
if __name__ == '__main__':
main()
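# Hedged doctest-style illustration (added; not in the original script). The
# parser honors backslash escapes, so a literal '=' can appear on either side:
#   >>> OptionParser()(['a\\==\\=b'])
#   [('a=', '=b')]
# i.e. `\=` yields a literal '=' inside either half of the pair, and `\\`
# yields a literal backslash.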
|
[
"[email protected]"
] | |
30cb098c7657866cc84531411b8c6256998523ed
|
3a5ad075884d55593464f97df758de1891bfd3f2
|
/all_Gryzinski/BDE_model.py
|
6affdeb9cf80c7abc20558e4776bbe0101198979
|
[] |
no_license
|
fedorsidorov/DEBER-Simulation-2.0
|
1a812e950749cf86e8e0dbd4d3514fc58f710e9a
|
eca39922df628ca6dcfbfb2af61aaa469fe66074
|
refs/heads/master
| 2022-04-26T13:23:24.262182 | 2020-04-24T23:43:12 | 2020-04-24T23:43:12 | 202,705,740 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,527 |
py
|
#%% Import
import numpy as np
import os
import importlib
import matplotlib.pyplot as plt
import copy
import my_constants as mc
mc = importlib.reload(mc)
import E_loss_functions as elf
elf = importlib.reload(elf)
os.chdir(mc.sim_folder + 'all_Gryzinski')
#%%
MMA_bonds = {}
#kJmol_2_eV = 1e+3 / (mc.Na * mc.eV)
kJmol_2_eV = 0.0103
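# Worked check of the rounded factor above (added note):
# 1 kJ/mol = 1e3 / (6.022e23 * 1.602e-19) eV ~= 0.010364 eV per bond,
# so 0.0103 is accurate to about 0.6%.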
MMA_bonds['Op-Cp'] = 815 * kJmol_2_eV, 8
MMA_bonds['O-Cp'] = 420 * kJmol_2_eV, 4
MMA_bonds['H-C3'] = 418 * kJmol_2_eV, 12
MMA_bonds['H-C2'] = 406 * kJmol_2_eV, 4
MMA_bonds['Cp-Cg'] = 383 * kJmol_2_eV, 2
MMA_bonds['O-C3'] = 364 * kJmol_2_eV, 4
MMA_bonds['C-C3'] = 356 * kJmol_2_eV, 2
MMA_bonds['C-C2'] = 354 * kJmol_2_eV, 4
Eb_Nel = np.array(list(MMA_bonds.values()))
#%%
def get_stairway(b_map_sc):
# b_map_sc = dict([('C-C2', 4)])
Eb_Nel_sc_list = []
for val in b_map_sc.keys():
Eb_Nel_sc_list.append([MMA_bonds[val][0], MMA_bonds[val][1]])
Eb_Nel_sc = np.array(Eb_Nel_sc_list)
probs = np.zeros(len(mc.EE))
nums = np.zeros(len(mc.EE))
dens = np.zeros(len(mc.EE))
for i, e in enumerate(mc.EE):
num = 0
for st in Eb_Nel_sc:
if e >= st[0]:
num += st[1]
if num == 0:
continue
nums[i] = num
den = 0
for st in Eb_Nel:
if e >= st[0]:
den += st[1]
dens[i] = den
probs[i] = num / den
return probs
#%%
PMMA_total_inel_U = np.load(mc.sim_folder + 'E_loss/diel_responce/Dapor/PMMA_U_Dapor.npy')
#PMMA_diff_inel_U = np.load(mc.sim_folder +\
# 'E_loss/diel_responce/Dapor/PMMA_diff_U_Dapor_Ashley.npy')
PMMA_SP = np.load(mc.sim_folder + 'E_loss/diel_responce/Dapor/PMMA_SP_Dapor.npy')
#%% Go Gryzinski
total_U = np.zeros(len(mc.EE))
total_SP = np.zeros(len(mc.EE))
for bond in MMA_bonds:
total_U += elf.get_Gryzinski_CS(mc.EE, MMA_bonds[bond][0]) * MMA_bonds[bond][1] * mc.n_PMMA_mon
total_SP += elf.get_Gryzinski_SP(mc.EE, MMA_bonds[bond][0], mc.n_PMMA_mon, MMA_bonds[bond][1])
#%% U
plt.loglog(mc.EE, PMMA_total_inel_U, label='Dapor')
plt.loglog(mc.EE, total_U, label='Gryzinski + BDE')
plt.title('PMMA Dapor and Gryz+BDE U')
plt.xlabel('E, eV')
plt.ylabel('U, cm$^{-1}$')
plt.legend()
plt.grid()
#plt.savefig('PMMA_Dapor_Gryz+BDE_U.png', dpi=300)
#%% SP
plt.loglog(mc.EE, PMMA_SP, label='Dapor')
plt.loglog(mc.EE, total_SP, label='Gryzinski + BDE')
plt.xlabel('E, eV')
plt.ylabel('SP, eV/cm')
plt.legend()
plt.grid()
#plt.savefig('PMMA_Dapor_Gryz+BDE_SP.png', dpi=300)
#%% Gryzinski stairway
gryz_bond_U = np.zeros((len(MMA_bonds), len(mc.EE)))
for i in range(len(MMA_bonds)):
gryz_bond_U[i, :] = elf.get_Gryzinski_CS(mc.EE, MMA_bonds[list(MMA_bonds.keys())[i]][0]) *\
MMA_bonds[list(MMA_bonds.keys())[i]][1] * mc.n_PMMA_mon
plt.loglog(mc.EE, gryz_bond_U[i, :], label=list(MMA_bonds.keys())[i])
plt.title('PMMA Dapor and Gryz+BDE bond CS for each bond')
plt.xlabel('E, eV')
plt.ylabel('U, cm$^{-1}$')
plt.ylim(1e+5, 1e+8)
plt.legend()
plt.grid()
#plt.savefig('PMMA_Dapor_Gryz+BDE_U_bonds.png', dpi=300)
#%%
def get_w_scission(EE):
    result = np.ones(len(EE)) * 4/40
result[np.where(EE < 815 * 0.0103)] = 4/(40 - 8)
result[np.where(EE < 420 * 0.0103)] = 4/(40 - 8 - 4)
result[np.where(EE < 418 * 0.0103)] = 4/(40 - 8 - 4 - 12)
result[np.where(EE < 406 * 0.0103)] = 4/(40 - 8 - 4 - 12 - 4)
result[np.where(EE < 383 * 0.0103)] = 4/(40 - 8 - 4 - 12 - 4 - 2)
result[np.where(EE < 364 * 0.0103)] = 4/(40 - 8 - 4 - 12 - 4 - 2 - 4)
result[np.where(EE < 356 * 0.0103)] = 4/(40 - 8 - 4 - 12 - 4 - 2 - 4 - 2)
result[np.where(EE < 354 * 0.0103)] = 0
return result
probs_easy = get_w_scission(mc.EE)
#%%
new_probs_easy = get_stairway({'C-C2': 4, 'C-C3': 2})
#%%
probs = np.zeros(len(mc.EE))
for i in range(len(probs)):
if np.sum(gryz_bond_U[:, i]) == 0:
continue
probs[i] = np.sum(gryz_bond_U[-2:, i]) / np.sum(gryz_bond_U[:, i])
end_ind = 200
plt.plot(mc.EE[:end_ind], probs_easy[:end_ind], label='basic')
plt.plot(mc.EE[:end_ind], new_probs_easy[:end_ind], label='new basic')
plt.plot(mc.EE[:end_ind], probs[:end_ind], label='Gryzinsky')
plt.title('Scission probability')
plt.xlabel('E, eV')
plt.ylabel('p')
plt.legend()
plt.grid()
#plt.savefig('scission_probs.png', dpi=300)
|
[
"[email protected]"
] | |
b0788388960b10b9cc402064bdf16311c76adf2a
|
9ce0d602404b2329dfb36a0ae31b43dd1865d76d
|
/app/models.py
|
d9f118e64dfe4d21fefde1ec1ff4885190fe0e26
|
[] |
no_license
|
haruyasu/django-video-membership
|
0cf40e6c2d28bbbc1c4c608df378a163b83f1654
|
87b8a0c4b3b12c4b901303bd2c6ee078069ba167
|
refs/heads/main
| 2023-01-01T18:39:13.697284 | 2020-10-25T10:59:41 | 2020-10-25T10:59:41 | 305,915,329 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,721 |
py
|
from django.db import models
from django.conf import settings
from django.db.models.signals import pre_save, post_save
from django.contrib.auth.signals import user_logged_in
from django.utils.text import slugify
from django.shortcuts import reverse
from django.contrib.auth import get_user_model
from allauth.account.signals import email_confirmed
import stripe
stripe.api_key = settings.STRIPE_SECRET_KEY
User = get_user_model()
class Pricing(models.Model):
name = models.CharField(max_length=100) # Basic / Pro / Premium
slug = models.SlugField()
stripe_price_id = models.CharField(max_length=100)
price = models.IntegerField()
def __str__(self):
return self.name
class Subscription(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
pricing = models.ForeignKey(Pricing, on_delete=models.CASCADE, related_name='subscriptions')
created = models.DateTimeField(auto_now_add=True)
stripe_subscription_id = models.CharField(max_length=50)
status = models.CharField(max_length=100)
def __str__(self):
return self.user.email
@property
def is_active(self):
return self.status == "active" or self.status == "trialing"
class Course(models.Model):
pricing_tiers = models.ManyToManyField(Pricing, blank=True)
name = models.CharField(max_length=100)
slug = models.SlugField(unique=True)
thumbnail = models.ImageField(upload_to="images")
description = models.TextField()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("course-detail", kwargs={"slug": self.slug})
class Video(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='videos')
vimeo_id = models.CharField(max_length=50)
title = models.CharField(max_length=150)
slug = models.SlugField(unique=True)
description = models.TextField()
order = models.IntegerField(default=1)
class Meta:
ordering = ["order"]
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("video-detail", kwargs={
"video_slug": self.slug,
"slug": self.course.slug
})
def pre_save_course(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = slugify(instance.name)
def pre_save_video(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = slugify(instance.title)
def post_email_confirmed(request, email_address, *args, **kwargs):
user = User.objects.get(email=email_address.email)
free_trial_pricing = Pricing.objects.get(name='Free Trial')
subscription = Subscription.objects.create(
user=user,
pricing=free_trial_pricing
)
stripe_customer = stripe.Customer.create(
email=user.email
)
stripe_subscription = stripe.Subscription.create(
customer=stripe_customer["id"],
items=[{'price': 'django-free-trial'}],
trial_period_days=7
)
subscription.status = stripe_subscription["status"] # trialing
subscription.stripe_subscription_id = stripe_subscription["id"]
subscription.save()
user.stripe_customer_id = stripe_customer["id"]
user.save()
def user_logged_in_receiver(sender, user, **kwargs):
subscription = user.subscription
sub = stripe.Subscription.retrieve(subscription.stripe_subscription_id)
subscription.status = sub["status"]
subscription.save()
user_logged_in.connect(user_logged_in_receiver)
email_confirmed.connect(post_email_confirmed)
pre_save.connect(pre_save_course, sender=Course)
pre_save.connect(pre_save_video, sender=Video)
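# Hedged sketch (added; not part of the original app): the subscription status
# is refreshed only at login above, so a Stripe webhook is the usual way to
# keep it current. The endpoint below is illustrative; `endpoint_secret` and
# URL wiring are assumed to exist elsewhere.
# from django.http import HttpResponse
# def stripe_webhook(request):
#     event = stripe.Webhook.construct_event(
#         request.body, request.META["HTTP_STRIPE_SIGNATURE"], endpoint_secret
#     )
#     if event["type"] == "customer.subscription.updated":
#         data = event["data"]["object"]
#         Subscription.objects.filter(
#             stripe_subscription_id=data["id"]
#         ).update(status=data["status"])
#     return HttpResponse(status=200)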
|
[
"[email protected]"
] | |
64a399c18f23c2f0509b1b87319fcf387ad2065d
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/01_netCDF_extraction/merra902TG/830-tideGauge.py
|
98529851e7c0414d44337ffea9a71bbf32c354c9
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,075 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
MERRAv2 netCDF extraction script - template
To create an extraction script for each tide gauge
@author: Michael Tadesse
"""
import os
import pandas as pd
from d_merra_define_grid import Coordinate, findPixels, findindx
from c_merra_read_netcdf import readnetcdf
from f_merra_subset import subsetter
def extract_data(delta=3):
"""
This is the master function that calls subsequent functions
to extract uwnd, vwnd, slp for the specified
tide gauges
delta: distance (in degrees) from the tide gauge
"""
print('Delta = {}'.format(delta), '\n')
#defining the folders for predictors
dir_in = "/lustre/fs0/home/mtadesse/MERRAv2/data"
surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
csv_path = "/lustre/fs0/home/mtadesse/merraLocalized"
#cd to the obs_surge dir to get TG information
os.chdir(surge_path)
tg_list = os.listdir()
#cd to the obs_surge dir to get TG information
os.chdir(dir_in)
years = os.listdir()
#################################
#looping through the year folders
#################################
#to mark the first csv
firstCsv = True;
for yr in years:
os.chdir(dir_in)
#print(yr, '\n')
os.chdir(os.path.join(dir_in, yr))
####################################
#looping through the daily .nc files
####################################
for dd in os.listdir():
os.chdir(os.path.join(dir_in, yr)) #back to the predictor folder
print(dd, '\n')
#########################################
#get netcdf components - predictor file
#########################################
nc_file = readnetcdf(dd)
            lon, lat, time, predSLP, predU10, predV10 = (
                nc_file[0], nc_file[1], nc_file[2],
                nc_file[3], nc_file[4], nc_file[5],
            )
x = 830
y = 831
#looping through individual tide gauges
for t in range(x, y):
#the name of the tide gauge - for saving purposes
# tg = tg_list[t].split('.mat.mat.csv')[0]
tg = tg_list[t]
#extract lon and lat data from surge csv file
#print(tg, '\n')
os.chdir(surge_path)
if os.stat(tg).st_size == 0:
print('\n', "This tide gauge has no surge data!", '\n')
continue
surge = pd.read_csv(tg, header = None)
#surge_with_date = add_date(surge)
#define tide gauge coordinate(lon, lat)
tg_cord = Coordinate(surge.iloc[0,0], surge.iloc[0,1])
#find closest grid points and their indices
close_grids = findPixels(tg_cord, delta, lon, lat)
ind_grids = findindx(close_grids, lon, lat)
#loop through preds#
#subset predictor on selected grid size
predictors = {'slp':predSLP, 'wnd_u':predU10, \
'wnd_v':predV10}
for xx in predictors.keys():
pred_new = subsetter(dd, predictors[xx], ind_grids, time)
if xx == 'slp':
if firstCsv:
finalSLP = pred_new
else:
finalSLP = pd.concat([finalSLP, pred_new], axis = 0)
print(finalSLP.shape)
elif xx == 'wnd_u':
if firstCsv:
finalUwnd = pred_new
else:
finalUwnd = pd.concat([finalUwnd, pred_new], axis = 0)
elif xx == 'wnd_v':
if firstCsv:
finalVwnd = pred_new
firstCsv = False;
else:
finalVwnd = pd.concat([finalVwnd, pred_new], axis = 0)
#create directories to save pred_new
os.chdir(csv_path)
#tide gauge directory
tg_name_old = tg.split('.mat.mat.csv')[0]
tg_name = '-'.join([str(t), tg_name_old])
try:
os.makedirs(tg_name)
os.chdir(tg_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg_name)
#save as csv
finalSLP.to_csv('slp.csv')
finalUwnd.to_csv('wnd_u.csv')
finalVwnd.to_csv('wnd_v.csv')
#run script
extract_data(delta=3)
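# Added note on the output layout (inferred from the code above): for tide
# gauge t, a directory "<t>-<gauge name>" is created under csv_path holding
# three CSVs -- slp.csv, wnd_u.csv and wnd_v.csv -- each accumulating one row
# per model time step across all processed years.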
|
[
"[email protected]"
] | |
884b9d01cec1bf9f1ce1a06c648463cd2ddab33d
|
159aed4755e47623d0aa7b652e178296be5c9604
|
/data/scripts/templates/object/draft_schematic/clothing/shared_clothing_shirt_casual_04.py
|
1f8fd20f134c94f5a9dd3f2bdc8e6da119d5dd5b
|
[
"MIT"
] |
permissive
|
anhstudios/swganh
|
fb67d42776864b1371e95f769f6864d0784061a3
|
41c519f6cdef5a1c68b369e760781652ece7fec9
|
refs/heads/develop
| 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 |
Python
|
UTF-8
|
Python
| false | false | 462 |
py
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_shirt_casual_04.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
[
"[email protected]"
] | |
4ce3ff8329588679e9e470c545d88baf564a200e
|
0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c
|
/custom_components/smartthinq_sensors/wideq/devices/refrigerator.py
|
8c6a5e1a3447dad65eb1b18cd2f31b52cf4f9b96
|
[
"Unlicense"
] |
permissive
|
bacco007/HomeAssistantConfig
|
d91a5368344f50abbea881bd1e6dfc57a0e456ca
|
8548d9999ddd54f13d6a307e013abcb8c897a74e
|
refs/heads/master
| 2023-08-30T07:07:33.571959 | 2023-08-29T20:00:00 | 2023-08-29T20:00:00 | 230,585,631 | 98 | 16 |
Unlicense
| 2023-09-09T08:28:39 | 2019-12-28T09:05:02 |
Python
|
UTF-8
|
Python
| false | false | 22,201 |
py
|
"""------------------for Refrigerator"""
from __future__ import annotations
import base64
import json
import logging
from ..const import RefrigeratorFeatures, StateOptions, TemperatureUnit
from ..core_async import ClientAsync
from ..device import LABEL_BIT_OFF, LABEL_BIT_ON, Device, DeviceStatus
from ..device_info import DeviceInfo
from ..model_info import TYPE_ENUM
FEATURE_DESCR = {
"@RE_TERM_EXPRESS_FREEZE_W": "express_freeze",
"@RE_TERM_EXPRESS_FRIDGE_W": "express_cool",
"@RE_TERM_ICE_PLUS_W": "ice_plus",
}
REFRTEMPUNIT = {
"F": TemperatureUnit.FAHRENHEIT,
"℃": TemperatureUnit.CELSIUS,
"˚F": TemperatureUnit.FAHRENHEIT,
"˚C": TemperatureUnit.CELSIUS,
}
# REFRTEMPUNIT = {
# "\uff26": TemperatureUnit.FAHRENHEIT,
# "\u2103": TemperatureUnit.CELSIUS,
# "\u02daF": TemperatureUnit.FAHRENHEIT,
# "\u02daC": TemperatureUnit.CELSIUS,
# }
DEFAULT_FRIDGE_RANGE_C = [1, 10]
DEFAULT_FRIDGE_RANGE_F = [30, 45]
DEFAULT_FREEZER_RANGE_C = [-24, -14]
DEFAULT_FREEZER_RANGE_F = [-8, 6]
REFR_ROOT_DATA = "refState"
CTRL_BASIC = ["Control", "basicCtrl"]
STATE_ECO_FRIENDLY = ["EcoFriendly", "ecoFriendly"]
STATE_ICE_PLUS = ["IcePlus", ""]
STATE_EXPRESS_FRIDGE = ["", "expressFridge"]
STATE_EXPRESS_MODE = ["", "expressMode"]
STATE_FRIDGE_TEMP = ["TempRefrigerator", "fridgeTemp"]
STATE_FREEZER_TEMP = ["TempFreezer", "freezerTemp"]
CMD_STATE_ECO_FRIENDLY = [CTRL_BASIC, ["SetControl", "basicCtrl"], STATE_ECO_FRIENDLY]
CMD_STATE_ICE_PLUS = [CTRL_BASIC, ["SetControl", "basicCtrl"], STATE_ICE_PLUS]
CMD_STATE_EXPRESS_FRIDGE = [
CTRL_BASIC,
["SetControl", "basicCtrl"],
STATE_EXPRESS_FRIDGE,
]
CMD_STATE_EXPRESS_MODE = [CTRL_BASIC, ["SetControl", "basicCtrl"], STATE_EXPRESS_MODE]
CMD_STATE_FRIDGE_TEMP = [CTRL_BASIC, ["SetControl", "basicCtrl"], STATE_FRIDGE_TEMP]
CMD_STATE_FREEZER_TEMP = [CTRL_BASIC, ["SetControl", "basicCtrl"], STATE_FREEZER_TEMP]
_LOGGER = logging.getLogger(__name__)
class RefrigeratorDevice(Device):
"""A higher-level interface for a refrigerator."""
def __init__(self, client: ClientAsync, device_info: DeviceInfo):
super().__init__(client, device_info, RefrigeratorStatus(self))
self._temp_unit = None
self._fridge_temps = None
self._fridge_ranges = None
self._freezer_temps = None
self._freezer_ranges = None
def _get_feature_info(self, item_key):
config = self.model_info.config_value("visibleItems")
if not config or not isinstance(config, list):
return None
if self.model_info.is_info_v2:
feature_key = "feature"
else:
feature_key = "Feature"
for item in config:
feature_value = item.get(feature_key, "")
if feature_value and feature_value == item_key:
return item
return None
def _get_feature_title(self, feature_name, item_key):
item_info = self._get_feature_info(item_key)
if not item_info:
return None
if self.model_info.is_info_v2:
title_key = "monTitle"
else:
title_key = "Title"
title_value = item_info.get(title_key)
if not title_value:
return feature_name
return FEATURE_DESCR.get(title_value, feature_name)
def _prepare_command_v1(self, cmd, key, value):
"""Prepare command for specific ThinQ1 device."""
data_key = "value"
if cmd.get(data_key, "") == "ControlData":
data_key = "data"
str_data = cmd.get(data_key)
if str_data:
status_data = self._status.data
for dt_key, dt_value in status_data.items():
if dt_key == key:
dt_value = value
str_data = str_data.replace(f"{{{{{dt_key}}}}}", dt_value)
json_data = json.loads(str_data)
_LOGGER.debug("Command data content: %s", str(json_data))
if self.model_info.binary_control_data:
cmd["format"] = "B64"
json_data = base64.b64encode(bytes(json_data)).decode("ascii")
cmd[data_key] = json_data
return cmd
def _prepare_command_v2(self, cmd, key, value):
"""Prepare command for specific ThinQ2 device."""
data_set = cmd.pop("data", None)
if not data_set:
data_set = {REFR_ROOT_DATA: {key: value}}
else:
for cmd_key in data_set[REFR_ROOT_DATA].keys():
data_set[REFR_ROOT_DATA][cmd_key] = (
value if cmd_key == key else "IGNORE"
)
cmd["dataSetList"] = data_set
return cmd
def _prepare_command(self, ctrl_key, command, key, value):
"""Prepare command for specific device."""
cmd = self.model_info.get_control_cmd(command, ctrl_key)
if not cmd:
return None
if self.model_info.is_info_v2:
return self._prepare_command_v2(cmd, key, value)
return self._prepare_command_v1(cmd, key, value)
def _set_temp_unit(self, unit=None):
"""Set the configured temperature unit."""
if unit and unit != StateOptions.NONE:
if not self._temp_unit or unit != self._temp_unit:
self._temp_unit = unit
self._fridge_temps = None
self._freezer_temps = None
return
def _get_temp_unit(self, unit=None):
"""Get the configured temperature unit."""
if unit:
self._set_temp_unit(unit)
return self._temp_unit
def _get_temps_v1(self, key):
"""Get valid values for temps for V1 models"""
unit = self._get_temp_unit()
if unit:
unit_key = "_F" if unit == TemperatureUnit.FAHRENHEIT else "_C"
if self.model_info.value_exist(key + unit_key):
key = key + unit_key
value_type = self.model_info.value_type(key)
if not value_type or value_type != TYPE_ENUM:
return {}
temp_values = self.model_info.value(key).options
return {k: v for k, v in temp_values.items() if v != ""}
def _get_temps_v2(self, key, unit_key=None):
"""Get valid values for temps for V2 models"""
if unit_key:
if ref_key := self.model_info.target_key(key, unit_key, "tempUnit"):
key = ref_key
value_type = self.model_info.value_type(key)
if not value_type or value_type != TYPE_ENUM:
return {}
temp_values = self.model_info.value(key).options
return {k: v for k, v in temp_values.items() if v != "IGNORE"}
@staticmethod
def _get_temp_ranges(temps):
"""Get min and max values inside a dict."""
min_val = 100
max_val = -100
for value in temps.values():
try:
int_val = int(value)
except ValueError:
continue
if int_val < min_val:
min_val = int_val
if int_val > max_val:
max_val = int_val
if min_val > max_val:
return None
return [min_val, max_val]
@staticmethod
def _get_temp_key(temps, value):
"""Get temp_key based on his value."""
if not temps:
return None
str_val = str(int(value))
for key, temp_val in temps.items():
if str_val == temp_val:
try:
return int(key)
except ValueError:
return None
return None
def get_fridge_temps(self, unit=None, unit_key=None):
"""Get valid values for fridge temp."""
self._set_temp_unit(unit)
if self._fridge_temps is None:
key = self._get_state_key(STATE_FRIDGE_TEMP)
if self.model_info.is_info_v2:
self._fridge_temps = self._get_temps_v2(key, unit_key)
else:
self._fridge_temps = self._get_temps_v1(key)
self._fridge_ranges = self._get_temp_ranges(self._fridge_temps)
return self._fridge_temps
def get_freezer_temps(self, unit=None, unit_key=None):
"""Get valid values for freezer temp."""
self._set_temp_unit(unit)
if self._freezer_temps is None:
key = self._get_state_key(STATE_FREEZER_TEMP)
if self.model_info.is_info_v2:
self._freezer_temps = self._get_temps_v2(key, unit_key)
else:
self._freezer_temps = self._get_temps_v1(key)
self._freezer_ranges = self._get_temp_ranges(self._freezer_temps)
return self._freezer_temps
@property
def target_temperature_step(self):
"""Return target temperature step used."""
return 1
@property
def fridge_target_temp_range(self):
"""Return range value for fridge target temperature."""
if self._fridge_ranges is None:
unit = self._get_temp_unit() or StateOptions.NONE
if unit == TemperatureUnit.FAHRENHEIT:
return DEFAULT_FRIDGE_RANGE_F
return DEFAULT_FRIDGE_RANGE_C
return self._fridge_ranges
@property
def freezer_target_temp_range(self):
"""Return range value for freezer target temperature."""
if self._freezer_ranges is None:
unit = self._get_temp_unit() or StateOptions.NONE
if unit == TemperatureUnit.FAHRENHEIT:
return DEFAULT_FREEZER_RANGE_F
return DEFAULT_FREEZER_RANGE_C
return self._freezer_ranges
@property
def set_values_allowed(self):
"""Check if values can be changed."""
if (
not self._status
or not self._status.is_on
or self._status.eco_friendly_enabled
):
return False
return True
async def _set_feature(self, turn_on: bool, state_key, cmd_key):
"""Switch a feature."""
status_key = self._get_state_key(state_key)
if not status_key:
return
status_name = LABEL_BIT_ON if turn_on else LABEL_BIT_OFF
status_value = self.model_info.enum_value(status_key, status_name)
if not status_value:
return
keys = self._get_cmd_keys(cmd_key)
await self.set(keys[0], keys[1], key=keys[2], value=status_value)
self._status.update_status_feat(status_key, status_value, True)
async def set_eco_friendly(self, turn_on=False):
"""Switch the echo friendly status."""
await self._set_feature(turn_on, STATE_ECO_FRIENDLY, CMD_STATE_ECO_FRIENDLY)
async def set_ice_plus(self, turn_on=False):
"""Switch the ice plus status."""
if self.model_info.is_info_v2:
return
if not self.set_values_allowed:
return
await self._set_feature(turn_on, STATE_ICE_PLUS, CMD_STATE_ICE_PLUS)
async def set_express_fridge(self, turn_on=False):
"""Switch the express fridge status."""
if not self.model_info.is_info_v2:
return
if not self.set_values_allowed:
return
await self._set_feature(turn_on, STATE_EXPRESS_FRIDGE, CMD_STATE_EXPRESS_FRIDGE)
async def set_express_mode(self, turn_on=False):
"""Switch the express mode status."""
if not self.model_info.is_info_v2:
return
if not self.set_values_allowed:
return
await self._set_feature(turn_on, STATE_EXPRESS_MODE, CMD_STATE_EXPRESS_MODE)
async def set_fridge_target_temp(self, temp):
"""Set the fridge target temperature."""
if not self.set_values_allowed:
return
if self._status.temp_fridge is None:
return
temp_key = self._get_temp_key(self._fridge_temps, temp)
if not temp_key:
raise ValueError(f"Target fridge temperature not valid: {temp}")
if not self.model_info.is_info_v2:
temp_key = str(temp_key)
status_key = self._get_state_key(STATE_FRIDGE_TEMP)
keys = self._get_cmd_keys(CMD_STATE_FRIDGE_TEMP)
await self.set(keys[0], keys[1], key=keys[2], value=temp_key)
self._status.update_status_feat(status_key, temp_key, False)
async def set_freezer_target_temp(self, temp):
"""Set the freezer target temperature."""
if not self.set_values_allowed:
return
if self._status.temp_freezer is None:
return
temp_key = self._get_temp_key(self._freezer_temps, temp)
if not temp_key:
raise ValueError(f"Target freezer temperature not valid: {temp}")
if not self.model_info.is_info_v2:
temp_key = str(temp_key)
status_key = self._get_state_key(STATE_FREEZER_TEMP)
keys = self._get_cmd_keys(CMD_STATE_FREEZER_TEMP)
await self.set(keys[0], keys[1], key=keys[2], value=temp_key)
self._status.update_status_feat(status_key, temp_key, False)
def reset_status(self):
self._status = RefrigeratorStatus(self)
return self._status
async def poll(self) -> RefrigeratorStatus | None:
"""Poll the device's current state."""
res = await self._device_poll(REFR_ROOT_DATA)
if not res:
return None
self._status = RefrigeratorStatus(self, res)
return self._status
class RefrigeratorStatus(DeviceStatus):
"""
Higher-level information about a refrigerator's current status.
:param device: The Device instance.
:param data: JSON data from the API.
"""
_device: RefrigeratorDevice
def __init__(self, device: RefrigeratorDevice, data: dict | None = None):
"""Initialize device status."""
super().__init__(device, data)
self._temp_unit = None
self._eco_friendly_state = None
self._sabbath_state = None
def _get_eco_friendly_state(self):
"""Get current eco-friendly state."""
if self._eco_friendly_state is None:
state = self.lookup_enum(STATE_ECO_FRIENDLY)
if not state:
self._eco_friendly_state = ""
else:
self._eco_friendly_state = state
return self._eco_friendly_state
def _get_sabbath_state(self):
"""Get current sabbath-mode state."""
if self._sabbath_state is None:
state = self.lookup_enum(["Sabbath", "sabbathMode"])
if not state:
self._sabbath_state = ""
else:
self._sabbath_state = state
return self._sabbath_state
def _get_default_index(self, key_mode, key_index):
"""Get default model info index key."""
config = self._device.model_info.config_value(key_mode)
if not config or not isinstance(config, dict):
return None
return config.get(key_index)
def _get_default_name_index(self, key_mode, key_index):
"""Get default model info index name."""
index = self._get_default_index(key_mode, key_index)
if index is None:
return None
return self._device.model_info.enum_index(key_index, index)
def _get_default_temp_index(self, key_mode, key_index):
"""Get default model info temperature index key."""
config = self._get_default_index(key_mode, key_index)
if not config or not isinstance(config, dict):
return None
unit = self._get_temp_unit() or StateOptions.NONE
unit_key = "tempUnit_F" if unit == TemperatureUnit.FAHRENHEIT else "tempUnit_C"
return config.get(unit_key)
def _get_temp_unit(self):
"""Get used temperature unit."""
if not self._temp_unit:
temp_unit = self.lookup_enum(["TempUnit", "tempUnit"])
if not temp_unit:
return None
self._temp_unit = REFRTEMPUNIT.get(temp_unit, TemperatureUnit.CELSIUS)
return self._temp_unit
def _get_temp_key(self, key):
"""Get used temperature unit key."""
temp_key = None
if self.eco_friendly_enabled:
temp_key = self._get_default_temp_index("ecoFriendlyDefaultIndex", key)
if temp_key is None:
if self.is_info_v2:
temp_key = self.int_or_none(self._data.get(key))
else:
temp_key = self._data.get(key)
if temp_key is None:
return None
return str(temp_key)
def update_status(self, key, value):
"""Update device status."""
if not super().update_status(key, value):
return False
self._eco_friendly_state = None
return True
@property
def is_on(self):
"""Return if device is on."""
return self.has_data
@property
def temp_fridge(self):
"""Return current fridge temperature."""
index = 0
unit_key = None
if self.is_info_v2:
unit_key = self._data.get("tempUnit")
index = 1
temp_key = self._get_temp_key(STATE_FRIDGE_TEMP[index])
if temp_key is None:
return None
temp_lists = self._device.get_fridge_temps(self._get_temp_unit(), unit_key)
return self.to_int_or_none(temp_lists.get(temp_key))
@property
def temp_freezer(self):
"""Return current freezer temperature."""
index = 0
unit_key = None
if self.is_info_v2:
unit_key = self._data.get("tempUnit")
index = 1
temp_key = self._get_temp_key(STATE_FREEZER_TEMP[index])
if temp_key is None:
return None
temp_lists = self._device.get_freezer_temps(self._get_temp_unit(), unit_key)
return self.to_int_or_none(temp_lists.get(temp_key))
@property
def temp_unit(self):
"""Return used temperature unit."""
return self._get_temp_unit() or StateOptions.NONE
@property
def door_opened_state(self):
"""Return door opened state."""
if self.is_info_v2:
state = self._data.get("atLeastOneDoorOpen")
else:
state = self.lookup_enum("DoorOpenState")
if not state:
return StateOptions.NONE
return self._device.get_enum_text(state)
@property
def eco_friendly_enabled(self):
"""Return if eco friendly is enabled."""
state = self._get_eco_friendly_state()
if not state:
return False
return bool(state == LABEL_BIT_ON)
@property
def eco_friendly_state(self):
"""Return current eco friendly state."""
key = STATE_ECO_FRIENDLY[1 if self.is_info_v2 else 0]
status = self._get_eco_friendly_state()
return self._update_feature(RefrigeratorFeatures.ECOFRIENDLY, status, True, key)
@property
def ice_plus_status(self):
"""Return current ice plus status."""
if self.is_info_v2:
return None
key = STATE_ICE_PLUS[0]
status = self.lookup_enum(key)
return self._update_feature(RefrigeratorFeatures.ICEPLUS, status, True, key)
@property
def express_fridge_status(self):
"""Return current express fridge status."""
if not self.is_info_v2:
return None
key = STATE_EXPRESS_FRIDGE[1]
status = self.lookup_enum(key)
return self._update_feature(
RefrigeratorFeatures.EXPRESSFRIDGE, status, True, key
)
@property
def express_mode_status(self):
"""Return current express mode status."""
if not self.is_info_v2:
return None
key = STATE_EXPRESS_MODE[1]
status = self.lookup_enum(key)
return self._update_feature(RefrigeratorFeatures.EXPRESSMODE, status, True, key)
@property
def smart_saving_state(self):
"""Return current smart saving state."""
state = self.lookup_enum(["SmartSavingModeStatus", "smartSavingRun"])
if not state:
return StateOptions.NONE
return self._device.get_enum_text(state)
@property
def smart_saving_mode(self):
"""Return current smart saving mode."""
if self.is_info_v2:
key = "smartSavingMode"
else:
key = "SmartSavingMode"
status = self.lookup_enum(key)
return self._update_feature(
RefrigeratorFeatures.SMARTSAVINGMODE, status, True, key
)
@property
def fresh_air_filter_status(self):
"""Return current fresh air filter status."""
if self.is_info_v2:
key = "freshAirFilter"
else:
key = "FreshAirFilter"
status = self.lookup_enum(key)
return self._update_feature(
RefrigeratorFeatures.FRESHAIRFILTER, status, True, key
)
@property
def water_filter_used_month(self):
"""Return water filter used months."""
if self.is_info_v2:
key = "waterFilter"
else:
key = "WaterFilterUsedMonth"
counter = None
if self.is_info_v2:
status = self._data.get(key)
if status:
counters = status.split("_", 1)
if len(counters) > 1:
counter = counters[0]
else:
counter = self._data.get(key)
value = "N/A" if not counter else counter
return self._update_feature(
RefrigeratorFeatures.WATERFILTERUSED_MONTH, value, False, key
)
@property
def locked_state(self):
"""Return current locked state."""
state = self.lookup_enum("LockingStatus")
if not state:
return StateOptions.NONE
return self._device.get_enum_text(state)
@property
def active_saving_status(self):
"""Return current active saving status."""
return self._data.get("ActiveSavingStatus", "N/A")
def _update_features(self):
_ = [
self.eco_friendly_state,
self.ice_plus_status,
self.express_fridge_status,
self.express_mode_status,
self.smart_saving_mode,
self.fresh_air_filter_status,
self.water_filter_used_month,
]
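# Hedged usage sketch (added for illustration; `client` and `device_info` are
# assumed to come from an authenticated ThinQ session elsewhere).
async def _demo_poll(client: ClientAsync, device_info: DeviceInfo):
    device = RefrigeratorDevice(client, device_info)
    status = await device.poll()
    if status and status.is_on:
        print(status.temp_fridge, status.temp_freezer, status.temp_unit)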
|
[
"[email protected]"
] | |
91965e649e25eb2201b821ce51e22d441092e675
|
f7b25eaee7a19767a27f6172b87e552bcfe608ad
|
/apps/certification/tests.py
|
42c8dfc07ad42bee54b4ddce012a3fb48cc6fc0a
|
[] |
no_license
|
Mid0Riii/Psyconsole
|
addf280e075e29abc746b437a114d531d2e70f10
|
d9540e0b4b37fdd44be0a169d3ce8cdddc2b956a
|
refs/heads/master
| 2023-01-01T05:10:18.520481 | 2020-10-18T11:11:26 | 2020-10-18T11:11:26 | 266,294,872 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,117 |
py
|
from django.test import TestCase
# Create your tests here.
import qrcode
from PIL import Image, ImageDraw, ImageFont
import datetime
#
# image = Image.open('assets/cert.jpg')
#
# def get_size(image):
# # get the width and height of the image
# width, height = image.size
# return width,height
#
# setFont = ImageFont.truetype("assets/msyh.ttf",20)
# print(get_size(image))
# draw = ImageDraw.Draw(image)
# draw.text((200,100),"测试",fill="black",font=setFont)
# image.show()
# # img = qrcode.make('http://www.baidu.com')
# # with open('test.png', 'wb') as f:
# # img.save(f)
def generateCert(type, code, name, gender, unit,grade,title,avai_year,avai_mouth,avai_day,avatar):
curr_time = datetime.datetime.now()
localdate = curr_time.strftime("%Y-%m-%d").split("-")
image = Image.open('assets/cert.jpg')
fontPath = "assets/msyh.ttf"
setFont = ImageFont.truetype(fontPath, 70)
dateFont = ImageFont.truetype(fontPath,50)
draw = ImageDraw.Draw(image)
draw.text((700, 260), type, fill="black", font=setFont)
draw.text((700, 400), code, fill="black", font=setFont)
draw.text((1290, 1500), name, fill="black", font=setFont)
draw.text((1290, 1630), gender, fill="black", font=setFont)
draw.text((1430, 1760), unit, fill="black", font=setFont)
draw.text((1430, 1890), grade, fill="black", font=setFont)
draw.text((1290, 2010), title, fill="black", font=setFont)
draw.text((1230, 2295), avai_year, fill="black", font=dateFont)
draw.text((1450, 2295), avai_mouth, fill="black", font=dateFont)
draw.text((1600, 2295), avai_day, fill="black", font=dateFont)
draw.text((1660, 2805), localdate[0], fill="black", font=dateFont)
draw.text((1870, 2805), localdate[1], fill="black", font=dateFont)
draw.text((2010, 2805), localdate[2], fill="black", font=dateFont)
avatar = Image.open("assets/defaultavatar.jpg").convert("CMYK")
avatar = avatar.resize((400,560))
image.paste(avatar,(585,1525))
image.show()
generateCert("37373737373737", "普通会员", "张三", "男", "南昌大学", "二级", "讲师", "2020","11","20","" )
|
[
"[email protected]"
] | |
579496a4ada0cb14b1e59a2b9b0b835e5ce6c8ee
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/M/mick/test_395.py
|
215e88633f5bb3f554649c55ba67ce4f7bef9adc
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,686 |
py
|
import scraperwiki
import gviz_api
page_template = """
<html>
<head>
<title>Bar-Meter</title>
<script src="http://www.google.com/jsapi" type="text/javascript"></script>
<script>
google.load("visualization", "1", {packages:["table","corechart"]});
google.setOnLoadCallback(drawTable);
function drawTable() {
%(jscode)s
var jscode_table = new google.visualization.Table(document.getElementById('table_div_jscode'));
jscode_table.draw(jscode_data, {showRowNumber: true});
%(jscode_chart)s
var chart = new google.visualization.PieChart(document.getElementById('chart_div'));
chart.draw(jscode_data_chart, {title: 'Bars per city'});
}
</script>
</head>
<body>
<H1>Bars in Austrian cities</H1>
<div id="table_div_jscode"></div>
<div id="chart_div" style="width: 900px; height: 500px;"></div>
</body>
</html>
"""
def main():
scraperwiki.sqlite.attach("at_herold_branches")
data = scraperwiki.sqlite.select(
'''city, branch, count(*) as business from at_herold_branches.swdata
where branch='was_bars'
group by city, branch
order by business desc'''
)
description = {"city": ("string", "City"),
"branch": ("string", "Branch"),
"business": ("number", "Count")}
data_table = gviz_api.DataTable(description)
data_table.LoadData(data)
# Creating a JavaScript code string
jscode = data_table.ToJSCode("jscode_data",
columns_order=("city", "branch", "business"))
# Creating a JSon string
#json = data_table.ToJSon(columns_order=("city", "branch", "business"),
# order_by="city")
data_chart = scraperwiki.sqlite.select(
'''city, count(*) as business from at_herold_branches.swdata
group by city'''
)
description_chart = {"city": ("string", "City"),
"business": ("number", "Count")}
data_table_chart = gviz_api.DataTable(description_chart)
data_table_chart.LoadData(data_chart)
jscode_chart = data_table_chart.ToJSCode("jscode_data_chart",
columns_order=("city", "business"),
order_by="city")
    print(page_template % vars())
main()
|
[
"[email protected]"
] | |
661166baa4aeb5d49e0acef2a214a82c99197977
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/frog-position-after-t-seconds/386090832.py
|
9a855ba87a746a4da2b3445c88f2d2167012e3ee
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 914 |
py
|
# title: frog-position-after-t-seconds
# detail: https://leetcode.com/submissions/detail/386090832/
# datetime: Tue Aug 25 17:01:25 2020
# runtime: 100 ms
# memory: 14 MB
import collections
from typing import List
class Solution:
def frogPosition(self, n: int, edges: List[List[int]], t: int, target: int) -> float:
visited = {1}
g = collections.defaultdict(list)
for i, j in edges:
g[i].append(j)
g[j].append(i)
def jump(i, p, t):
l = len(g[i]) - (i != 1)
if i == target:
if l == 0 or t == 0:
return 1
else:
return 0
if t == 0:
return 0
for j in g[i]:
if j == p:
continue
prop = jump(j, i, t - 1)
if prop:
return prop / l
return 0
return jump(1, 0, t)
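# Hedged local check (added; not part of the original submission -- LeetCode
# normally supplies the harness): the sample case n=7, t=2, target=4 should
# give 1/3 * 1/2 = 0.1666...
if __name__ == "__main__":
    print(Solution().frogPosition(7, [[1, 2], [1, 3], [1, 7], [2, 4], [2, 6], [3, 5]], 2, 4))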
|
[
"[email protected]"
] | |
d2738856b779dd77745eccd9ba6a256cc478cd52
|
aca253ff1a97c96a1a0a9a5802aa623789662bb1
|
/p036/modify_tree.py
|
85501c51bdad36228626e0879d0805eb3bab30d1
|
[] |
no_license
|
KD-huhu/PyQt5
|
a6128a34b93f6e2da7216d5818f66dc9614216bc
|
1c33a6549c2fcf663168256553d8c24e25d9a69c
|
refs/heads/master
| 2022-07-03T07:37:29.837547 | 2020-05-17T14:54:39 | 2020-05-17T14:54:39 | 261,768,854 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,719 |
py
|
import sys
from PyQt5.QtWidgets import *
class ModifyTree(QWidget):
    def __init__(self, parent=None):
        super(ModifyTree, self).__init__(parent)
        self.setWindowTitle('TreeWidget example')
        operatorLayout = QHBoxLayout()
        addBtn = QPushButton('Add node')
        updateBtn = QPushButton('Update node')
        deleteBtn = QPushButton('Delete node')
        operatorLayout.addWidget(addBtn)
        operatorLayout.addWidget(updateBtn)
        operatorLayout.addWidget(deleteBtn)
        addBtn.clicked.connect(self.addNode)  # bind the slots
        updateBtn.clicked.connect(self.updateNode)
        deleteBtn.clicked.connect(self.deleteNode)
        self.tree = QTreeWidget()  # build the tree
        self.tree.setColumnCount(2)
        self.tree.setHeaderLabels(['Key', 'Value'])
        root = QTreeWidgetItem(self.tree)
        root.setText(0, 'root')
        root.setText(1, '0')
        child1 = QTreeWidgetItem(root)
        child1.setText(0, 'child1')
        child1.setText(1, '1')
        child2 = QTreeWidgetItem(root)
        child2.setText(0, 'child2')
        child2.setText(1, '2')
        child3 = QTreeWidgetItem(child2)
        child3.setText(0, 'child3')
        child3.setText(1, '3')
        self.tree.clicked.connect(self.onTreeClicked)
        mainLayout = QVBoxLayout(self)
        mainLayout.addLayout(operatorLayout)
        mainLayout.addWidget(self.tree)
        self.setLayout(mainLayout)
    def onTreeClicked(self, index):
        item = self.tree.currentItem()
        print(index.row())
        print('key=%s,value=%s' % (item.text(0), item.text(1)))
    def addNode(self):  # add a child under the current node
        print('add node')
        item = self.tree.currentItem()  # get the current node
        print(item)
        node = QTreeWidgetItem(item)  # create a node object as a child of the current node
        node.setText(0, 'new node')
        node.setText(1, 'new value')
    def updateNode(self):
        print('update node')
        item = self.tree.currentItem()  # modify the current node in place
        item.setText(0, 'updated node')
        item.setText(1, 'value has been updated')
    def deleteNode(self):
        print('delete node')
        item = self.tree.currentItem()  # current node
        root = self.tree.invisibleRootItem()  # invisible parent of the top-level items
        for item in self.tree.selectedItems():  # remove each selected item from its parent
            (item.parent() or root).removeChild(item)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    tree = ModifyTree()
    tree.show()
    sys.exit(app.exec_())
|
[
"[email protected]"
] | |
1f5468676d551ebb3f849b542fc5defe208c8f8c
|
731c3f2f85f6002725322eedc0b2c8b5e74f610e
|
/1-jakc/jakc_hr_schedule/models/jakc_hr_employee.py
|
f81455e3f0490de9f60a0733819918ac05534423
|
[] |
no_license
|
babarlhr/project-0021
|
1ac824657f893c8f25d6eb3b839051f350d7cc9d
|
e30b8a9f5d2147d3ca5b56b69ec5dbd22f712a91
|
refs/heads/master
| 2021-09-22T15:45:47.431000 | 2018-09-11T14:59:49 | 2018-09-11T14:59:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 310 |
py
|
from openerp import fields, models, api, _
from openerp.exceptions import Warning, ValidationError
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class HrEmployee(models.Model):
_inherit = 'hr.employee'
nik = fields.Char('NIK', size=20, required=True)
|
[
"[email protected]"
] | |
ec72bc2000d02a1c5f1186aff1d4b6d0651c8040
|
b9abb3c31e98b95b4d21380d929897e70b1a4233
|
/models/data_models/__init__.py
|
a18c5783e892bcd801527f31c065e8f6dc6911c3
|
[] |
no_license
|
codacy-badger/noobit-backend
|
8b3cc4838990c08a1a830dce833a73c0a900f68c
|
874372803d709d078947b38024856a926763fff4
|
refs/heads/master
| 2022-04-07T22:15:16.681500 | 2020-03-25T20:45:35 | 2020-03-25T20:45:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 70 |
py
|
from server import settings
from .items import *
from .users import *
|
[
"[email protected]"
] | |
c7a08b2e9d4d981344d5de6f125cc2b7bb211375
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02804/s852532161.py
|
db55ddcaa9dc897931e5c7429d31e68beae24140
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 607 |
py
|
n,k=map(int,input().split())
A=list(map(int,input().split()))
mod=10**9+7
A.sort()
def cmb(n, r, mod):
if ( r<0 or r>n ):
return 0
r = min(r, n-r)
return g1[n] * g2[r] * g2[n-r] % mod
N = 10**5
g1 = [1, 1]  # factorial table
g2 = [1, 1]  # inverse factorial table
inverse = [0, 1]  # modular inverses, used to build the inverse factorial table
for i in range( 2, N + 1 ):
g1.append( ( g1[-1] * i ) % mod )
inverse.append( ( -inverse[mod % i] * (mod//i) ) % mod )
g2.append( (g2[-1] * inverse[-1]) % mod )
ans,bns=0,0
for j in range(n-k+1):
ans=(ans+(A[n-j-1]-A[j])*cmb(n-j-1,k-1,mod))%mod
print(ans%mod)
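# Hedged sanity check (added; not part of the original submission): once the
# tables above are built, cmb should reproduce small binomial coefficients.
assert cmb(5, 2, mod) == 10  # C(5, 2) = 10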
|
[
"[email protected]"
] | |
55adec22c2b1a4fd58bcba0728db6e2560ac8d54
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/twilio/build/lib/twilio/rest/ip_messaging/v2/__init__.py
|
ebd0c69459eafe129379c2b02fe69211ed1fe8af
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a7d8849526bea15c375620047af3b52bac72acf3b87db9881e9bddafd4c1c295
size 1343
|
[
"[email protected]"
] | |
90256ce535bb47bd3131fa27540b688141cd699c
|
a9305f461b2c03e4a55fec9f1ecc75f78265eb8e
|
/opencv/Realidade-Aumentada-master/insertObject/glyphfunctions.py
|
c5cdbb695347ab7fb65ec816e467b7e27d9f5fd8
|
[] |
no_license
|
JoaoBueno/estudos-python
|
653afb174f2d141fcc82511c51cbfd2bca1b55cb
|
606e188e88ee3a2b2e1daee60c71948c678228e1
|
refs/heads/master
| 2022-01-24T20:17:52.702768 | 2022-01-19T20:39:20 | 2022-01-19T20:39:20 | 150,925,137 | 2 | 2 | null | 2022-01-19T20:40:46 | 2018-09-30T03:09:08 |
Python
|
UTF-8
|
Python
| false | false | 3,278 |
py
|
import cv2
import numpy as np
def order_points(points):
s = points.sum(axis=1)
diff = np.diff(points, axis=1)
ordered_points = np.zeros((4,2), dtype="float32")
ordered_points[0] = points[np.argmin(s)]
ordered_points[2] = points[np.argmax(s)]
ordered_points[1] = points[np.argmin(diff)]
ordered_points[3] = points[np.argmax(diff)]
return ordered_points
def max_width_height(points):
(tl, tr, br, bl) = points
top_width = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
bottom_width = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
max_width = max(int(top_width), int(bottom_width))
left_height = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
right_height = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
max_height = max(int(left_height), int(right_height))
return (max_width,max_height)
def topdown_points(max_width, max_height):
return np.array([
[0, 0],
[max_width-1, 0],
[max_width-1, max_height-1],
[0, max_height-1]], dtype="float32")
def get_topdown_quad(image, src):
# src and dst points
src = order_points(src)
(max_width,max_height) = max_width_height(src)
dst = topdown_points(max_width, max_height)
# warp perspective
matrix = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(image, matrix, max_width_height(src))
# return top-down quad
return warped
def get_glyph_pattern(image, black_threshold, white_threshold):
# collect pixel from each cell (left to right, top to bottom)
cells = []
cell_half_width = int(round(image.shape[1] / 10.0))
cell_half_height = int(round(image.shape[0] / 10.0))
row1 = cell_half_height*3
row2 = cell_half_height*5
row3 = cell_half_height*7
col1 = cell_half_width*3
col2 = cell_half_width*5
col3 = cell_half_width*7
cells.append(image[row1, col1])
cells.append(image[row1, col2])
cells.append(image[row1, col3])
cells.append(image[row2, col1])
cells.append(image[row2, col2])
cells.append(image[row2, col3])
cells.append(image[row3, col1])
cells.append(image[row3, col2])
cells.append(image[row3, col3])
# threshold pixels to either black or white
for idx, val in enumerate(cells):
if val < black_threshold:
cells[idx] = 0
elif val > white_threshold:
cells[idx] = 1
else:
return None
return cells
def get_vectors(image, points):
# order points
points = order_points(points)
# load calibration data
with np.load('R1.npz') as X:
mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
# set up criteria, image, points and axis
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
imgp = np.array(points, dtype="float32")
objp = np.array([[0.,0.,0.],[1.,0.,0.],[1.,1.,0.],[0.,1.,0.]], dtype="float32")
# calculate rotation and translation vectors
cv2.cornerSubPix(gray,imgp,(11,11),(-1,-1),criteria)
rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)
return rvecs, tvecs
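# --- Hedged usage sketch (added; synthetic corner data, not from the original repo) ---
if __name__ == "__main__":
    quad = np.array([[10, 10], [200, 20], [190, 180], [5, 170]], dtype="float32")
    print(order_points(quad))                    # corners ordered tl, tr, br, bl
    print(max_width_height(order_points(quad)))  # (width, height) of the warp target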
|
[
"[email protected]"
] | |
f08bb2bf9dddb60974d27a3fbedbb68e56233d38
|
7d8e040cb703e6f6e2d55b5dc64fc9124d85dde8
|
/tests/test_sklearn_gaussian_process_classifier.py
|
cc65cc32b1d02e64a75788f2b0aa18cd2d1849a7
|
[
"MIT"
] |
permissive
|
Global-localhost/sklearn-onnx
|
fc44aa481a91482f187cfd2307df6061b77742af
|
a8267e7ba946d8b0596951060e5dca39fec47439
|
refs/heads/master
| 2023-03-23T00:19:31.474251 | 2021-03-03T19:17:12 | 2021-03-03T19:17:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,957 |
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
from distutils.version import StrictVersion
import numpy as np
from numpy.testing import assert_almost_equal
import scipy
from onnxruntime import InferenceSession, SessionOptions
try:
from onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail
except ImportError:
OrtFail = RuntimeError
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import __version__ as sklver
try:
from sklearn.gaussian_process import GaussianProcessClassifier
except ImportError:
GaussianProcessClassifier = None
from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType
from skl2onnx import to_onnx
from skl2onnx.helpers.onnx_helper import change_onnx_domain
from test_utils import dump_data_and_model, TARGET_OPSET
sklver_ = ".".join(sklver.split('.')[:2])
class TestSklearnGaussianProcessClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
from ortcustomops import (
onnx_op, PyCustomOpDef, get_library_path)
except ImportError:
return
@onnx_op(op_type="SolveFloat",
inputs=[PyCustomOpDef.dt_float, PyCustomOpDef.dt_float],
outputs=[PyCustomOpDef.dt_float])
def solveopf(a, b):
# The user custom op implementation here.
return scipy.linalg.solve(a, b).astype(np.float32)
@onnx_op(op_type="SolveDouble",
inputs=[PyCustomOpDef.dt_double, PyCustomOpDef.dt_double],
outputs=[PyCustomOpDef.dt_double])
def solveopd(a, b):
# The user custom op implementation here.
return scipy.linalg.solve(a, b).astype(np.float64)
cls.path = get_library_path()
def fit_classification_model(self, gp, n_classes=2):
data = load_iris()
X, y = data.data, data.target
if n_classes == 2:
y = y % 2
elif n_classes != 3:
raise NotImplementedError("n_classes must be 2 or 3")
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=3)
gp.fit(X_train, y_train)
return gp, X_test.astype(np.float32)
def common_test_gpc(self, dtype=np.float32, n_classes=2):
gp = GaussianProcessClassifier()
gp, X = self.fit_classification_model(gp, n_classes=n_classes)
# return_cov=False, return_std=False
if dtype == np.float32:
cls = FloatTensorType
else:
cls = DoubleTensorType
model_onnx = to_onnx(
gp, initial_types=[('X', cls([None, None]))],
target_opset=TARGET_OPSET,
options={GaussianProcessClassifier: {
'zipmap': False, 'optim': 'cdist'}})
self.assertTrue(model_onnx is not None)
try:
sess = InferenceSession(model_onnx.SerializeToString())
except OrtFail:
if not hasattr(self, 'path'):
return
suffix = 'Double' if dtype == np.float64 else 'Float'
# Operator Solve is missing
model_onnx = change_onnx_domain(
model_onnx, {'Solve': ('Solve%s' % suffix, 'ai.onnx.contrib')})
so = SessionOptions()
so.register_custom_ops_library(self.path)
sess = InferenceSession(model_onnx.SerializeToString(), so)
res = sess.run(None, {'X': X.astype(dtype)})
assert_almost_equal(res[0].ravel(), gp.predict(X).ravel())
assert_almost_equal(res[1], gp.predict_proba(X),
decimal=3)
return
dt = 32 if dtype == np.float32 else 64
dump_data_and_model(
X.astype(dtype), gp, model_onnx, verbose=False,
basename="SklearnGaussianProcessRBFT%d%d" % (n_classes, dt))
@unittest.skipIf(TARGET_OPSET < 12, reason="einsum")
@unittest.skipIf(GaussianProcessClassifier is None,
reason="scikit-learn is too old")
@unittest.skipIf(StrictVersion(sklver_) < StrictVersion("0.22"),
reason="not available")
def test_gpc_float_bin(self):
self.common_test_gpc(dtype=np.float32)
@unittest.skipIf(TARGET_OPSET < 12, reason="einsum, reciprocal")
@unittest.skipIf(GaussianProcessClassifier is None,
reason="scikit-learn is too old")
@unittest.skipIf(StrictVersion(sklver_) < StrictVersion("0.22"),
reason="not available")
def test_gpc_double_bin(self):
self.common_test_gpc(dtype=np.float64)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
2dc0f4d0b55a84dd527af325de9d13fe3c90d1e9
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc159/abc159_c/11098579.py
|
97346785a4fa732160d26402e7682a23418b6d98
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 36 |
py
|
L = int(input())
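# Splitting the ribbon into three sides x + y + z = L, the volume x * y * z is
# maximized at x = y = z = L / 3 (by AM-GM), giving (L / 3) ** 3 = L**3 / 27.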
print(L**3 / 27)
|
[
"[email protected]"
] | |
c1c6e061d4012ce3011878e9be9c295ead79c428
|
a7dad581abcc74dd191754268131ff2ebef060fc
|
/fabfile.py
|
f16c5d1bf778c9e64bfdc2d6b25d034fd7a36380
|
[] |
no_license
|
jeremyjbowers/can-i-vote
|
5eeba4c82ab1a1f6fe94b6baaec691ecc82eea4a
|
2388b285387f59e271759d3fa71c6831b7414b38
|
refs/heads/master
| 2020-05-16T22:25:58.515072 | 2012-10-07T19:04:24 | 2012-10-07T19:04:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| true | false | 446 |
py
|
#!/usr/bin/env python
from fabric.api import *
"""
Base configuration
"""
env.project_name = 'vote'
env.user = 'root'
env.repo_path = '/home/canivote/can-i-vote/%(project_name)s' % env
"""
Environments
"""
def prod():
env.hosts = ['198.61.200.10']
"""
Commands
"""
def git_pull(release):
with cd(env.repo_path):
run('git pull origin %s' % release)
def restart():
with cd(env.repo_path):
run('utils/restart_gunicorn.sh')
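# Hedged usage note (added): with Fabric 1.x task syntax, a typical deploy is
#   fab prod git_pull:master restart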
|
[
"[email protected]"
] | |
6824a784b25c19dfa9fc7df1a76fe30f908699b5
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations_async/_express_route_links_operations_async.py
|
0ce55b6c442f9135914a6c16b0da92095cb26bef
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 |
MIT
| 2020-06-16T16:38:15 | 2019-08-30T21:08:55 |
Python
|
UTF-8
|
Python
| false | false | 8,416 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteLinksOperations:
"""ExpressRouteLinksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
express_route_port_name: str,
link_name: str,
**kwargs
) -> "models.ExpressRouteLink":
"""Retrieves the specified ExpressRouteLink resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param link_name: The name of the ExpressRouteLink resource.
:type link_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteLink, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.ExpressRouteLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteLink"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
'linkName': self._serialize.url("link_name", link_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links/{linkName}'} # type: ignore
def list(
self,
resource_group_name: str,
express_route_port_name: str,
**kwargs
) -> AsyncIterable["models.ExpressRouteLinkListResult"]:
"""Retrieve the ExpressRouteLink sub-resources of the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteLinkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ExpressRouteLinkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteLinkListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteLinkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links'} # type: ignore
|
[
"[email protected]"
] | |
05a4f511f05f4671e829cce55a8c86fcf668af92
|
05b24701576cc5d470b6ab49b25f966d3764c2d2
|
/venv/Lib/site-packages/pip/_internal/commands/configuration.py
|
31e040923ccea0d7396b56a98efd233422e4771f
|
[
"MIT"
] |
permissive
|
taneemishere/Spam-Comment-Detector
|
e80d27cdc679ad55a774052c9fa8f897fe38a514
|
b0c75cc00ef584a571ab1b2b579a6016b3504792
|
refs/heads/main
| 2023-01-24T01:06:57.299863 | 2020-11-14T05:29:58 | 2020-11-14T05:29:58 | 305,711,846 | 2 | 1 |
MIT
| 2020-11-12T07:03:38 | 2020-10-20T13:10:41 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 8,168 |
py
|
import logging
import os
import subprocess
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.configuration import (
Configuration, get_configuration_files, kinds,
)
from pip._internal.exceptions import PipError
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.misc import get_prog
from pip._internal.utils.virtualenv import running_under_virtualenv
logger = logging.getLogger(__name__)
class ConfigurationCommand(Command):
"""Manage local and global configuration.
Subcommands:
list: List the active configuration (or from the file specified)
edit: Edit the configuration file in an editor
get: Get the value associated with name
set: Set the name=value
unset: Unset the value associated with name
If none of --user, --global and --site are passed, a virtual
environment configuration file is used if one is active and the file
    exists. Otherwise, all modifications happen to the user file by
    default.
"""
name = 'config'
usage = """
%prog [<file-option>] list
%prog [<file-option>] [--editor <editor-path>] edit
%prog [<file-option>] get name
%prog [<file-option>] set name value
%prog [<file-option>] unset name
"""
summary = "Manage local and global configuration."
def __init__(self, *args, **kwargs):
super(ConfigurationCommand, self).__init__(*args, **kwargs)
self.configuration = None
self.cmd_opts.add_option(
'--editor',
dest='editor',
action='store',
default=None,
help=(
'Editor to use to edit the file. Uses VISUAL or EDITOR '
'environment variables if not provided.'
)
)
self.cmd_opts.add_option(
'--global',
dest='global_file',
action='store_true',
default=False,
help='Use the system-wide configuration file only'
)
self.cmd_opts.add_option(
'--user',
dest='user_file',
action='store_true',
default=False,
help='Use the user configuration file only'
)
self.cmd_opts.add_option(
'--site',
dest='site_file',
action='store_true',
default=False,
help='Use the current environment configuration file only'
)
self.cmd_opts.add_option(
'--venv',
dest='venv_file',
action='store_true',
default=False,
help=(
'[Deprecated] Use the current environment configuration '
'file in a virtual environment only'
)
)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
handlers = {
"list": self.list_values,
"edit": self.open_in_editor,
"get": self.get_name,
"set": self.set_name_value,
"unset": self.unset_name
}
# Determine action
if not args or args[0] not in handlers:
logger.error("Need an action ({}) to perform.".format(
", ".join(sorted(handlers)))
)
return ERROR
action = args[0]
# Determine which configuration files are to be loaded
# Depends on whether the command is modifying.
try:
load_only = self._determine_file(
options, need_value=(action in ["get", "set", "unset", "edit"])
)
except PipError as e:
logger.error(e.args[0])
return ERROR
# Load a new configuration
self.configuration = Configuration(
isolated=options.isolated_mode, load_only=load_only
)
self.configuration.load()
# Error handling happens here, not in the action-handlers.
try:
handlers[action](options, args[1:])
except PipError as e:
logger.error(e.args[0])
return ERROR
return SUCCESS
def _determine_file(self, options, need_value):
# Convert legacy venv_file option to site_file or error
if options.venv_file and not options.site_file:
if running_under_virtualenv():
options.site_file = True
deprecated(
"The --venv option has been deprecated.",
replacement="--site",
gone_in="19.3",
)
else:
raise PipError(
"Legacy --venv option requires a virtual environment. "
"Use --site instead."
)
file_options = [key for key, value in (
(kinds.USER, options.user_file),
(kinds.GLOBAL, options.global_file),
(kinds.SITE, options.site_file),
) if value]
if not file_options:
if not need_value:
return None
# Default to user, unless there's a site file.
elif any(
os.path.exists(site_config_file)
for site_config_file in get_configuration_files()[kinds.SITE]
):
return kinds.SITE
else:
return kinds.USER
elif len(file_options) == 1:
return file_options[0]
raise PipError(
"Need exactly one file to operate upon "
"(--user, --site, --global) to perform."
)
def list_values(self, options, args):
self._get_n_args(args, "list", n=0)
for key, value in sorted(self.configuration.items()):
logger.info("%s=%r", key, value)
def get_name(self, options, args):
key = self._get_n_args(args, "get [name]", n=1)
value = self.configuration.get_value(key)
logger.info("%s", value)
def set_name_value(self, options, args):
key, value = self._get_n_args(args, "set [name] [value]", n=2)
self.configuration.set_value(key, value)
self._save_configuration()
def unset_name(self, options, args):
key = self._get_n_args(args, "unset [name]", n=1)
self.configuration.unset_value(key)
self._save_configuration()
def open_in_editor(self, options, args):
editor = self._determine_editor(options)
fname = self.configuration.get_file_to_edit()
if fname is None:
raise PipError("Could not determine appropriate file.")
try:
subprocess.check_call([editor, fname])
except subprocess.CalledProcessError as e:
raise PipError(
"Editor Subprocess exited with exit code {}"
.format(e.returncode)
)
def _get_n_args(self, args, example, n):
"""Helper to make sure the command got the right number of arguments
"""
if len(args) != n:
msg = (
'Got unexpected number of arguments, expected {}. '
'(example: "{} config {}")'
).format(n, get_prog(), example)
raise PipError(msg)
if n == 1:
return args[0]
else:
return args
def _save_configuration(self):
# We successfully ran a modifying command. Need to save the
# configuration.
try:
self.configuration.save()
except Exception:
logger.error(
"Unable to save configuration. Please report this as a bug.",
exc_info=1
)
raise PipError("Internal Error.")
def _determine_editor(self, options):
if options.editor is not None:
return options.editor
elif "VISUAL" in os.environ:
return os.environ["VISUAL"]
elif "EDITOR" in os.environ:
return os.environ["EDITOR"]
else:
raise PipError("Could not determine editor to use.")
|
[
"[email protected]"
] | |
f358510396aee7aceb8657337137844f32866b6c
|
dc760b9503033b97457702f5c0d64ba6beb52d37
|
/tests/blueprints/test_documents.py
|
39417b5f3244fadf8188d949fd91cc5d01bc5c9d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
agdsn/sipa
|
1862fa5f5764a6cb3ab866df724b6b9adeadbfe4
|
5e82221041de1b08129ed43f9f6036c541e2683d
|
refs/heads/develop
| 2023-06-08T23:04:31.054933 | 2023-05-28T11:43:31 | 2023-05-28T11:43:31 | 33,961,711 | 23 | 18 |
MIT
| 2023-09-11T22:32:43 | 2015-04-14T23:09:34 |
Python
|
UTF-8
|
Python
| false | false | 746 |
py
|
import pytest
from tests.assertions import TestClient
@pytest.fixture(scope="module")
def client(module_test_client):
return module_test_client
def test_restricted_area(client: TestClient):
with client.renders_template("login.html"):
resp = client.assert_url_ok(
"/documents_restricted/fake-doc/", follow_redirects=True
)
assert len(resp.history) == 1
assert resp.history[0].location.startswith("/login?")
@pytest.mark.usefixtures("user_logged_in")
def test_restricted_area_logged_in(client: TestClient):
client.assert_url_response_code("/documents_restricted/fake-doc/", 404)
def test_unrestricted_area(client: TestClient):
client.assert_url_response_code("/documents/fake-doc/", 404)
|
[
"[email protected]"
] | |
b9bc7aefa51fa537abb98e21537fec20db0d22ea
|
8f24e443e42315a81028b648e753c50967c51c78
|
/python/ray/train/huggingface/huggingface_trainer.py
|
8afe9c2784b0f74b0cc0380b0d37f83f69b80dec
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
simon-mo/ray
|
d07efdada8d05c6e10417f96e8dfc35f9ad33397
|
1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8
|
refs/heads/master
| 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 |
Apache-2.0
| 2023-03-04T08:56:56 | 2018-02-20T04:47:06 |
Python
|
UTF-8
|
Python
| false | false | 18,092 |
py
|
import importlib.util
import inspect
import os
import sys
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
try:
from packaging.version import Version
except ImportError:
from distutils.version import LooseVersion as Version
import transformers
import transformers.modeling_utils
import transformers.trainer
import transformers.training_args
from transformers.trainer_utils import IntervalStrategy
from transformers.utils import is_datasets_available
from torch.utils.data import Dataset as TorchDataset
from ray.air import session
from ray.air.checkpoint import Checkpoint
from ray.air.config import DatasetConfig, RunConfig, ScalingConfig
from ray.train.constants import (
EVALUATION_DATASET_KEY,
TRAIN_DATASET_KEY,
)
from ray.train.huggingface._huggingface_utils import (
TrainReportCallback,
process_datasets,
wrap_transformers_trainer,
)
from ray.train.torch import TorchConfig, TorchTrainer
from ray.train.trainer import GenDataset
from ray.util import PublicAPI
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
# Due to HF Dataset's dynamic module system, we need to dynamically import the
# datasets_modules module on every actor when training.
# We accomplish this by simply running the following bit of code directly
# in module you are currently viewing. This ensures that when we
# unpickle the HuggingFaceTrainer, it will be ran before pickle tries to
# import datasets_modules and prevents an exception from being thrown.
# Same logic is present inside HF Transformers Ray integration:
# https://github.com/huggingface/transformers/blob/\
# 7d5fde991d598370d961be8cb7add6541e2b59ce/src/transformers/integrations.py#L271
# Also see https://github.com/ray-project/ray/issues/28084
if "datasets_modules" not in sys.modules and is_datasets_available():
import datasets.load
dynamic_modules_path = os.path.join(
datasets.load.init_dynamic_modules(), "__init__.py"
)
# load dynamic_modules from path
spec = importlib.util.spec_from_file_location(
"datasets_modules", dynamic_modules_path
)
datasets_modules = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = datasets_modules
spec.loader.exec_module(datasets_modules)
# This trainer uses a special checkpoint syncing logic.
# Because HF checkpoints are very large dirs (at least several GBs),
# we use directory checkpoints that are synced between nodes when
# required instead of serializing the checkpoints and sending
# bytes over nodes. This is a much more performant solution for
# large directory checkpoints. The current implementation
# is special for HuggingFaceTrainer, but can and should be
# made generic.
# TODO(ml-team): Make dir syncing checkpoint logic generic.
@PublicAPI(stability="alpha")
class HuggingFaceTrainer(TorchTrainer):
"""A Trainer for data parallel HuggingFace Transformers on PyTorch training.
This Trainer runs the ``transformers.Trainer.train()`` method on multiple
Ray Actors. The training is carried out in a distributed fashion through PyTorch
DDP. These actors already have the necessary torch process group already
configured for distributed PyTorch training. If you have PyTorch >= 1.12.0
installed, you can also run FSDP training by specifying the ``fsdp`` argument
in ``TrainingArguments``. For more information on configuring FSDP,
refer to `Hugging Face documentation <https://huggingface.co/docs/transformers/\
main/en/main_classes/trainer#transformers.TrainingArguments>`__.
The training function ran on every Actor will first run the
specified ``trainer_init_per_worker`` function to obtain an instantiated
``transformers.Trainer`` object. The ``trainer_init_per_worker`` function
will have access to preprocessed train and evaluation datasets.
If the ``datasets`` dict contains a training dataset (denoted by
the "train" key), then it will be split into multiple dataset
shards, with each Actor training on a single shard.
All the other datasets will not be split.
Please note that if you use a custom ``transformers.Trainer`` subclass,
the ``get_train_dataloader`` method will be wrapped around to disable
sharding by ``transformers.IterableDatasetShard``, as the dataset will
already be sharded on the Ray AIR side.
HuggingFace loggers will be automatically disabled, and the ``local_rank``
argument in ``TrainingArguments`` will be automatically set. Please note
that if you want to use CPU training, you will need to set the ``no_cuda``
argument in ``TrainingArguments`` manually - otherwise, an exception
(segfault) may be thrown.
This Trainer requires ``transformers>=4.19.0`` package.
Example:
.. code-block:: python
# Based on
# huggingface/notebooks/examples/language_modeling_from_scratch.ipynb
# Hugging Face imports
from datasets import load_dataset
import transformers
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
import ray
from ray.train.huggingface import HuggingFaceTrainer
from ray.air.config import ScalingConfig
model_checkpoint = "gpt2"
tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer"
block_size = 128
datasets = load_dataset("wikitext", "wikitext-2-raw-v1")
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
def tokenize_function(examples):
return tokenizer(examples["text"])
tokenized_datasets = datasets.map(
tokenize_function, batched=True, num_proc=1, remove_columns=["text"]
)
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {
k: sum(examples[k], []) for k in examples.keys()
}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model
# supported it.
# instead of this drop, you can customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [
t[i : i + block_size]
for i in range(0, total_length, block_size)
]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
batch_size=1000,
num_proc=1,
)
ray_train_ds = ray.data.from_huggingface(lm_datasets["train"])
ray_evaluation_ds = ray.data.from_huggingface(
lm_datasets["validation"]
)
def trainer_init_per_worker(train_dataset, eval_dataset, **config):
model_config = AutoConfig.from_pretrained(model_checkpoint)
model = AutoModelForCausalLM.from_config(model_config)
args = transformers.TrainingArguments(
output_dir=f"{model_checkpoint}-wikitext2",
evaluation_strategy="epoch",
save_strategy="epoch",
logging_strategy="epoch",
learning_rate=2e-5,
weight_decay=0.01,
)
return transformers.Trainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
scaling_config = ScalingConfig(num_workers=3)
# If using GPUs, use the below scaling config instead.
# scaling_config = ScalingConfig(num_workers=3, use_gpu=True)
trainer = HuggingFaceTrainer(
trainer_init_per_worker=trainer_init_per_worker,
scaling_config=scaling_config,
datasets={"train": ray_train_ds, "evaluation": ray_evaluation_ds},
)
result = trainer.fit()
Args:
trainer_init_per_worker: The function that returns an instantiated
``transformers.Trainer`` object and takes in the following arguments:
train ``Torch.Dataset``, optional evaluation ``Torch.Dataset``
and config as kwargs. The Torch Datasets are automatically
created by converting the Ray Datasets internally before
they are passed into the function.
datasets: Any Ray Datasets to use for training. Use
the key "train" to denote which dataset is the training
dataset and (optionally) key "evaluation" to denote the evaluation
dataset. Can only contain a training dataset
and up to one extra dataset to be used for evaluation.
If a ``preprocessor`` is provided and has not already been fit,
it will be fit on the training dataset. All datasets will be
transformed by the ``preprocessor`` if one is provided.
trainer_init_config: Configurations to pass into
``trainer_init_per_worker`` as kwargs.
torch_config: Configuration for setting up the PyTorch backend. If set to
None, use the default configuration. This replaces the ``backend_config``
arg of ``DataParallelTrainer``. Same as in ``TorchTrainer``.
scaling_config: Configuration for how to scale data parallel training.
dataset_config: Configuration for dataset ingest.
run_config: Configuration for the execution of the training run.
preprocessor: A ray.data.Preprocessor to preprocess the
provided datasets.
resume_from_checkpoint: A checkpoint to resume training from.
"""
_dataset_config = {
# training dataset should be split by us
"train": DatasetConfig(fit=True, split=True, required=True),
# do not split eval dataset, as HF has a system to parallelize
# evaluation across workers, and it requires each worker
# to have the full eval dataset
"evaluation": DatasetConfig(split=False),
}
def __init__(
self,
trainer_init_per_worker: Callable[
[TorchDataset, Optional[TorchDataset], Any], transformers.trainer.Trainer
],
*,
datasets: Dict[str, GenDataset],
trainer_init_config: Optional[Dict] = None,
torch_config: Optional[TorchConfig] = None,
scaling_config: Optional[ScalingConfig] = None,
dataset_config: Optional[Dict[str, DatasetConfig]] = None,
run_config: Optional[RunConfig] = None,
preprocessor: Optional["Preprocessor"] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
):
# Functionality required for HuggingFaceTrainer only added in this
# version
if Version(transformers.__version__) < Version("4.19.0"):
raise RuntimeError(
"HuggingFaceTrainer requires transformers>=4.19.0, but you "
f"have {transformers.__version__} which is incompatible. "
"Update on all nodes with `pip install -U 'transformers>=4.19.0'`."
)
self._validate_trainer_init_per_worker(
trainer_init_per_worker, "trainer_init_per_worker"
)
trainer_init_config = trainer_init_config.copy() if trainer_init_config else {}
if "_trainer_init_per_worker" in trainer_init_config:
raise ValueError(
"'_trainer_init_per_worker' is a reserved key in `trainer_init_config`."
)
trainer_init_config["_trainer_init_per_worker"] = trainer_init_per_worker
super().__init__(
train_loop_per_worker=_huggingface_train_loop_per_worker,
train_loop_config=trainer_init_config,
torch_config=torch_config,
scaling_config=scaling_config,
dataset_config=dataset_config,
run_config=run_config,
datasets=datasets,
preprocessor=preprocessor,
resume_from_checkpoint=resume_from_checkpoint,
)
def _validate_trainer_init_per_worker(
self, trainer_init_per_worker: Callable, fn_name: str
) -> None:
num_params = len(inspect.signature(trainer_init_per_worker).parameters)
if num_params < 3:
raise ValueError(
f"{fn_name} should take in at least 3 arguments, "
f"but it accepts {num_params} arguments instead."
)
def _validate_attributes(self):
for key, conf in self._dataset_config.items():
if conf.use_stream_api:
raise ValueError(
"HuggingFaceTrainer does not support `use_stream_api`."
)
gpus_per_worker = self.scaling_config.num_gpus_per_worker
if gpus_per_worker > 1:
raise ValueError(
f"You have assigned {gpus_per_worker} GPUs per worker. "
"This is not supported by HuggingFace, which expects "
"one GPU per worker in DDP mode and will fail "
"if more are assigned."
)
if gpus_per_worker != int(gpus_per_worker):
raise ValueError(
f"You have assigned {gpus_per_worker} GPUs per worker, "
"but fractional GPUs are not supported by HuggingFace."
)
super()._validate_attributes()
def _huggingface_train_loop_per_worker(config):
"""Per-worker training loop for HuggingFace Transformers."""
trainer_init_per_worker = config.pop("_trainer_init_per_worker")
# Env vars necessary for HF to setup DDP
os.environ["RANK"] = str(session.get_world_rank())
os.environ["WORLD_SIZE"] = str(session.get_world_size())
os.environ["LOCAL_RANK"] = str(session.get_local_rank())
train_dataset = session.get_dataset_shard(TRAIN_DATASET_KEY)
eval_dataset = session.get_dataset_shard(EVALUATION_DATASET_KEY)
train_torch_dataset, eval_torch_dataset = process_datasets(
train_dataset,
eval_dataset,
)
trainer: transformers.trainer.Trainer = trainer_init_per_worker(
train_torch_dataset, eval_torch_dataset, **config
)
strategies = [
strategy
for strategy in (trainer.args.evaluation_strategy, trainer.args.save_strategy)
if strategy not in ("no", IntervalStrategy.NO)
]
strategies = [trainer.args.logging_strategy] + strategies
if not all(strategy == strategies[0] for strategy in strategies[1:]):
raise ValueError(
"When using Ray AIR,`logging_strategy`, `evaluation_strategy` "
"and `save_strategy` must all be set to the same value. "
"`evaluation_strategy` or `save_strategy` may also be set to 'no'.\n"
f"Got `logging_strategy`={trainer.args.logging_strategy}\n"
f"`evaluation_strategy`={trainer.args.evaluation_strategy}\n"
f"`save_strategy`={trainer.args.save_strategy}"
)
if trainer.args.save_strategy in ("steps", IntervalStrategy.STEPS):
if (
trainer.args.save_steps < trainer.args.logging_steps
or trainer.args.save_steps % trainer.args.logging_steps != 0
):
raise ValueError(
"When using 'steps' `save_strategy`, `save_steps` must be "
"equal or bigger to `logging_steps`, and must be divisible "
"by `logging_steps` (so that saving occurs at the same time "
f"logging does). Got `save_steps`={trainer.args.save_steps}, "
f"`logging_steps`={trainer.args.logging_steps}."
)
if trainer.args.evaluation_strategy in ("steps", IntervalStrategy.STEPS):
if trainer.args.logging_steps != trainer.args.eval_steps:
raise ValueError(
"`logging_steps` must be equal to `eval_steps`. "
f"Got `logging_steps`={trainer.args.logging_steps}, "
f"`eval_steps`={trainer.args.eval_steps}"
)
if trainer.args.load_best_model_at_end:
raise ValueError(
"As Ray AIR replaces Hugging Face checkpointing, "
"`load_best_model_at_end` must be set to False.\n"
"You can obtain the AIR Checkpoint with "
"`Result.checkpoint` returned by the `fit()` method "
"of this Trainer, and the model itself by calling "
"`Checkpoint.get_model()`.\n"
"You can configure the checkpointing by setting "
"`run_config.checkpoint_config`."
)
if trainer.args.push_to_hub and not trainer.args.hub_token:
warnings.warn(
"You have set `push_to_hub=True` but didn't specify `hub_token`. "
"Pushing to hub will most likely fail, as the credentials will not "
"be automatically propagated from the local enviroment to the Ray Actors. "
"If that happens, specify `hub_token` in `TrainingArguments`."
)
trainer = wrap_transformers_trainer(trainer)
# ensure no HF logging callbacks are added
# aside from doubling functionality with our callbacks,
# the Wandb callbacks causes training to freeze
integration_callbacks = transformers.trainer.get_reporting_integration_callbacks(
trainer.args.report_to
)
for callback in integration_callbacks:
trainer.pop_callback(callback)
trainer.add_callback(TrainReportCallback)
checkpoint = session.get_checkpoint()
if checkpoint:
with checkpoint.as_directory() as checkpoint_path:
trainer.train(resume_from_checkpoint=checkpoint_path)
else:
trainer.train()
|
[
"[email protected]"
] | |
f631f0ad314cc54e012f56dd0631e587b44f8930
|
dd449ad8388847779b265f49f2339c9681376c60
|
/a_star_algo/algo.py
|
2d3e0aea892a843f490f31666f9a20dbc5402a30
|
[] |
no_license
|
whoji/training-ground
|
478d76a8c274050eb910b28729ca1d1cdb47eae9
|
b107cc47c4a04bb8868c410ab207bacab5a86e4c
|
refs/heads/master
| 2020-05-16T16:13:26.788156 | 2019-12-04T01:56:01 | 2019-12-04T01:56:01 | 183,154,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,369 |
py
|
# https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
class Node():
"""A node class for A* Pathfinding"""
def __init__(self, parent=None, position=None):
self.parent = parent
self.position = position
self.g = 0 # G is the distance between the current node and the start node.
self.h = 0. # H is the heuristic - estimated distance from the current node to the end node.
        # using the Euclidean distance squared: a^2 + b^2 = c^2
self.f = 0. # F is the total cost of the node.
def __eq__(self, other):
return self.position == other.position
def get_node_with_smallest_f(open_list):
assert open_list != []
current_node = open_list[0]
current_index = 0
for index, item in enumerate(open_list):
if item.f < current_node.f:
current_node = item
current_index = index
return current_node, current_index
def get_path_to_node(current_node):
path = []
current = current_node
while current is not None:
path.append(current.position)
current = current.parent
return path[::-1] # Return reversed path
def get_children_nodes(current_node, maze):
# get all the children / neighbors
children = []
for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]:
# Adjacent squares
node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])
if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:
continue
if maze[node_position[0]][node_position[1]] != 0:
continue
new_node = Node(current_node, node_position)
children.append(new_node)
return children
def AStar(maze, start, end):
# Returns a list of tuples as a path from the
# given start to the given end in the given maze
# Create start and end node
start_node = Node(None, start)
start_node.g = start_node.h = start_node.f = 0
end_node = Node(None, end)
end_node.g = end_node.h = end_node.f = 0
# Initialize both open and closed list
open_list = [] # like a frontier
    closed_list = []  # like where we came from
# Add the start node
open_list.append(start_node)
# Loop until you find the end
while len(open_list) > 0:
# Get the current node
current_node, current_index = get_node_with_smallest_f(open_list)
# Pop current off open list, add to closed list
open_list.pop(current_index)
closed_list.append(current_node)
# Found the goal
if current_node == end_node:
# print("DONE")
return get_path_to_node(current_node)
children = get_children_nodes(current_node, maze)
children = [c for c in children if c not in closed_list]
for child in children:
child.g = current_node.g + 1
child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)
child.f = child.g + child.h
            # Skip this child if it is already on the open list with an equal or
            # better g score. (The original inner-loop `continue` only advanced to
            # the next open node, so every child was appended regardless.)
            if any(child == open_node and child.g >= open_node.g for open_node in open_list):
                continue
            open_list.append(child)
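# --- Hedged usage sketch (added; maze layout assumed, in the spirit of the article) ---
if __name__ == "__main__":
    demo_maze = [
        [0, 0, 0, 0, 1],
        [1, 1, 0, 1, 0],
        [0, 0, 0, 0, 0],
        [0, 1, 1, 1, 0],
        [0, 0, 0, 0, 0],
    ]
    # prints a list of (row, col) tuples from (0, 0) to (4, 4)
    print(AStar(demo_maze, (0, 0), (4, 4)))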
|
[
"[email protected]"
] | |
d680f8875b14ff6b14fad6a3d87412f63c377f03
|
2dd0082221239fef0e0894c852f70f1eaeb62b9e
|
/Assignments/pete/python/curses/curses8/curses7.py
|
58231c63220cd044d2e1c7060580b9eaab06acdc
|
[] |
no_license
|
pjz987/2019-10-28-fullstack-night
|
03097cf3dc24aeec0c326044bb0fc99385fbc333
|
4c643013de73f08d7503d62ec602d6a5c80ffa7e
|
refs/heads/master
| 2022-11-11T19:40:00.296645 | 2020-06-25T16:14:47 | 2020-06-25T16:14:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,750 |
py
|
import argparse
import curses
parser = argparse.ArgumentParser()
parser.add_argument('-lvl', action='store', dest='version_arg', type=int)
parsed_args = parser.parse_args()
LEVEL = parsed_args.version_arg or 1
import random
import time
'''
Above, argparse and curses are imported and set up.
Below I will establish the Classes before getting into the different LEVELs.
'''
class Sprite():
def __init__(self, y, x, uni):
self.y = y
self.x = x
self.uni = uni
def __str__(self):
return self.uni
class Item(Sprite):
def __init__(self, y, x, uni, id, uses, range):
super().__init__(y, x, uni)
self.id = id
self.uses = uses
self.range = range
def __repr__(self):
return f"{self.uni}:{self.uses}/{self.range}"
class Character(Sprite):
def __init__(self, y, x, uni, uni2):
super().__init__(y, x, uni)
self.uni2 = uni2
self.inv = []
def __str__(self):
return self.uni#, self.uni2
def mouth_string(self):
return self.uni2
def move(self, direction, steps):
# change the y/x position of the character
pass
def attack(self, direction, weapon):
#attack and everything
pass
hero = Character(13, 46, '{00}', '-__-')
enemies = [
Character(1, 96, '|><|', '|==|'),
Character(7, 26, '|><|', '|==|'),
Character(10, 61, '|><|', '|==|'),
Character(13, 41, '|><|', '|==|'),
]
def gen_enemies(hero, enemies, num=4):
tiles = []
# [tiles.append(coord) for ]
for y in range(23):
for x in range(97):
if y % 3 == 1 and x % 5 == 1:
tiles.append((y, x))
tiles.remove((hero.y, hero.x))
for i in range(num):
co_ord = random.choice(tiles)
y = co_ord[0]
x = co_ord[1]
enemy = Character(y, x, '|><|', '|==|')
enemies.append(enemy)
tiles.remove((co_ord))
return tiles
enemies = []
enemies_num = random.randrange(3, 7)
tiles = gen_enemies(hero, enemies, enemies_num)
def gen_items(item_attr_list, tiles, num=3):
items = []
for i in range(num):
co_ord = random.choice(tiles)
tiles.remove(co_ord)
y = co_ord[0]
x = co_ord[1]
item_attr = random.choice(item_attr_list)
item = Item(y, x, item_attr[0], item_attr[1], item_attr[2], item_attr[3])
items.append(item)
return items
# items = [
# Item(20, 16, '🏹', 'bow'),
# Item(19, 5, '🔫', 'gun')
# ]
item_attr_list = [
('🏹', 'bow', 1, 10),
('🗡', 'sword', 3, 1),
('🔫', 'gun', 2, 5)
]
items_num = random.randrange(2, 5)
items = gen_items(item_attr_list, tiles, items_num)
unicode_storage_list = ['🗡', '⚔', '🔫', '🏹', '🛡', '🔑', '🗝', '❤', '☠', '☠', '⬆', '➡', '⬇', '⬅']
moves = [[0, 1], [0, -1], [1, 0], [-1, 0]]
key_list = ['KEY_UP', 'KEY_DOWN', 'KEY_RIGHT', 'KEY_LEFT']
def fix_pos(sprite): #converted this from function to method
if sprite.y < 1:
sprite.y = 1
if sprite.y > 22:
sprite.y = 22
if sprite.x < 1:
sprite.x = 1
if sprite.x > 96:
sprite.x = 96
def aim(hero, wasd):#converted
if wasd == 'w':
game_screen.addstr(hero.y - 1, hero.x, '⬆')
elif wasd == 'a':
game_screen.addstr(hero.y, hero.x - 1, '⬅')
elif wasd == 's':
game_screen.addstr(hero.y + 1, hero.x, '⬇')
elif wasd == 'd':
game_screen.addstr(hero.y, hero.x + 2, '➡')
draw_screen(hero, enemies, items, game_screen)
def shoot(hero, enemies, aim_dir, game_screen):#converted
if hero.inv:
for enemy in enemies:
if (aim_dir == 'w' and hero.x == enemy.x and hero.y > enemy.y) or (aim_dir == 'a' and hero.y == enemy.y and hero.x > enemy.x) or (aim_dir == 's' and hero.x == enemy.x and hero.y < enemy.y) or (aim_dir == 'd' and hero.y == enemy.y and hero.x < enemy.x):
enemy.uni = '☠'
draw_screen(hero, enemies, items, game_screen)
time.sleep(1)
enemies.remove(enemy)
hero.inv[0].uses -= 1
if hero.inv[0].uses == 0:
hero.inv.remove(hero.inv[0])
def enemy_move(hero, enemies):
for enemy in enemies:
y_or_x = random.choice(['y', 'x'])
if enemy.y == hero.y:
y_or_x = 'x'
elif enemy.x == hero.x:
y_or_x = 'y'
if y_or_x == 'y':
if enemy.y > hero.y:
enemy.y -= 3
else:
enemy.y += 3
        else:  # y_or_x == 'x' (the original bare comparison here was a no-op, not an assignment)
if enemy.x > hero.x:
enemy.x -= 5
else:
enemy.x += 5
fix_pos(enemy)
def draw_screen(hero, enemies, items, game_screen):
game_screen.clear()
for y in range(26):
for x in range(100):
if x % 5 == 0 :
game_screen.addstr(y, x, '|')
if y % 3 == 0:
game_screen.addstr(y, x, '-')
if dead == True:
hero.uni = '☠'
game_screen.addstr(1, 1, "AND YOU DEAD")
[game_screen.addstr(item.y + 1, item.x + 1, str(item)) for item in items]
[game_screen.addstr(enemy.y, enemy.x, str(enemy)) for enemy in enemies]
[game_screen.addstr(enemy.y + 1, enemy.x, enemy.mouth_string()) for enemy in enemies]
game_screen.addstr(hero.y, hero.x, str(hero))
game_screen.addstr(hero.y + 1, hero.x, hero.mouth_string())
game_screen.addstr(25, 1, f"Inventory: {hero.inv}")
game_screen.addstr(25, 35, f"Screen Size: {game_screen.getmaxyx()}")
game_screen.addstr(25, 70, f"Hero Postion: {hero.y, hero.x}")
if won:
game_screen.addstr(1, 1, "YOU WON!")
# game_screen.addstr(2, 1, f"{game_screen.getmaxyx()}")
game_screen = curses.initscr()
curses.curs_set(0)
print(game_screen.getmaxyx())
won = False
dead = False
game_screen.keypad(True)
game_screen.clear()
draw_screen(hero, enemies, items, game_screen)
game_screen.addstr(2, 41, "Arrow Keys To Move")
game_screen.addstr(5, 41, "WASD To Aim")
game_screen.addstr(8, 41, "SPACE To Shoot")
# game_screen.addstr(hero.y, hero.x, str(hero))
# game_screen.addstr(hero.y + 1, hero.x, hero.mouth_string())
# [game_screen.addstr(item.y, item.x, str(item)) for item in items]
# [game_screen.addstr(enemy.y, enemy.x, str(enemy)) for enemy in enemies]
# [game_screen.addstr(enemy.y + 1, enemy.x, enemy.mouth_string()) for enemy in enemies]
# game_screen.addstr(21, 5, f"Inventory: {hero.inv}")
# for enemy in enemies:
# game_screen.addstr(enemy.y, enemy.x, str(enemy))
aim_dir = 'w'  # default aim direction; avoids a NameError if SPACE is pressed before any WASD aim key
while True:
in_key = game_screen.getkey()
if in_key == 'q':
curses.endwin()
break
for enemy in enemies:
if enemy.x == hero.x and enemy.y == hero.y:
dead = True
if dead == False and in_key in ['KEY_UP', 'KEY_DOWN', 'KEY_RIGHT', 'KEY_LEFT']:
if in_key == key_list[0]:
hero.y -= 3
elif in_key == key_list[1]:
hero.y += 3
elif in_key == key_list[2]:
hero.x += 5
elif in_key == key_list[3]:
hero.x -= 5
fix_pos(hero)
for item in items:
if item.y == hero.y and item.x == hero.x:
hero.inv.append(item)
items.remove(item)
enemy_move(hero, enemies)
if dead == False and in_key in ['w', 'a', 's', 'd']:
aim(hero, in_key)
aim_dir = in_key
draw_screen(hero, enemies, items, game_screen)
if dead == False and in_key == ' ':
shoot(hero, enemies, aim_dir, game_screen)
enemy_move(hero, enemies)
if enemies == []:
won = True
draw_screen(hero, enemies, items, game_screen)
# print(game_screen.getmaxyx())
|
[
"[email protected]"
] | |
c8eb4290d28e0c5f79fcabc5ae365815fdd0e68a
|
46ad22b772f0bb115e1192ca24c86b1593d51870
|
/tools/vidcap/vidcap.py
|
3488605089f48448b70aa5f2b26896fe570106bb
|
[] |
no_license
|
cosmologicon/unifac
|
fb533abfbba7ebb33561a330f7be5d22dbc2a373
|
e7668c6736cd4db66f8d56e945afb69ec03f2160
|
refs/heads/master
| 2022-06-15T10:46:28.448477 | 2022-05-30T20:26:55 | 2022-05-30T20:26:55 | 37,033,765 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,655 |
py
|
# Linux/pygame video capture utility by Christopher Night - public domain
# Requires mogrify, oggenc (if there's audio), and mencoder
# 0. Install dependencies: sudo apt-get install imagemagick vorbis-tools mencoder
# 1. Recording. Place this file into your source directory. Within your game, import it with
# "import vidcap". Then the recording will start when you call pygame.init(). That's all there
# is to it. (For best synchronization results, import vidcap before any of your other modules.)
# 2. Encoding. When you're done, simply run vidcap.py as "python vidcap.py" or whatever. This
# will create an AVI in the vidcap directory called vidcap.avi. Watch and enjoy!
# To delete a video and all of its files, simply remove the directory.
# Advanced API:
# vidcap.record() : begin recording
# vidcap.stop() : stop recording
# vidcap.toggle() : toggle recording
# vidcap.cap() : explicitly capture a frame. You don't need to call this directly, it's done
# automatically whenever you call pygame.display.flip().
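#
# Minimal usage sketch (hypothetical game file, following steps 1-2 above):
#     # mygame.py
#     import vidcap              # import before pygame.init() for best sync
#     import pygame
#     pygame.init()              # recording starts here while wrappygame is True
#     ...                        # normal game loop calling pygame.display.flip()
# then encode from a shell: python vidcap.py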
wrappygame = True # Should this module wrap pygame.display.flip and pygame.init so that the video
# is automatically recorded when you call these functions? If this is False,
# you'll need to call vidcap.cap() once per frame
# Audio will not work if this is set to False.
viddir = None # You can set this to an explicit directory path if you like
# Otherwise it will default to a directory with a current timestamp
recordsymbol = True # Put a "recording" symbol in the corner when recording
# The symbol itself doesn't get recorded
usepng = False # Use png rather than bmp (takes less disk space but is slower)
drawmouse = True # Draw the mouse cursor onto the recording
import pygame, datetime, os, inspect, subprocess, glob
_recording = True
_recordaudio = True
_logging = True
def timestamp(t = None):
"""10-digit timestamp"""
return str(10000000000 + (pygame.time.get_ticks() if t is None else t))[1:]
def defaultdir():
"""Default (timestamp-based) directory name"""
return datetime.datetime.now().strftime("vidcap-%Y%m%d%H%M%S")
def checkdir():
"""Make sure viddir is set and the path exists"""
global viddir
if viddir is None: viddir = defaultdir()
if not os.path.exists(viddir):
os.mkdir(viddir)
def lastdir():
"""The latest timestamp-based directory"""
dirs = [d for d in os.listdir(".") if d.startswith("vidcap-")]
return max(dirs) if dirs else None
def currentimagepath(exten = None, t = None):
checkdir()
if exten is None: exten = "png" if usepng else "bmp"
fname = "frame-" + timestamp(t) + "." + exten
return os.path.join(viddir, fname)
def isimagepath(path, exten = "png"):
"""Does the path describe a timestamped image?"""
return path.endswith("." + exten) and path[-14:-4].isdigit() and path[-20:-14] == "frame-"
def isaudiopath(path, exten = "ogg"):
"""Does the path describe a timestamped audio?"""
return path.endswith("." + exten) and path[-14:-4].isdigit() and path[-20:-14] == "audio-"
def blankpath():
"""Pathname of the blank frame"""
checkdir()
return os.path.join(viddir, "frame-blank.png")
def makeblankframe(anyframe, color=(0,0,0)):
"""Make the blank frame. Must be passed some image to get the dimensions right"""
surf = pygame.image.load(anyframe)
surf.fill(color)
pygame.image.save(surf, blankpath())
def fadepath(basefile, (r, g, b, a)):
"""Return the path to a copy of the given image overwritten with the given color (and create
the image if it doesn't exist)"""
if a == 0: return basefile
cstr = "0x" + hex((1 << 32) + (r << 24) + (g << 16) + (b << 8) + a)[3:]
filename = basefile[:-4] + "-" + cstr + basefile[-4:]
if os.path.exists(filename): return filename
color = pygame.Color(r, g, b, a)
img = pygame.image.load(basefile).convert_alpha()
img2 = img.copy()
img2.fill(color)
img.blit(img2, (0, 0))
pygame.image.save(img, filename)
return filename
def framelistpath():
return os.path.join(viddir, "framelist.txt")
def currentaudiopath():
checkdir()
return os.path.join(viddir, "audio-%s.raw" % timestamp())
def logpath():
checkdir()
return os.path.join(viddir, "log.txt")
def log(line):
if not _logging: return
f = open(logpath(), "a")
f.write(timestamp() + " " + line + "\n")
f.close()
def eventlogpath():
checkdir()
return os.path.join(viddir, "event-log.txt")
def logevent(line):
if not _logging: return
f = open(eventlogpath(), "a")
f.write(timestamp() + " " + line + "\n")
f.close()
def getcursorimg(cache = {}):
key = pygame.mouse.get_cursor()
if key in cache: return cache[key]
(mx, my), (hx, hy), xormasks, andmasks = key
img = pygame.Surface((mx, my)).convert_alpha()
img.fill((0,0,0,0))
for y in range(my):
for x in range(mx):
j = y * mx + x
if andmasks[j/8] & (1 << (7-j%8)):
img.set_at((x, y), (255, 255, 255))
if xormasks[j/8] & (1 << (7-j%8)):
img.set_at((x, y), (0, 0, 0))
cache[key] = img, (hx, hy)
return cache[key]
_mousevisible = True
def addcursor(surf):
if drawmouse and _mousevisible and pygame.mouse.get_focused():
img, (hx, hy) = getcursorimg()
px, py = pygame.mouse.get_pos()
rect = img.get_rect(topleft = (px-hx, py-hy))
surf.blit(img, rect)
pmsvis = pygame.mouse.set_visible
def setmousevis(vis):
global _mousevisible
_mousevisible = vis
return pmsvis
def stop():
global _recording
if not _recording: return
_recording = False
logevent("stop")
logevent("fadetoblack 500")
logevent("fadeoutaudio 500")
def record():
global _recording
if _recording: return
_recording = True
logevent("record")
logevent("fadefromblack 1000")
logevent("fadeinaudio 1000")
def toggle():
if _recording:
stop()
else:
record()
def getmonitorsource():
p = subprocess.Popen("pactl list".split(), stdout = subprocess.PIPE)
out, err = p.communicate()
mlines = [line for line in out.splitlines() if "Monitor Source: " in line and "analog" in line]
assert len(mlines) == 1
mline = mlines[0]
_, _, monitorsource = mline.partition("Monitor Source: ")
return monitorsource
def unmutemonitorsource(monitorsource = None):
if monitorsource is None: monitorsource = getmonitorsource()
stdin = "set-source-mute %s false" % monitorsource
p = subprocess.Popen(["pacmd"], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
_, _ = p.communicate(stdin)
_audioprocess = None # Set to None when audio recording is off
# Set to the AudioProcess instance when audio recording is in progress
# Module-level private because we need it to be GC'd, so please don't make
# reference to it.
class AudioProcess(object):
"""Use RAII to make sure the audio recording gets shut down when we're done"""
format = "s16le"
def __init__(self, filename = None, monitorsource = None):
self.filename = filename or currentaudiopath()
self.monitorsource = monitorsource or getmonitorsource()
self.com = "parec --format=%s --device=%s" % (self.format, self.monitorsource)
log("audiostart %s %s" % (self.filename, self.format))
self.file = open(self.filename, "wb")
self.process = subprocess.Popen(self.com.split(), stdout = self.file)
def terminate(self):
if self.process:
log("audiostop")
self.process.terminate()
self.process, self.file = None, None
def __del__(self):
self.terminate()
def startaudiorecording(filename = None, monitorsource = None):
global _audioprocess
if not _recordaudio: return
if _audioprocess: return # Already recording
if monitorsource is None: monitorsource = getmonitorsource()
unmutemonitorsource(monitorsource)
_audioprocess = AudioProcess(filename = filename, monitorsource = monitorsource)
def stopaudiorecording():
global _audioprocess
_audioprocess.terminate()
_audioprocess = None
def cap(screen = None):
"""Call this once a frame to capture the screen"""
global _recordaudio
if not _recording:
log("cap")
return
if screen is None: screen = pygame.display.get_surface()
fname = currentimagepath()
log("cap " + fname)
addcursor(screen)
pygame.image.save(screen, fname)
if recordsymbol and pygame.time.get_ticks() / 250 % 2:
pygame.draw.circle(screen, (255, 0, 0), (14, 14), 10, 0)
startaudiorecording()
pdflip = pygame.display.flip
def capandflip():
cap()
pdflip()
pinit = pygame.init
def init():
log("init")
startaudiorecording()
pinit()
def convertallbmps():
"""Convert all bmps in the vidcap directory into pngs (requires mogrify) - slow!"""
if not glob.glob(os.path.join(viddir, "*.bmp")): return
print "mogrify -format png " + os.path.join(viddir, "*.bmp")
os.system("mogrify -format png " + os.path.join(viddir, "*.bmp"))
os.system("rm " + os.path.join(viddir, "*.bmp"))
def convertaudio():
"""Convert raw audio in the vidcap directory into oggs"""
for f in os.listdir(viddir):
if not f.endswith(".raw"): continue
rawfile = os.path.join(viddir, f)
oggfile = rawfile[:-4] + ".ogg"
if os.path.exists(oggfile): continue
os.system("oggenc --raw --quiet -o %s %s" % (oggfile, rawfile))
def interpolateframes(fts, nframes, dt, t0 = 0):
# TODO: better interpolation function
iframes = []
index = 1 # The first frame that's later than the current timestamp
for jframe in range(nframes):
t = float(jframe) * dt + t0
while index < len(fts) and t > fts[index][0]:
index += 1
iframes.append([t, fts[index-1][1]])
return iframes
# The following class is used for audio logging. We use a wrapper around pygame.mixer that logs all
# access to the module. Later, this can be reconstructed by reading the log.
class LogAlias(object):
"""An alias to an object that logs all calls made."""
_aliasList = {}
_listname = "objs" # How the array should be written in the log
_nAlias = 0
def __init__(self, obj, name, ongetattr = None):
self._obj = obj
self._name = name # This is a string that can be eval'd to give self._obj later
self._n = self._nAlias
self._aliasList[LogAlias._nAlias] = self
self._log("%s[%s] = %s" % (self._listname, self._n, self._name))
self._ongetattr = ongetattr # Callback when self.__getattr__ is called
LogAlias._nAlias += 1
@staticmethod
def _lname(obj):
"""This is the name of this object via the alias list, if applicable"""
return "%s[%s]" % (LogAlias._listname, obj._n) if isinstance(obj, LogAlias) else repr(obj)
def __getattr__(self, attr):
"""self.x is a LogAlias wrapper around self._obj.x"""
if self._ongetattr: self._ongetattr()
if attr not in self.__dict__:
obj = getattr(self._obj, attr)
name = LogAlias._lname(self) + "." + attr
self.__dict__[attr] = LogAlias(obj, name)
return self.__dict__[attr]
def __call__(self, *args, **kw):
argstr = ", ".join(LogAlias._lname(arg) for arg in args)
kwstr = ", ".join("%s = %s" % (key, LogAlias._lname(value)) for key, value in kw.items())
callstr = "%s(%s%s%s)" % (LogAlias._lname(self), argstr, (", " if argstr and kwstr else ""), kwstr)
ret = self._obj(*args, **kw)
if inspect.isclass(self._obj): # If constructing a new instance...
return LogAlias(ret, callstr) # ...return a LogAlias wrapping the instance
self._log(callstr)
return ret
def __repr__(self):
return "LogAlias(%r)" % repr(self._obj)
def _log(self, text):
"""Add the specified text to the log along with timestamp"""
log("alias " + text)
_wrapped = False
if __name__ != "__main__":
mixer = LogAlias(pygame.mixer, "pygame.mixer")
if wrappygame and not _wrapped:
pygame.mixer = mixer
pygame.display.flip = capandflip
pygame.init = init
pygame.mouse.set_visible = setmousevis
_wrapped = True
if __name__ == "__main__":
# Encode the images and audio into an AVI file
import sys, numpy
fps = 25
audiofreq = 44100
fixedfps = True
remakeaudio = True
_logging = False
viddir = sys.argv[1] if len(sys.argv) > 1 else lastdir()
if not viddir:
print "Vidcap directory not found!"
print "Please specify a directory on the command line."
sys.exit()
print "vidcap directory is %s" % viddir
print "Converting BMPs into PNGs...."
convertallbmps()
# Analyze log file
objs = {}
logcomms = []
audiorecs = []
captimes = []
for line in open(logpath(), "r"):
words = line.split()
if len(words) < 2: continue
t = int(words[0])
if words[1] == "init":
pass
if words[1] == "audiostart":
audiorecs.append((int(words[0]), words[2]))
if words[1] == "alias":
logcomms.append((t, " ".join(words[2:]).strip()))
if words[1] == "cap":
captimes.append(int(words[0]))
print captimes
events = []
if os.path.exists(eventlogpath()):
for line in open(eventlogpath(), "r"):
words = line.split()
if len(words) < 2: continue
events.append((int(words[0]), " ".join(words[1:])))
if fixedfps:
import bisect
def fixt(t):
"""Convert a timestamp into a fixed-fps timestamp"""
dt = 1000. / fps
i1 = bisect.bisect(captimes, t)
if i1 == 0: return t - captimes[0]
if i1 == len(captimes): return (len(captimes) - 1) * dt + t - captimes[-1]
i0 = i1 - 1
t0, t1 = captimes[i0], captimes[i1]
jframe = i0 + (t - t0) / float(t1 - t0)
return int(jframe * dt)
audiorecs = [(fixt(t), rec) for t, rec in audiorecs]
logcomms = [(fixt(t), comm) for t, comm in logcomms]
events = [(fixt(t), event) for t, event in events]
fades = []
audiofades = []
for t, event in events:
if event.startswith("fadetoblack"):
dt = int(event.partition(" ")[2])
fades.append((t-dt, t, 0, 255, 0, 0, 0))
if event.startswith("fadefromblack"):
dt = int(event.partition(" ")[2])
fades.append((t, t+dt, 255, 0, 0, 0, 0))
if event.startswith("fadeoutaudio"):
dt = int(event.partition(" ")[2])
audiofades.append((t-dt, t, 1, 0))
if event.startswith("fadeinaudio"):
dt = int(event.partition(" ")[2])
audiofades.append((t, t+dt, 0, 1))
frames0 = sorted([f for f in os.listdir(viddir) if isimagepath(f)])
fts = [(int(frame[6:16]), os.path.join(viddir, frame)) for frame in frames0]
if fixedfps:
fts = [(fixt(t), f) for t, f in fts]
tend = fts[-1][0]
if remakeaudio:
basename = os.path.join(viddir, "audio-re" + str(fps if fixedfps else "") + "-%s.raw")
existing = glob.glob(basename % "*")
if existing:
assert len(existing) == 1
filename = existing[0]
print "Audio found at %s" % filename
else:
pygame.quit()
pygame.init()
filename = basename % timestamp()
print "Re-recording audio to %s...." % filename
startaudiorecording(filename = filename)
while pygame.time.get_ticks() < tend:
pygame.time.delay(1)
while logcomms and pygame.time.get_ticks() >= logcomms[0][0]:
t, comm = logcomms.pop(0)
exec(comm)
stopaudiorecording()
pygame.mixer.quit()
t = int(filename[-14:-4])
audiorecs = [(t, filename)]
else:
assert len(audiorecs) == 1
print "Audio found at %s" % audiorecs[0][1]
starts = [t for t, e in events if e == "record"] or [0]
ends = [t for t, e in events if e == "stop"] or [tend]
if min(ends) <= min(starts): starts = [0] + starts
if max(starts) >= max(ends): ends = ends + [tend]
intervals = [(start, int((end - start) * fps / 1000.)) for start, end in zip(starts, ends)]
screensize = pygame.image.load(fts[0][1]).get_size()
screen = pygame.display.set_mode(screensize)
makeblankframe(fts[0][1])
fts = [(-1, blankpath())] + fts
print "Number of clips: %s" % len(intervals)
ftlist = []
for jclip, (start, nframes) in enumerate(intervals):
print "Number of frames in clip #%s: %s" % (jclip+1, nframes)
ftlist += interpolateframes(fts, nframes, 1000. / fps, start)
for j in range(len(ftlist)):
t, f = ftlist[j]
for t1, t2, a1, a2, r, g, b in fades:
if t1 <= t <= t2:
a = int(a1 + float(t - t1) * (a2 - a1) / (t2 - t1))
f = fadepath(f, (r, g, b, a))
ftlist[j] = t, f
_, framelist = zip(*ftlist)
open(framelistpath(), "w").write("\n".join(framelist))
# TODO: probably not necessary to do this every time
print "Converting frames...."
for jframe, frame in enumerate(framelist):
img = pygame.image.load(frame).convert()
pygame.image.save(img, frame)
if jframe % 2 == 0:
screen.blit(img, (0,0))
pygame.display.flip()
print "Combining audio..."
naudiosamp = int(audiofreq * tend / 1000.)
audioarr = numpy.zeros((naudiosamp, 2), dtype = numpy.int16)
for t, afilename in audiorecs:
aclip = numpy.reshape(numpy.fromfile(open(afilename, "rb"), dtype = numpy.int16), (-1, 2))
s0 = int(audiofreq * t / 1000.)
s1 = s0 + aclip.shape[0]
if s1 > naudiosamp:
aclip = aclip[:naudiosamp-s1,]
s1 = naudiosamp
audioarr[s0:s1,] = aclip
for t1, t2, v1, v2 in audiofades:
s1 = int(audiofreq * t1 / 1000.)
s2 = int(audiofreq * t2 / 1000.)
n = s2 - s1
fac = numpy.transpose([v1 + numpy.arange(n) * ((v2 - v1) / float(n))] * 2)
if s2 > naudiosamp:
fac = fac[:naudiosamp-s2,]
s2 = naudiosamp
# print s1, s2, n, audioarr.shape, fac.shape, audioarr[s1:s2,].shape
audioarr[s1:s2,] *= fac
rawfile = os.path.join(viddir, "audio.raw")
oggfile = os.path.join(viddir, "audio.ogg")
aindices = [(int(audiofreq * t / 1000.), int(nframes * audiofreq / fps)) for t, nframes in intervals]
numpy.concatenate([audioarr[i:i+j,] for i, j in aindices]).tofile(open(rawfile, "wb"))
# audioarr.tofile(open(rawfile, "wb"))
os.system("oggenc --raw --quiet -o %s %s" % (oggfile, rawfile))
com = []
com.append("mencoder")
# com.append("mf://%s/*.png" % viddir)
com.append("mf://@%s" % framelistpath())
com.append("-mf fps=%s:type=png" % fps)
com.append("-ovc copy")
com.append("-oac pcm -audiofile %s" % oggfile if oggfile else "-oac copy")
com.append("-o %s/vidcap.avi" % viddir)
com = " ".join(com)
print
print "Encoding video...."
print com
os.system(com) # TODO: check for errors
print
print "Video created:", os.path.join(viddir, "vidcap.avi")
|
[
"[email protected]"
] | |
2c1dcce271be95ff71696e0634eac1611b1af8d3
|
81acce1d49924d89e6ebf5a472ad5b1b80cc202c
|
/qcdScale/qcdFitter.py
|
7650be090a5d03a503d1e3c513a0cb4b5ae83b62
|
[] |
no_license
|
truggles/Z_to_TauTau_13TeV
|
36a85b024052fcfef3c9efd8aebc63dc85744f7b
|
123fe0d25f8e926d8959f54cd4f64122394b60d5
|
refs/heads/master
| 2021-03-13T01:50:43.031581 | 2017-10-12T18:56:25 | 2017-10-12T18:56:25 | 37,312,811 | 0 | 0 | null | 2016-09-29T08:29:13 | 2015-06-12T09:08:22 |
Python
|
UTF-8
|
Python
| false | false | 449 |
py
|
import ROOT
from ROOT import gROOT
def qcdFit() :
f = ROOT.TFile('roots/OSvsSS.root','r')
h = f.Get('OSvsSS')
func = ROOT.TF2( 'func', '[0] + (x * [1]) +(y *[2])' )
f1 = gROOT.GetFunction('func' )
f1.SetParName( 0, 'Intercept' )
f1.SetParName( 1, 'x-slope' )
f1.SetParName( 2, 'y-slope' )
f1.SetParameter( 0, 99 )
f1.SetParameter( 1, 99 )
f1.SetParameter( 2, 99 )
h.Fit('func', 'R' )
qcdFit()
|
[
"[email protected]"
] | |
8e165645b9a092e4faa7392ab4052d978f7ea58e
|
222d7bd1c7fba8d2cfe2754ae1b07e7219ff854e
|
/Run_VVC-bu.py
|
f3bfd8de137923efe4c4b3154638e4586b7ebb46
|
[] |
no_license
|
mkjubran/VVCS
|
70981f1a64f380c2b3d04e138a46bf545d8b1bf7
|
79ffb3cbe25a48848eb2b4dbadc908f053c3f8f1
|
refs/heads/master
| 2020-12-22T09:48:42.629357 | 2020-07-14T07:24:23 | 2020-07-14T07:24:23 | 236,737,042 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,839 |
py
|
#Frame1: Type POC QPoffset QPOffsetModelOff QPOffsetModelScale CbQPoffset CrQPoffset QPfactor tcOffsetDiv2 betaOffsetDiv2 temporal_id #ref_pics_active #ref_pics reference pictures predict deltaRPS #ref_idcs reference idcs
#Example: print >> fid, 'Frame1: P 1 5 -6.5 0.2590 0 0 1.0 0 0 0 1 1 -1 0'
from __future__ import division
import numpy as np
import os, sys, subprocess, pdb
import argparse
import ConfigParser
import datetime, math, time
import ntpath
INF = 999
###--------------------------------------------------------------
## Parse configuration Parameters from the configuration file
def main(argv=None):
# Do argv default this way, as doing it in the functional
# declaration sets it at compile time.
if argv is None:
argv = sys.argv
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
description=__doc__, # printed with -h/--help
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
defaults = { "option":"default"}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Parametters")))
#print(dict(config.items("Parametters")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser]
)
parser.set_defaults(**defaults)
args = parser.parse_args(remaining_argv)
return(args)
###--------------------------------------------------------------
def call(cmd):
# proc = subprocess.Popen(["cat", "/etc/services"], stdout=subprocess.PIPE, shell=True)
#proc = subprocess.Popen(cmd, \
# stdout=subprocess.PIPE, shell=True)
#print(cmd)
subprocess.call(cmd,shell=True)
#proc = subprocess.Popen(cmd,stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
return #(out, err)
###--------------------------------------------------------------
def call_bg(cmd):
#proc = subprocess.Popen(cmd, shell=True)
proc = subprocess.Popen(cmd,stdout=subprocess.PIPE, shell=True)
return proc
###--------------------------------------------------------------
def call_bg_file(cmd,fidProcess):
proc = subprocess.Popen(cmd,stdout=fidProcess, shell=True)
    fidProcess.close()  # actually close the handle (the bare attribute access was a no-op); Popen keeps its own fd
return proc
###--------------------------------------------------------------
def Encode_decode_video():
encoderlog=[]
decoderlog=[]
VMAFlog=[]
now_start=[]
now_end=[]
now_start.append(datetime.datetime.now())
print('Encoding {}'.format(now_start[0].strftime("%Y-%m-%d %H:%M:%S")))
InputYUV='{}.yuv'.format(vid[:-4])
fname = ntpath.basename(InputYUV)[:-4]
for cnt in range(len(rate)):
BitstreamFile='{}/VVCencoded_{}_{}.bin'.format(Path,fname,rate[cnt])
ReconYUV='{}/VVCrecon_{}_{}.yuv'.format(Path,fname,rate[cnt])
encoderlogfile='{}/VVClog_{}_{}.dat'.format(Path,fname,rate[cnt])
fid = open(encoderlogfile,'w')
osout = call_bg_file('./VVCOrig/bin/EncoderAppStatic -c ./VVCOrig/cfg/encoder_lowdelay_P_vtm.cfg -c ./VVCOrig/cfg/encoder_VVC_GOP.cfg --InputFile={} --SourceWidth={} --SourceHeight={} --SAO=0 --InitialQP={} --FrameRate={} --FramesToBeEncoded={} --MaxCUSize={} --MaxPartitionDepth={} --BitstreamFile="{}" --RateControl={} --TargetBitrate={} --ReconFile={}'.format(InputYUV,Width,Hight,QP,fps,NumFrames,MaxCUSize,MaxPartitionDepth,BitstreamFile,RateControl,rate[cnt],ReconYUV),fid)
encoderlog.append(osout)
for cnt in range(len(rate)):
encoderlog[cnt].wait()
### decoding ------------
for cnt in range(len(rate)):
OutputYUV='{}/VVCoutput_{}_{}.yuv'.format(Path,fname,rate[cnt])
#osout = call('rm -rf {}'.format(Path,OutputYUV))
BitstreamFile='{}/VVCencoded_{}_{}.bin'.format(Path,fname,rate[cnt])
decoderlogfile='{}/VVCdecoderlog_{}_{}.dat'.format(Path,fname,rate[cnt])
fid = open(decoderlogfile,'w')
osout = call_bg_file('./VVCOrig/bin/DecoderAppStatic -b {} -o {}'.format(BitstreamFile,OutputYUV),fid)
decoderlog.append(osout)
for cnt in range(len(rate)):
decoderlog[cnt].wait()
### VMAF --------
for cnt in range(len(rate)):
OutputYUV='{}/VVCoutput_{}_{}.yuv'.format(Path,fname,rate[cnt])
VMAFlogfile='{}/VVClog_{}_{}.dat'.format(Path,fname,rate[cnt])
fid = open(VMAFlogfile,'a')
osout = call_bg_file('../vmaf/run_vmaf yuv420p {} {} {} {}'.format(Width,Hight,InputYUV,OutputYUV),fid)
VMAFlog.append(osout)
for cnt in range(len(rate)):
VMAFlog[cnt].wait()
VMAFlogfile='{}/VVClog_{}_{}.dat'.format(Path,fname,rate[cnt])
### replace Frame to VMAF_Frame in the log file
call('./Replace_Frame_to_VMAF_Frame --fn {}'.format(VMAFlogfile))
return
##################################################################
## Main Body
if __name__ == "__main__":
args=main()
##Inputs
vid=args.vid;
fps=int(args.fps);
Width=int(args.w);
Hight=int(args.h);
QP=int(args.qp);
MaxCUSize=int(args.maxcusize);
MaxPartitionDepth=int(args.maxpartitiondepth);
RateControl=int(args.ratecontrol);
rate_str = args.rate.split(' ')
rate = [int(r) for r in rate_str]
NumFrames=int(args.numframes)
Path = args.resultspath
Encode_decode_video()
|
[
"[email protected]"
] | |
887a23cdc580ec87b2158ee45d31535b0c0dc08e
|
65ed6010531735377d8c0b8a77d0d336842ebe3e
|
/atx/device/__init__.py
|
f02e38e70d7453565c1496ce76e0b8f43445816f
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
neteaseknight/AirtestX
|
5084a9401777f765e11f70dd02bf3633f5cb66fd
|
c1fe6581f5f37088cbc486c9f128b6f26b0c7695
|
refs/heads/master
| 2021-01-17T21:42:27.346213 | 2016-04-03T12:44:10 | 2016-04-03T12:44:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,871 |
py
|
# coding: utf-8
from __future__ import absolute_import
import collections
from atx import imutils
FindPoint = collections.namedtuple('FindPoint', ['pos', 'confidence', 'method', 'matched'])
Display = collections.namedtuple('Display', ['width', 'height'])
__boundstuple = collections.namedtuple('Bounds', ['left', 'top', 'right', 'bottom'])
class Bounds(__boundstuple):
def __init__(self, *args, **kwargs):
super(Bounds, self).__init__(*args, **kwargs)
self._area = None
def is_inside(self, x, y):
v = self
return x > v.left and x < v.right and y > v.top and y < v.bottom
@property
def area(self):
if not self._area:
v = self
self._area = (v.right-v.left) * (v.bottom-v.top)
return self._area
@property
def center(self):
v = self
return (v.left+v.right)/2, (v.top+v.bottom)/2
def __mul__(self, mul):
return Bounds(*(int(v*mul) for v in self))
class Pattern(object):
def __init__(self, image, offset=(0, 0), anchor=0, rsl=None, resolution=None):
"""
Args:
image: image filename or image URL
offset: offset of image center
anchor: not supported
resolution: image origin screen resolution
rsl: alias of resolution
"""
self._name = None
self._image = imutils.open(image)
self._offset = offset
self._resolution = rsl or resolution
if isinstance(image, basestring):
self._name = image
def __str__(self):
return 'Pattern(name: {}, offset: {})'.format(self._name, self.offset)
@property
def image(self):
return self._image
@property
def offset(self):
return self._offset
@property
def resolution(self):
return self._resolution
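# Minimal usage sketch for Bounds (hypothetical coordinates; assumes the atx
# package is importable so the module-level imports above succeed).
if __name__ == '__main__':
    b = Bounds(10, 20, 110, 220)
    print(b.area)                # (110-10) * (220-20) = 20000
    print(b.center)              # (60, 120)
    print(b.is_inside(50, 100))  # True
    print(b * 2)                 # Bounds(left=20, top=40, right=220, bottom=440)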
|
[
"[email protected]"
] | |
dec4a2fb41492241dfdefc7038a33d1f48fa4b13
|
9fa08002daf2e991ff9dfe33ab47c4518976cc12
|
/DeepLearing/DeepLearningFlappyBird-master/deep_q_network.py
|
abdb40aeef2860bb55123cb5bb6e8f77f8267cd3
|
[
"MIT"
] |
permissive
|
freeflyfish/code_file
|
6e1264de2c13d700895bde31421ca791802f1ac6
|
e80cc440f1c969af417bc5bad73c61b50dfa7590
|
refs/heads/master
| 2020-04-30T22:10:06.929633 | 2018-12-07T10:31:04 | 2018-12-07T10:31:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,232 |
py
|
from __future__ import print_function
import tensorflow as tf
import cv2
import sys
sys.path.append("game/")
import game.wrapped_flappy_bird as game
import random
import numpy as np
from collections import deque
GAME = 'bird' # the name of the game being played for log files
ACTIONS = 2 # number of valid actions
GAMMA = 0.99 # decay rate of past observations
OBSERVE = 100000. # timesteps to observe before training
EXPLORE = 2000000. # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # final value of epsilon
INITIAL_EPSILON = 0.0001 # starting value of epsilon
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
FRAME_PER_ACTION = 1
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev = 0.01)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.01, shape = shape)
return tf.Variable(initial)
def conv2d(x, W, stride):
return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
def createNetwork():
# network weights
W_conv1 = weight_variable([8, 8, 4, 32])
b_conv1 = bias_variable([32])
W_conv2 = weight_variable([4, 4, 32, 64])
b_conv2 = bias_variable([64])
W_conv3 = weight_variable([3, 3, 64, 64])
b_conv3 = bias_variable([64])
W_fc1 = weight_variable([1600, 512])
b_fc1 = bias_variable([512])
W_fc2 = weight_variable([512, ACTIONS])
b_fc2 = bias_variable([ACTIONS])
# input layer
s = tf.placeholder("float", [None, 80, 80, 4])
# hidden layers
h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
#h_pool2 = max_pool_2x2(h_conv2)
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
#h_pool3 = max_pool_2x2(h_conv3)
#h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
# readout layer
readout = tf.matmul(h_fc1, W_fc2) + b_fc2
return s, readout, h_fc1
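# Hypothetical sanity check (not in the original): verify the layer shapes implied
# above. 80x80x4 -> conv 8x8/4 -> 20x20x32 -> maxpool -> 10x10x32 -> conv 4x4/2
# -> 5x5x64 -> conv 3x3/1 -> 5x5x64 -> flatten 1600 -> fc 512 -> fc ACTIONS.
def _check_network_shapes():
    s_chk, readout_chk, h_fc1_chk = createNetwork()  # builds a throwaway copy of the graph
    assert h_fc1_chk.get_shape().as_list() == [None, 512]
    assert readout_chk.get_shape().as_list() == [None, ACTIONS]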
def trainNetwork(s, readout, h_fc1, sess):
# define the cost function
a = tf.placeholder("float", [None, ACTIONS])
y = tf.placeholder("float", [None])
readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)
cost = tf.reduce_mean(tf.square(y - readout_action))
train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
# open up a game state to communicate with emulator
game_state = game.GameState()
# store the previous observations in replay memory
D = deque()
# printing
a_file = open("logs_" + GAME + "/readout.txt", 'w')
h_file = open("logs_" + GAME + "/hidden.txt", 'w')
# get the first state by doing nothing and preprocess the image to 80x80x4
do_nothing = np.zeros(ACTIONS)
do_nothing[0] = 1
x_t, r_0, terminal = game_state.frame_step(do_nothing)
x_t = cv2.cvtColor(cv2.resize(x_t, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, x_t = cv2.threshold(x_t,1,255,cv2.THRESH_BINARY)
s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
# saving and loading networks
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
checkpoint = tf.train.get_checkpoint_state("saved_networks")
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
# start training
epsilon = INITIAL_EPSILON
t = 0
while "flappy bird" != "angry bird":
# choose an action epsilon greedily
readout_t = readout.eval(feed_dict={s : [s_t]})[0]
a_t = np.zeros([ACTIONS])
action_index = 0
if t % FRAME_PER_ACTION == 0:
            if random.random() <= epsilon:
                print("----------Random Action----------")
                action_index = random.randrange(ACTIONS)
                a_t[action_index] = 1  # reuse the same index; a second randrange() could act differently than logged
else:
action_index = np.argmax(readout_t)
a_t[action_index] = 1
else:
a_t[0] = 1 # do nothing
# scale down epsilon
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
# run the selected action and observe next state and reward
x_t1_colored, r_t, terminal = game_state.frame_step(a_t)
x_t1 = cv2.cvtColor(cv2.resize(x_t1_colored, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, x_t1 = cv2.threshold(x_t1, 1, 255, cv2.THRESH_BINARY)
x_t1 = np.reshape(x_t1, (80, 80, 1))
#s_t1 = np.append(x_t1, s_t[:,:,1:], axis = 2)
s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)
# store the transition in D
D.append((s_t, a_t, r_t, s_t1, terminal))
if len(D) > REPLAY_MEMORY:
D.popleft()
# only train if done observing
if t > OBSERVE:
# sample a minibatch to train on
minibatch = random.sample(D, BATCH)
# get the batch variables
s_j_batch = [d[0] for d in minibatch]
a_batch = [d[1] for d in minibatch]
r_batch = [d[2] for d in minibatch]
s_j1_batch = [d[3] for d in minibatch]
y_batch = []
readout_j1_batch = readout.eval(feed_dict = {s : s_j1_batch})
for i in range(0, len(minibatch)):
terminal = minibatch[i][4]
# if terminal, only equals reward
if terminal:
y_batch.append(r_batch[i])
else:
y_batch.append(r_batch[i] + GAMMA * np.max(readout_j1_batch[i]))
# perform gradient step
train_step.run(feed_dict = {
y : y_batch,
a : a_batch,
s : s_j_batch}
)
# update the old values
s_t = s_t1
t += 1
# save progress every 10000 iterations
if t % 10000 == 0:
saver.save(sess, 'saved_networks/' + GAME + '-dqn', global_step = t)
# print info
state = ""
if t <= OBSERVE:
state = "observe"
elif t > OBSERVE and t <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
print("TIMESTEP", t, "/ STATE", state, \
"/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, \
"/ Q_MAX %e" % np.max(readout_t))
# write info to files
'''
if t % 10000 <= 100:
a_file.write(",".join([str(x) for x in readout_t]) + '\n')
h_file.write(",".join([str(x) for x in h_fc1.eval(feed_dict={s:[s_t]})[0]]) + '\n')
cv2.imwrite("logs_tetris/frame" + str(t) + ".png", x_t1)
'''
def playGame():
sess = tf.InteractiveSession()
s, readout, h_fc1 = createNetwork()
trainNetwork(s, readout, h_fc1, sess)
def main():
playGame()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
58f6791db4ae9844c53d1e49da8e3ef0bccd20d6
|
3073c7cd9efe87471bcd3d38da4cc44720984d58
|
/ureport/migrations/0013_auto__add_settings.py
|
737b12b4d9dfc8e7e262399eda064a756798d006
|
[] |
no_license
|
unicefuganda/rapidsms-ureport
|
7b9418ebc2e10c24d32c8e93072c2395329fa67b
|
90de9d4224af1922f9a828c22505de21a86f7ac0
|
refs/heads/master
| 2016-09-10T06:51:54.835440 | 2015-06-22T06:02:37 | 2015-06-22T06:02:37 | 2,215,097 | 2 | 2 | null | 2014-05-21T08:45:05 | 2011-08-16T10:20:59 |
Python
|
UTF-8
|
Python
| false | false | 16,489 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Settings'
db.create_table('ureport_settings', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('attribute', self.gf('django.db.models.fields.CharField')(max_length=50)),
('value', self.gf('django.db.models.fields.CharField')(default='', max_length=50, null=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True)),
))
db.send_create_signal('ureport', ['Settings'])
def backwards(self, orm):
# Deleting model 'Settings'
db.delete_table('ureport_settings')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'locations.location': {
'Meta': {'object_name': 'Location'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['locations.LocationType']"})
},
'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
'locations.point': {
'Meta': {'object_name': 'Point'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'poll.poll': {
'Meta': {'ordering': "['-end_date']", 'object_name': 'Poll'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'polls'", 'symmetrical': 'False', 'to': "orm['rapidsms.Contact']"}),
'default_response': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'messages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rapidsms_httprouter.Message']", 'null': 'True', 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'response_type': ('django.db.models.fields.CharField', [], {'default': "'a'", 'max_length': '1', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('django.db.models.fields.SlugField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'rapidsms.backend': {
'Meta': {'object_name': 'Backend'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'rapidsms.connection': {
'Meta': {'unique_together': "(('backend', 'identity'),)", 'object_name': 'Connection'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Backend']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'birthdate': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'health_facility': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_caregiver': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reporting_location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Location']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'village': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'villagers'", 'null': 'True', 'to': "orm['locations.Location']"}),
'village_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'rapidsms_httprouter.message': {
'Meta': {'object_name': 'Message'},
'application': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'null': 'True', 'to': "orm['rapidsms_httprouter.MessageBatch']"}),
'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['rapidsms.Connection']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_response_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '10', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_index': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
'rapidsms_httprouter.messagebatch': {
'Meta': {'object_name': 'MessageBatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'ureport.equatellocation': {
'Meta': {'object_name': 'EquatelLocation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'segment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'ureport.ignoredtags': {
'Meta': {'object_name': 'IgnoredTags'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['poll.Poll']"})
},
'ureport.messageattribute': {
'Meta': {'object_name': 'MessageAttribute'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'db_index': 'True'})
},
'ureport.messagedetail': {
'Meta': {'object_name': 'MessageDetail'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ureport.MessageAttribute']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'to': "orm['rapidsms_httprouter.Message']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'ureport.permit': {
'Meta': {'object_name': 'Permit'},
'allowed': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'ureport.quotebox': {
'Meta': {'object_name': 'QuoteBox'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {}),
'quote': ('django.db.models.fields.TextField', [], {}),
'quoted': ('django.db.models.fields.TextField', [], {})
},
'ureport.settings': {
'Meta': {'object_name': 'Settings'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True'})
},
'ureport.topresponses': {
'Meta': {'object_name': 'TopResponses'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'top_responses'", 'to': "orm['poll.Poll']"}),
'quote': ('django.db.models.fields.TextField', [], {}),
'quoted': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['ureport']
|
[
"[email protected]"
] | |
25c239b31c0a578419e6f525d348d98c0f40112a
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-MetalPerformanceShaders/PyObjCTest/test_mpsrayintersector_mpspolygonaccelerationstructure.py
|
1a5da10d9a4ea08bb220c5af477a364059d79eaa
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 |
Python
|
UTF-8
|
Python
| false | false | 490 |
py
|
from PyObjCTools.TestSupport import TestCase
import MetalPerformanceShaders
MPSAccelerationStructureCompletionHandler = b"v@"
class TestMPSRayIntersector_MPSPolygonAccelerationStructure(TestCase):
def test_enum_types(self):
self.assertIsEnumType(MetalPerformanceShaders.MPSPolygonType)
def test_constants(self):
self.assertEqual(MetalPerformanceShaders.MPSPolygonTypeTriangle, 0)
self.assertEqual(MetalPerformanceShaders.MPSPolygonTypeQuadrilateral, 1)
|
[
"[email protected]"
] | |
824b3721de5cfb5f090fa571e20f924b6a69af1b
|
f02485de5a101f3b69a45b2c4e71bd950ee55eba
|
/Z_other/NetWork/a_test/HelloWorld/HelloWorld/urls.py
|
53f8c23c69c2637e205756186cef301657c5f5b2
|
[] |
no_license
|
newjokker/PyUtil
|
ef4266b0ca32157f9de6e2cac1b1a10647190d99
|
32e64be10a6cd2856850f6720d70b4c6e7033f4e
|
refs/heads/master
| 2020-11-28T00:19:02.073391 | 2019-12-23T02:07:40 | 2019-12-23T02:07:40 | 229,654,616 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 993 |
py
|
"""HelloWorld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import view
urlpatterns = [
path('', view.hello), # 无关键字
path('admin/', admin.site.urls), # 关键字是 admin
path('hello/', view.hello), # 关键字为 hello
path('jokker/', view.jokker),
path('cainiao/', view.cainiao),
path('testdb/', view.cainiao)
]
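# The routes above import a sibling module named view (not the conventional
# views). A minimal sketch of what that module presumably contains; this is
# illustrative only, as the real view.py is not part of this record:
#
# from django.http import HttpResponse
#
# def hello(request):
#     return HttpResponse("Hello world!")
#
# def jokker(request):
#     return HttpResponse("jokker")
#
# def cainiao(request):
#     return HttpResponse("cainiao")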
|
[
"[email protected]"
] | |
dc6539b1e4f2e1e7b697a39e2d29dcd7da994516
|
016200d5593feb15bf7737389586bd161398a09c
|
/Database/venv/bin/rst2s5.py
|
0c2a22f8856285da5058dbce8a0c1e7017f95304
|
[] |
no_license
|
MarcPartensky/Python-2019
|
d74e41710c9b48887e141ef5a8251f5e5d06026d
|
1b29680292fdc48af25ae45ce0e9572b8c31427d
|
refs/heads/master
| 2021-07-07T18:46:49.708387 | 2020-08-11T19:49:01 | 2020-08-11T19:49:01 | 166,604,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 698 |
py
|
#!/Users/marcpartensky/Programs/Python/Repository-2019/Database/venv/bin/python
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
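# Typical usage (illustrative): pass source and destination on the command
# line, e.g. "rst2s5.py slides.txt slides.html". The same docutils entry point
# also accepts an explicit argv, so a programmatic call could look like:
#
# publish_cmdline(writer_name='s5', description=description,
#                 argv=['slides.txt', 'slides.html'])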
|
[
"[email protected]"
] | |
d26f9799eb67232c4f9c0d0617aaa9b2f1ec1988
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/__scraping__/gall.dcinside.com/main.py
|
490c073a6a1c9d7e5de77e2bc435f07699d416ff
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 |
MIT
| 2021-02-17T23:33:37 | 2015-11-04T23:54:32 |
Python
|
UTF-8
|
Python
| false | false | 774 |
py
|
#!/usr/bin/env python3
# date: 2020.01.01
# https://stackoverflow.com/questions/59551193/i-want-to-download-images-from-python-what-should-i-do/
from selenium import webdriver
import requests
#path = r"C:\Users\qpslt\Desktop\py\chromedriver_win32\chromedriver.exe"
#driver = webdriver.Chrome(path)
driver = webdriver.Firefox()
url = "https://gall.dcinside.com/board/view/?id=baseball_new8&no=10131338&exception_mode=recommend&page=1"
driver.get(url)
images = driver.find_elements_by_xpath('//div[@class="writing_view_box"]//img')
for i, img in enumerate(images, 1):
img_url = img.get_attribute('src')
print(i, img_url)
r = requests.get(img_url, headers={'Referer': url})
with open("c:/test/{}.jpg".format(i), 'wb') as f:
f.write(r.content)
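# Note: the find_elements_by_xpath helper used above was removed in
# Selenium 4. A rough modern equivalent of the same lookup (illustrative):
#
# from selenium.webdriver.common.by import By
# images = driver.find_elements(By.XPATH, '//div[@class="writing_view_box"]//img')
#
# The Referer header passed to requests.get() is kept deliberately; it is
# presumably required by the site's hotlink protection.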
|
[
"[email protected]"
] | |
0c44c18c0c305096c4cde6e736d92a55731f5691
|
7048901d6ad4cd58150deec2f7095c4bc20e28bc
|
/coupons/serializers.py
|
8c0a733f4fda39147966e3d16888dab2aad72790
|
[] |
no_license
|
reloadercf/Tienda_Backend
|
f658bc3b01cf7e8d7d86c4964a7808f04f866e66
|
ef602107861096c3f2bb8f31eab12db44be4186d
|
refs/heads/master
| 2022-07-29T03:49:40.413308 | 2019-10-21T16:47:40 | 2019-10-21T16:47:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170 |
py
|
from rest_framework import serializers
from .models import Coupon
class CouponSerializer(serializers.ModelSerializer):
class Meta:
model = Coupon
fields = '__all__'
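# Example round trip (illustrative; assumes at least one Coupon row exists):
#
# coupon = Coupon.objects.first()
# data = CouponSerializer(coupon).data        # model instance -> dict
# incoming = CouponSerializer(data=data)      # dict -> validated data
# incoming.is_valid(raise_exception=True)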
|
[
"[email protected]"
] | |
266d40130e6ece916b7a7b8d7242e4cccea1a212
|
868c604cdc34e04bba44834e8544036437a7eb9b
|
/chapter_1_building_abstractions_with_functions/example_1.py
|
f7311525c376f664a8d9930ffd5961f8ab97a62a
|
[] |
no_license
|
cshintov/sicp
|
bc0c1ae5c3f2b9a068e446030fcde59d73209b7c
|
46b36254d05171704ddcf45666d006e734a7a196
|
refs/heads/master
| 2021-04-23T03:14:32.416422 | 2020-03-24T13:24:12 | 2020-03-24T13:24:12 | 249,893,193 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
from urllib import urlopen
shakespeare_poem = 'http://inst.eecs.berkeley.edu/~cs61a/fa11/shakespeare.txt'
poem = urlopen(shakespeare_poem)
print poem
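# The snippet above is Python 2 (urllib.urlopen and the print statement).
# A Python 3 equivalent, for reference:
#
# from urllib.request import urlopen
# poem = urlopen('http://inst.eecs.berkeley.edu/~cs61a/fa11/shakespeare.txt')
# print(poem)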
|
[
"[email protected]"
] | |
c5f90bbaa9e2eaf84c3dc0035e740cbcbf93576d
|
d286518da2d7b74d63162cac3befe838f74ac93a
|
/backend/winter_firefly_27005/urls.py
|
7305cf2e7f3fdf9cae158bb72166dce189017d05
|
[] |
no_license
|
crowdbotics-apps/winter-firefly-27005
|
7a1b28ec1e1bfa57800c0db25256929925935f1a
|
96777599d723241987fd750bcaa72fe040deb738
|
refs/heads/master
| 2023-04-20T15:57:28.132529 | 2021-05-20T21:00:22 | 2021-05-20T21:00:22 | 369,336,748 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,247 |
py
|
"""winter_firefly_27005 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Winter Firefly"
admin.site.site_title = "Winter Firefly Admin Portal"
admin.site.index_title = "Winter Firefly Admin"
# swagger
api_info = openapi.Info(
title="Winter Firefly API",
default_version="v1",
description="API documentation for Winter Firefly App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
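# Note: Django resolves urlpatterns in order, so the catch-all re_path above
# must stay last; any route appended after it would be unreachable. A quick
# sanity check (illustrative; assumes a configured Django environment):
#
# from django.urls import resolve
# assert resolve('/some/spa/route/').func.view_class is TemplateView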
|
[
"[email protected]"
] | |
acd4413b1a25bb706c5aa2fafcb6dca1519727b5
|
3cdb4faf34d8375d6aee08bcc523adadcb0c46e2
|
/web/env/lib/python3.6/site-packages/awscli/customizations/s3/s3handler.py
|
c95c4015d9c5e017565629761a9c7883ae451dc1
|
[
"MIT",
"GPL-3.0-only"
] |
permissive
|
rizwansoaib/face-attendence
|
bc185d4de627ce5adab1cda7da466cb7a5fddcbe
|
59300441b52d32f3ecb5095085ef9d448aef63af
|
refs/heads/master
| 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 |
MIT
| 2020-02-11T23:47:55 | 2019-02-28T17:33:14 |
Python
|
UTF-8
|
Python
| false | false | 23,299 |
py
|
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import os
from s3transfer.manager import TransferManager
from awscli.customizations.s3.utils import (
human_readable_size, MAX_UPLOAD_SIZE, find_bucket_key, relative_path,
create_warning, NonSeekableStream)
from awscli.customizations.s3.transferconfig import \
create_transfer_config_from_runtime_config
from awscli.customizations.s3.results import UploadResultSubscriber
from awscli.customizations.s3.results import DownloadResultSubscriber
from awscli.customizations.s3.results import CopyResultSubscriber
from awscli.customizations.s3.results import UploadStreamResultSubscriber
from awscli.customizations.s3.results import DownloadStreamResultSubscriber
from awscli.customizations.s3.results import DeleteResultSubscriber
from awscli.customizations.s3.results import QueuedResult
from awscli.customizations.s3.results import SuccessResult
from awscli.customizations.s3.results import FailureResult
from awscli.customizations.s3.results import DryRunResult
from awscli.customizations.s3.results import ResultRecorder
from awscli.customizations.s3.results import ResultPrinter
from awscli.customizations.s3.results import OnlyShowErrorsResultPrinter
from awscli.customizations.s3.results import NoProgressResultPrinter
from awscli.customizations.s3.results import ResultProcessor
from awscli.customizations.s3.results import CommandResultRecorder
from awscli.customizations.s3.utils import RequestParamsMapper
from awscli.customizations.s3.utils import StdoutBytesWriter
from awscli.customizations.s3.utils import ProvideSizeSubscriber
from awscli.customizations.s3.utils import ProvideUploadContentTypeSubscriber
from awscli.customizations.s3.utils import ProvideCopyContentTypeSubscriber
from awscli.customizations.s3.utils import ProvideLastModifiedTimeSubscriber
from awscli.customizations.s3.utils import DirectoryCreatorSubscriber
from awscli.customizations.s3.utils import DeleteSourceFileSubscriber
from awscli.customizations.s3.utils import DeleteSourceObjectSubscriber
from awscli.customizations.s3.utils import DeleteCopySourceObjectSubscriber
from awscli.compat import get_binary_stdin
LOGGER = logging.getLogger(__name__)
class S3TransferHandlerFactory(object):
MAX_IN_MEMORY_CHUNKS = 6
def __init__(self, cli_params, runtime_config):
"""Factory for S3TransferHandlers
:type cli_params: dict
:param cli_params: The parameters provided to the CLI command
:type runtime_config: RuntimeConfig
:param runtime_config: The runtime config for the CLI command
being run
"""
self._cli_params = cli_params
self._runtime_config = runtime_config
def __call__(self, client, result_queue):
"""Creates a S3TransferHandler instance
:type client: botocore.client.Client
:param client: The client to power the S3TransferHandler
:type result_queue: queue.Queue
:param result_queue: The result queue to be used to process results
for the S3TransferHandler
:returns: A S3TransferHandler instance
"""
transfer_config = create_transfer_config_from_runtime_config(
self._runtime_config)
transfer_config.max_in_memory_upload_chunks = self.MAX_IN_MEMORY_CHUNKS
transfer_config.max_in_memory_download_chunks = \
self.MAX_IN_MEMORY_CHUNKS
transfer_manager = TransferManager(client, transfer_config)
LOGGER.debug(
"Using a multipart threshold of %s and a part size of %s",
transfer_config.multipart_threshold,
transfer_config.multipart_chunksize
)
result_recorder = ResultRecorder()
result_processor_handlers = [result_recorder]
self._add_result_printer(result_recorder, result_processor_handlers)
result_processor = ResultProcessor(
result_queue, result_processor_handlers)
command_result_recorder = CommandResultRecorder(
result_queue, result_recorder, result_processor)
return S3TransferHandler(
transfer_manager, self._cli_params, command_result_recorder)
def _add_result_printer(self, result_recorder, result_processor_handlers):
if self._cli_params.get('quiet'):
return
elif self._cli_params.get('only_show_errors'):
result_printer = OnlyShowErrorsResultPrinter(result_recorder)
elif self._cli_params.get('is_stream'):
result_printer = OnlyShowErrorsResultPrinter(result_recorder)
elif not self._cli_params.get('progress'):
result_printer = NoProgressResultPrinter(result_recorder)
else:
result_printer = ResultPrinter(result_recorder)
result_processor_handlers.append(result_printer)
class S3TransferHandler(object):
def __init__(self, transfer_manager, cli_params, result_command_recorder):
"""Backend for performing S3 transfers
:type transfer_manager: s3transfer.manager.TransferManager
:param transfer_manager: Transfer manager to use for transfers
:type cli_params: dict
:param cli_params: The parameters passed to the CLI command in the
form of a dictionary
:type result_command_recorder: ResultCommandRecorder
:param result_command_recorder: The result command recorder to be
used to get the final result of the transfer
"""
self._transfer_manager = transfer_manager
# TODO: Ideally the s3 transfer handler should not need to know
# about the result command recorder. It really only needs an interface
# for adding results to the queue. When all of the commands have
# converted to use this transfer handler, an effort should be made
# to replace the passing of a result command recorder with an
# abstraction to enqueue results.
self._result_command_recorder = result_command_recorder
submitter_args = (
self._transfer_manager, self._result_command_recorder.result_queue,
cli_params
)
self._submitters = [
UploadStreamRequestSubmitter(*submitter_args),
DownloadStreamRequestSubmitter(*submitter_args),
UploadRequestSubmitter(*submitter_args),
DownloadRequestSubmitter(*submitter_args),
CopyRequestSubmitter(*submitter_args),
DeleteRequestSubmitter(*submitter_args),
LocalDeleteRequestSubmitter(*submitter_args)
]
def call(self, fileinfos):
"""Process iterable of FileInfos for transfer
:type fileinfos: iterable of FileInfos
:param fileinfos: Set of FileInfos to submit to underlying transfer
request submitters to make transfer API calls to S3
:rtype: CommandResult
:returns: The result of the command that specifies the number of
failures and warnings encountered.
"""
with self._result_command_recorder:
with self._transfer_manager:
total_submissions = 0
for fileinfo in fileinfos:
for submitter in self._submitters:
if submitter.can_submit(fileinfo):
if submitter.submit(fileinfo):
total_submissions += 1
break
self._result_command_recorder.notify_total_submissions(
total_submissions)
return self._result_command_recorder.get_command_result()
class BaseTransferRequestSubmitter(object):
REQUEST_MAPPER_METHOD = None
RESULT_SUBSCRIBER_CLASS = None
def __init__(self, transfer_manager, result_queue, cli_params):
"""Submits transfer requests to the TransferManager
Given a FileInfo object and provided CLI parameters, it will add the
necessary extra arguments and subscribers in making a call to the
TransferManager.
:type transfer_manager: s3transfer.manager.TransferManager
:param transfer_manager: The underlying transfer manager
:type result_queue: queue.Queue
:param result_queue: The result queue to use
:type cli_params: dict
:param cli_params: The associated CLI parameters passed in to the
command as a dictionary.
"""
self._transfer_manager = transfer_manager
self._result_queue = result_queue
self._cli_params = cli_params
def submit(self, fileinfo):
"""Submits a transfer request based on the FileInfo provided
There is no guarantee that the transfer request will be made on
behalf of the fileinfo as a fileinfo may be skipped based on
circumstances in which the transfer is not possible.
:type fileinfo: awscli.customizations.s3.fileinfo.FileInfo
:param fileinfo: The FileInfo to be used to submit a transfer
request to the underlying transfer manager.
:rtype: s3transfer.futures.TransferFuture
:returns: A TransferFuture representing the transfer if the
transfer was submitted. If it was not submitted, nothing
is returned.
"""
should_skip = self._warn_and_signal_if_skip(fileinfo)
if not should_skip:
return self._do_submit(fileinfo)
def can_submit(self, fileinfo):
"""Checks whether it can submit a particular FileInfo
:type fileinfo: awscli.customizations.s3.fileinfo.FileInfo
:param fileinfo: The FileInfo to check if the transfer request
submitter can handle.
:returns: True if it can use the provided FileInfo to make a transfer
request to the underlying transfer manager. False, otherwise.
"""
raise NotImplementedError('can_submit()')
def _do_submit(self, fileinfo):
extra_args = {}
if self.REQUEST_MAPPER_METHOD:
self.REQUEST_MAPPER_METHOD(extra_args, self._cli_params)
subscribers = []
self._add_additional_subscribers(subscribers, fileinfo)
# The result subscriber class should always be the last registered
# subscriber to ensure it is not missing any information that
# may have been added in a different subscriber such as size.
if self.RESULT_SUBSCRIBER_CLASS:
result_kwargs = {'result_queue': self._result_queue}
if self._cli_params.get('is_move', False):
result_kwargs['transfer_type'] = 'move'
subscribers.append(self.RESULT_SUBSCRIBER_CLASS(**result_kwargs))
if not self._cli_params.get('dryrun'):
return self._submit_transfer_request(
fileinfo, extra_args, subscribers)
else:
self._submit_dryrun(fileinfo)
def _submit_dryrun(self, fileinfo):
transfer_type = fileinfo.operation_name
if self._cli_params.get('is_move', False):
transfer_type = 'move'
src, dest = self._format_src_dest(fileinfo)
self._result_queue.put(DryRunResult(
transfer_type=transfer_type, src=src, dest=dest))
def _add_additional_subscribers(self, subscribers, fileinfo):
pass
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
raise NotImplementedError('_submit_transfer_request()')
def _warn_and_signal_if_skip(self, fileinfo):
for warning_handler in self._get_warning_handlers():
if warning_handler(fileinfo):
# On the first warning handler that returns a signal to skip
# immediately propagate this signal and no longer check
# the other warning handlers as no matter what the file will
# be skipped.
return True
def _get_warning_handlers(self):
# Returns a list of warning handlers, which are callables that
# take in a single parameter representing a FileInfo. It will then
# add a warning to result_queue if needed and return True if
# that FileInfo should be skipped.
return []
def _should_inject_content_type(self):
return (
self._cli_params.get('guess_mime_type') and
not self._cli_params.get('content_type')
)
def _warn_glacier(self, fileinfo):
if not self._cli_params.get('force_glacier_transfer'):
if not fileinfo.is_glacier_compatible():
LOGGER.debug(
'Encountered glacier object s3://%s. Not performing '
'%s on object.' % (fileinfo.src, fileinfo.operation_name))
if not self._cli_params.get('ignore_glacier_warnings'):
warning = create_warning(
's3://'+fileinfo.src,
'Object is of storage class GLACIER. Unable to '
'perform %s operations on GLACIER objects. You must '
'restore the object to be able to perform the '
'operation. See aws s3 %s help for additional '
'parameter options to ignore or force these '
'transfers.' %
(fileinfo.operation_name, fileinfo.operation_name)
)
self._result_queue.put(warning)
return True
return False
def _warn_parent_reference(self, fileinfo):
# normpath() will use the OS path separator so we
# need to take that into account when checking for a parent prefix.
parent_prefix = '..' + os.path.sep
escapes_cwd = os.path.normpath(fileinfo.compare_key).startswith(
parent_prefix)
if escapes_cwd:
warning = create_warning(
fileinfo.compare_key, "File references a parent directory.")
self._result_queue.put(warning)
return True
return False
def _format_src_dest(self, fileinfo):
"""Returns formatted versions of a fileinfos source and destination."""
raise NotImplementedError('_format_src_dest')
def _format_local_path(self, path):
return relative_path(path)
def _format_s3_path(self, path):
if path.startswith('s3://'):
return path
return 's3://' + path
class UploadRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = RequestParamsMapper.map_put_object_params
RESULT_SUBSCRIBER_CLASS = UploadResultSubscriber
def can_submit(self, fileinfo):
return fileinfo.operation_name == 'upload'
def _add_additional_subscribers(self, subscribers, fileinfo):
subscribers.append(ProvideSizeSubscriber(fileinfo.size))
if self._should_inject_content_type():
subscribers.append(ProvideUploadContentTypeSubscriber())
if self._cli_params.get('is_move', False):
subscribers.append(DeleteSourceFileSubscriber())
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.dest)
filein = self._get_filein(fileinfo)
return self._transfer_manager.upload(
fileobj=filein, bucket=bucket, key=key,
extra_args=extra_args, subscribers=subscribers
)
def _get_filein(self, fileinfo):
return fileinfo.src
def _get_warning_handlers(self):
return [self._warn_if_too_large]
def _warn_if_too_large(self, fileinfo):
if getattr(fileinfo, 'size') and fileinfo.size > MAX_UPLOAD_SIZE:
file_path = relative_path(fileinfo.src)
warning_message = (
"File %s exceeds s3 upload limit of %s." % (
file_path, human_readable_size(MAX_UPLOAD_SIZE)))
warning = create_warning(
file_path, warning_message, skip_file=False)
self._result_queue.put(warning)
def _format_src_dest(self, fileinfo):
src = self._format_local_path(fileinfo.src)
dest = self._format_s3_path(fileinfo.dest)
return src, dest
class DownloadRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = RequestParamsMapper.map_get_object_params
RESULT_SUBSCRIBER_CLASS = DownloadResultSubscriber
def can_submit(self, fileinfo):
return fileinfo.operation_name == 'download'
def _add_additional_subscribers(self, subscribers, fileinfo):
subscribers.append(ProvideSizeSubscriber(fileinfo.size))
subscribers.append(DirectoryCreatorSubscriber())
subscribers.append(ProvideLastModifiedTimeSubscriber(
fileinfo.last_update, self._result_queue))
if self._cli_params.get('is_move', False):
subscribers.append(DeleteSourceObjectSubscriber(
fileinfo.source_client))
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.src)
fileout = self._get_fileout(fileinfo)
return self._transfer_manager.download(
fileobj=fileout, bucket=bucket, key=key,
extra_args=extra_args, subscribers=subscribers
)
def _get_fileout(self, fileinfo):
return fileinfo.dest
def _get_warning_handlers(self):
return [self._warn_glacier, self._warn_parent_reference]
def _format_src_dest(self, fileinfo):
src = self._format_s3_path(fileinfo.src)
dest = self._format_local_path(fileinfo.dest)
return src, dest
class CopyRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = RequestParamsMapper.map_copy_object_params
RESULT_SUBSCRIBER_CLASS = CopyResultSubscriber
def can_submit(self, fileinfo):
return fileinfo.operation_name == 'copy'
def _add_additional_subscribers(self, subscribers, fileinfo):
subscribers.append(ProvideSizeSubscriber(fileinfo.size))
if self._should_inject_content_type():
subscribers.append(ProvideCopyContentTypeSubscriber())
if self._cli_params.get('is_move', False):
subscribers.append(DeleteCopySourceObjectSubscriber(
fileinfo.source_client))
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.dest)
source_bucket, source_key = find_bucket_key(fileinfo.src)
copy_source = {'Bucket': source_bucket, 'Key': source_key}
return self._transfer_manager.copy(
bucket=bucket, key=key, copy_source=copy_source,
extra_args=extra_args, subscribers=subscribers,
source_client=fileinfo.source_client
)
def _get_warning_handlers(self):
return [self._warn_glacier]
def _format_src_dest(self, fileinfo):
src = self._format_s3_path(fileinfo.src)
dest = self._format_s3_path(fileinfo.dest)
return src, dest
class UploadStreamRequestSubmitter(UploadRequestSubmitter):
RESULT_SUBSCRIBER_CLASS = UploadStreamResultSubscriber
def can_submit(self, fileinfo):
return (
fileinfo.operation_name == 'upload' and
self._cli_params.get('is_stream')
)
def _add_additional_subscribers(self, subscribers, fileinfo):
expected_size = self._cli_params.get('expected_size', None)
if expected_size is not None:
subscribers.append(ProvideSizeSubscriber(int(expected_size)))
def _get_filein(self, fileinfo):
binary_stdin = get_binary_stdin()
return NonSeekableStream(binary_stdin)
def _format_local_path(self, path):
return '-'
class DownloadStreamRequestSubmitter(DownloadRequestSubmitter):
RESULT_SUBSCRIBER_CLASS = DownloadStreamResultSubscriber
def can_submit(self, fileinfo):
return (
fileinfo.operation_name == 'download' and
self._cli_params.get('is_stream')
)
def _add_additional_subscribers(self, subscribers, fileinfo):
pass
def _get_fileout(self, fileinfo):
return StdoutBytesWriter()
def _format_local_path(self, path):
return '-'
class DeleteRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = RequestParamsMapper.map_delete_object_params
RESULT_SUBSCRIBER_CLASS = DeleteResultSubscriber
def can_submit(self, fileinfo):
return fileinfo.operation_name == 'delete' and \
fileinfo.src_type == 's3'
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
bucket, key = find_bucket_key(fileinfo.src)
return self._transfer_manager.delete(
bucket=bucket, key=key, extra_args=extra_args,
subscribers=subscribers)
def _format_src_dest(self, fileinfo):
return self._format_s3_path(fileinfo.src), None
class LocalDeleteRequestSubmitter(BaseTransferRequestSubmitter):
REQUEST_MAPPER_METHOD = None
RESULT_SUBSCRIBER_CLASS = None
def can_submit(self, fileinfo):
return fileinfo.operation_name == 'delete' and \
fileinfo.src_type == 'local'
def _submit_transfer_request(self, fileinfo, extra_args, subscribers):
# This is quirky but essentially instead of relying on a built-in
# method of s3 transfer, the logic lives directly in the submitter.
# The reason an explicit local file delete does not
# live in s3transfer is because it is outside the scope of s3transfer;
# it should only have interfaces for interacting with S3. Therefore,
# the burden of this functionality should live in the CLI.
# The main downside of doing this is that the delete and the result
# creation happen in the main thread as opposed to a separate thread
# in s3transfer. However, this is not too big of a downside because
# deleting a local file only happens for sync --delete downloads and
# is very fast compared to all of the other types of transfers.
src, dest = self._format_src_dest(fileinfo)
result_kwargs = {
'transfer_type': 'delete',
'src': src,
'dest': dest
}
try:
self._result_queue.put(QueuedResult(
total_transfer_size=0, **result_kwargs))
os.remove(fileinfo.src)
self._result_queue.put(SuccessResult(**result_kwargs))
except Exception as e:
self._result_queue.put(
FailureResult(exception=e, **result_kwargs))
finally:
# Return True to indicate that the transfer was submitted
return True
def _format_src_dest(self, fileinfo):
return self._format_local_path(fileinfo.src), None
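# How the pieces above fit together, as an illustrative sketch (cli_params,
# runtime_config, client and fileinfos are assumed inputs, not defined here):
#
# import queue
# factory = S3TransferHandlerFactory(cli_params, runtime_config)
# handler = factory(client, queue.Queue())  # builds an S3TransferHandler
# command_result = handler.call(fileinfos)  # counts failures and warnings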
|
[
"[email protected]"
] | |
6cdd9ef513534aeb4a9400bcad53eefa19477095
|
6d97e875fb6a3dea9780d918efe33dfd59ac137d
|
/scripts/calc_rec_probs.py
|
2bc4c7f5437257ad4b4b9c2891975142f86a63be
|
[] |
no_license
|
acarvalh/tth-htt
|
0a1350efcf76f425057c809f74d92ae3d719d008
|
c6bb3f2bfb6620c858d29c800be1ae1e2246904a
|
refs/heads/master
| 2021-06-20T05:29:35.657498 | 2018-06-02T01:34:37 | 2018-06-02T01:34:37 | 104,874,635 | 0 | 0 | null | 2017-09-26T11:10:10 | 2017-09-26T11:10:10 | null |
UTF-8
|
Python
| false | false | 2,950 |
py
|
from ROOT import TFile
import ROOT
categories = ["BB_LL", "BB_ML", "BB_MM", "BB_HL", "BB_HM", "BB_HH", "EE_LL", "EE_ML", "EE_MM", "EE_HL", "EE_HM", "EE_HH", "BE_LL", "BE_ML", "EB_ML", "BE_MM", "BE_HL", "EB_HL", "BE_HM", "EB_HM", "BE_HH"]
def calc_rec_probs(infile, processes):
f = TFile(infile)
for p in processes:
print p
for cat in categories:
histo_OS = f.Get("OS/%s/%s" % (cat, "mass_ll"))
histo_SS = f.Get("SS/%s/%s" % (cat, "mass_ll"))
print "SS/%s/%s" % (cat, "mass_ll")
#print cat, "Entries: %d SS, %d OS" % (histo_SS.GetEntries(), histo_OS.GetEntries())
#for bin_pt in range(1, histo_OS.GetNbinsX()+1):
#for bin_eta in range(1, histo_OS.GetNbinsY()+1):
os_count = histo_OS.Integral()
ss_count = histo_SS.Integral()
if ss_count + os_count > 0:
ratio = 100. * ss_count / (ss_count + os_count)
print "Category: %s:\t Ratio = %f" % (cat, ratio)
else: print "Category: %s:\t Ratio = NA" % cat
#print "Integral OS:", histo_OS.Integral(), histo_OS.Integral(1,histo_OS.GetNbinsX()-1)
#print "Integral SS:", histo_SS.Integral(), histo_SS.Integral(1,histo_SS.GetNbinsX()-1)
def calc_probs_21(infile):
f = TFile(infile)
cats = ["BB_LL", "BB_ML", "BB_MM", "BB_HL", "BB_HM", "BB_HH", "EE_LL", "EE_ML", "EE_MM", "EE_HL", "EE_HM", "EE_HH", "BE_LL", "BE_ML", "EB_ML", "BE_MM", "BE_HL", "EB_HL", "BE_HM", "EB_HM", "BE_HH"]
for cat in cats:
histo_OS = f.Get("gen/OS/%s/mass_ll" % cat)
histo_SS = f.Get("gen/SS/%s/mass_ll" % cat)
os_count = histo_OS.Integral()
ss_count = histo_SS.Integral()
if os_count > 0:ratio = 100. * ss_count / (ss_count + os_count)
else: ratio = 100.
print "Category %s:\t ratio = %f" % (cat, ratio)
def print_probs_21(infile):
f = TFile(infile)
cats = ["BB_LL", "BB_ML", "BB_MM", "BB_HL", "BB_HM", "BB_HH", "EE_LL", "EE_ML", "EE_MM", "EE_HL", "EE_HM", "EE_HH", "BE_LL", "BE_ML", "EB_ML", "BE_MM", "BE_HL", "EB_HL", "BE_HM", "EB_HM", "BE_HH"]
i = 0
os_err = ROOT.Double()
ss_err = ROOT.Double()
for cat in cats:
histo_OS = f.Get("gen/OS/%s/mass_ll" % cat)
histo_SS = f.Get("gen/SS/%s/mass_ll" % cat)
os_count = histo_OS.IntegralAndError(0, histo_OS.GetNbinsX()+2, os_err)
ss_count = histo_SS.IntegralAndError(0, histo_SS.GetNbinsX()+2, ss_err)
if os_count > 0:
ratio = ss_count / (ss_count + os_count)
err = (ss_count + ss_err) / (ss_count + ss_err + os_count - os_err) - ratio
else: ratio = 1.
print "%d, %f, %f, %f" % (i, ratio, err, err)
#print "ERR: ", ss_count, ss_err, os_count, os_err
i+=1
if __name__ == "__main__":
procs = ["DY"]
infile = "/hdfs/local/ttH_2tau/andres/ttHAnalysis/2016/histosCF_summer2/histograms/charge_flip/histograms_harvested_stage2_charge_flip_Tight.root"
#calc_rec_probs(infile, procs)
print "_" * 80
calc_probs_21(infile)
print_probs_21(infile)
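# The helpers above all estimate a per-category charge-flip probability
# N_SS / (N_SS + N_OS) (in percent for the calc_* functions); print_probs_21
# also derives a rough one-sided uncertainty by shifting the SS and OS
# integrals by their statistical errors before recomputing the ratio.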
|
[
"[email protected]"
] | |
1ceef34be4f65f2c9baae4ffe8778cb490a17660
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/easy-money_20210129091734.py
|
095946f3367c54f7ca3a4f7faf804a6b08068a18
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,076 |
py
|
# Eastmoney (data.eastmoney.com): IPO initial filings (首发申报)
from datetime import datetime,timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
from bs4 import BeautifulSoup
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def date_gen():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text,'html.parser')
dateList = [i.text for i in soup.findAll('option')]
return dateList
def get_eastmoneyData(dateList):
query = {'type': 'NS',
'sty' : 'NSFR',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '5000',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '1',
'rt' : '53721774'
}
main_data = []
for date in dateList:
query['fd'] = date
# start = datetime.strptime('2017-01-05','%Y-%m-%d').date()
# while start < datetime.today().date():
# query['fd'] = start
url = base_url + urlencode(query)
# yield url
# start += timedelta(days=7)
rs = requests.get(url,headers=headers)
if rs.text == '':
continue
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
main_data.extend(data)
time.sleep(2)
temp = [i.split(',') for i in main_data]
columns = ['会计师事务所','保荐代表人','保荐机构','xxx','律师事务所','日期','所属行业','板块','是否提交财务自查报告',
'注册地','类型','机构名称','签字会计师','签字律师','时间戳','简称']
df = pd.DataFrame(temp,columns=columns)
df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
df = df[['机构名称', '类型', '板块', '注册地', '保荐机构','保荐代表人', '律师事务所', '签字律师','会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业','日期','xxx', '时间戳', '保荐机构','文件链接']]
df = df[df['板块'] != '创业板']
df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',index=False,encoding='utf-8-sig')
return df
def get_meetingData():
meetingInfo = []
for marketType in ['2','4']: # 2 = Main Board, 4 = SME Board
query = {'type': 'NS',
'sty' : 'NSSH',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '5000',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : marketType,
'rt' : '53723990'
}
url = base_url + urlencode(query)
rss = requests.get(url,headers=headers)
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
meetingInfo.extend(data)
temp = [j.split(',') for j in meetingInfo]
columns = ['时间戳','yyy','公司代码','机构名称','详情链接','申报日期','上会日期','申购日期','上市日期','9','拟发行数量','发行前总股本','发行后总股本','13','占发行后总股本比例','当前状态','上市地点','主承销商','承销方式','发审委委员','网站','简称']
df = pd.DataFrame(temp,columns=columns)
df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
df['详情链接'] = df['公司代码'].apply(lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
df = df[['机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期','上会日期', '申购日期', '上市日期', '主承销商','承销方式', '9', '发行前总股本','发行后总股本','13','占发行后总股本比例','发审委委员','网站','公司代码','yyy','时间戳', '简称', '详情链接','文件链接']]
df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',index=False,encoding='utf-8-sig')
return df
def get_zzscData(dateList):
zzsc_dict = {}
for date in dateList:
query = {'type': 'NS',
'sty' : 'NSSE',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '500',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '4',
'stat':'zzsc',
'fd' : date,
'rt' : '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url,headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
continue
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
time.sleep(2)
zzsc = pd.DataFrame(zzsc_dict.items(),columns = ['机构名称','决定终止审查时间'])
zzsc.to_csv('C:/Users/chen/Desktop/IPO_info/eastmoney_zzsc.csv',encoding='utf-8-sig',index=False)
return zzsc
def eastmoney_cleanUP():
east_money = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv')
east_money.replace({'是否提交财务自查报告':' '},'是')
east_money.replace({'是否提交财务自查报告':'不适用'},'是')
east_money['机构名称'] = east_money['机构名称'].replace(r'\*','',regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司','',regex=True)
east_money = east_money[east_money['板块'] != '创业板']
# east_money.sort_values(['机构名称','类型','受理日期'],ascending=[True, True,True],inplace=True)
# east_money.to_csv('C:/Users/chen/Desktop/IPO_info/pre_cleab.csv',encoding='utf-8-sig',index=False)
east_money.drop_duplicates(subset =['机构名称','类型'], keep = 'first', inplace = True)
east_money.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_cleaned.csv',encoding='utf-8-sig',index=False)
return east_money
def gen_finalDate(east_money):
ekk = east_money.values.tolist()
abc = {}
for i in ekk:
if i[0] not in abc:
abc[i[0]] = {'机构名称':i[0],
'预先披露':'',
'已反馈':'',
'预先披露更新日期':'',
'其他':'',
'通过发审会日期':'',
'终止审查日期':'',
'保荐机构':i[4],
'律师事务所':i[6],
'会计师事务所':i[8],
'板块':i[2],
'简称':i[15]
}
if i[1] == '已受理':
abc[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
abc[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
abc[i[0]]['预先披露更新日期'] = i[12]
elif i[1] == '已通过发审会':
abc[i[0]]['通过发审会日期'] = i[12]
else:
if i[1] == '已受理':
abc[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
abc[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
abc[i[0]]['预先披露更新日期'] = i[12]
elif i[1] == '已通过发审会':
abc[i[0]]['通过发审会日期'] = i[12]
elif i[1] in ['已提交发审会讨论,暂缓表决','已上发审会,暂缓表决','中止审查']:
abc[i[0]]['其他'] = {i[1]:i[12]}
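# Illustrative driver for the scrapers above (assumes network access to the
# eastmoney endpoints and the author's local output paths):
#
# dateList = date_gen()                # available report dates
# raw = get_eastmoneyData(dateList)    # raw initial-filing records
# meetings = get_meetingData()         # review-meeting records
# zzsc = get_zzscData(dateList)        # terminated-review records
# cleaned = eastmoney_cleanUP()        # de-duplicated filing records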
|
[
"[email protected]"
] | |
65c9f4d94735b5d8ec3bd25b297f2d107ba78d57
|
c83e356d265a1d294733885c373d0a4c258c2d5e
|
/mayan/apps/documents/models/trashed_document_models.py
|
b01b915df45c786b9f2bae689388dabfd9ca7183
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3
|
4160809d2c96707a196b8c94ea9e4df1a119d96a
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
refs/heads/master
| 2023-08-21T23:36:41.230179 | 2021-10-02T03:51:12 | 2021-10-02T03:51:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
from mayan.apps.events.classes import EventManagerMethodAfter
from mayan.apps.events.decorators import method_event
from ..events import event_trashed_document_restored
from ..managers import TrashCanManager
from .document_models import Document
__all__ = ('TrashedDocument',)
class TrashedDocument(Document):
objects = TrashCanManager()
class Meta:
proxy = True
@method_event(
event_manager_class=EventManagerMethodAfter,
event=event_trashed_document_restored,
target='self',
)
def restore(self):
self.in_trash = False
# Skip the edit event at .save().
self._event_ignore = True
self.save()
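# Illustrative usage (assumes a document that was previously sent to the
# trash; document_id is a placeholder):
#
# trashed = TrashedDocument.objects.get(pk=document_id)
# trashed.restore()  # clears in_trash and emits event_trashed_document_restored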
|
[
"[email protected]"
] | |
f320319b03d47c6fb1820eed7a74123132d8126f
|
b059c2cf1e19932abb179ca3de74ced2759f6754
|
/S20/day03/02作业.py
|
56e199fe792866eaf71cd248d1ce258e423277ee
|
[] |
no_license
|
Lwk1071373366/zdh
|
a16e9cad478a64c36227419d324454dfb9c43fd9
|
d41032b0edd7d96e147573a26d0e70f3d209dd84
|
refs/heads/master
| 2020-06-18T02:11:22.740239 | 2019-07-10T08:55:14 | 2019-07-10T08:55:14 | 196,130,277 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,842 |
py
|
# Given the variable name = "aleX leNb", complete the following tasks:
# 1. Strip the whitespace on both sides of name's value and print the result
#
# name ='aleX leNB'
# print(name.strip())
# 2. Remove the "al" on the left side of name's value and print the result
# name ='aleX leNB'
# print(name[2:])
# Remove the "Nb" on the right side of name's value and print the result
# name='aleX leNB'
# print(name[0:7])
# Remove the leading "a" and the trailing "b" from name's value and print the result
# name='aleX leNB'
# # print(name[1:8])
# Check whether name starts with "al" and print the result
# name = 'aleX leNB'
# # print(name.startswith('al'))
# Check whether name ends with "Nb" and print the result
# name = 'aleX leNB'
# print(name.endswith('NB'))
# Replace every "l" in name's value with "p" and print the result
# name = 'aleX leNB'
# print(name.replace('l','p'))
# Replace only the first "l" in name's value with "p" and print the result
# print(name.replace('l','p',1))
# Split name's value on every "l" and print the result.
# print(name.split('l'))
# Split name's value on the first "l" only and print the result.
# print(name.split('l',1))
# Uppercase name's value and print the result
# print(name.upper())
# Lowercase name's value and print the result
# print(name.lower())
# Capitalize the first letter "a" of name's value and print the result
# print(name.capitalize())
# Count how many times the letter "l" appears in name's value and print the result
# Find the index of "N" in name's value (raise an error if it is missing) and print the result
# print(name.find('N'))
# print(name.index('N'))
# Find the index of "X le" in name's value and print the result
# print(name.find('X le'))
# Print the 2nd character of name's value
# name = 'aleX leNB'
# Print the index positions of "e" in name's value (find both e's)
# print(name.find('e'))
# Given the string s = "123a4b5c"
# a. Slice s to form a new string s1, s1 = "123"
# b. Slice s to form a new string s2, s2 = "a4b"
# c. Slice s to form a new string s3, s3 = "1345"
# d. Slice s to form a string s4, s4 = "2ab"
# e. Slice s to form a string s5, s5 = "c"
# # f. Slice s to form a string s6, s6 = "ba2"
# s='123a4b5c'
# s1=print(s[0:3])
# s2=print(s[3:6])
# s3=print(s[0:7:2])
# s4=print(s[1:6:2])
# s5=print(s[-1])
# s6=print(s[-3:-8:-2])
#
# Use a while loop and a for loop, each printing every element of the string s = "asdfer"
# s='asdfer'
#
# for i in s:
# print(i)
#
#
#
# count = 0
# while count < len(s):
# print(s[count])
# count += 1
#
# count =0
# while count<len(s):
# print(s[count])
# count += 1
# 使⽤for循环对s="asdfer"进⾏循环,但是每次打印的内容都是"asdfer"
# s ='asdfer'
# for i in s :
# print(s)
# 使⽤for循环对s="abcdefg"进⾏循环,每次打印的内容是每个字符加上sb, 例如:
# asb, bsb,csb,...gsb
# s ='abcdefg'
# for i in s:
# i = i+"sb"
# print(i)
# 使⽤for循环对s="321"进⾏循环,打印的内容依次是:"倒计时3秒","倒计时2
# 秒","倒计时1秒","出发
# s= '321'
# for i in s:
# if i == '3':
# print('倒计时3S')
# if i == '2':
# print('倒计时2S')
# if i == '1':
# print('倒计时1S')
# else:print('出发')
#
# Compute the total of 1 - 2 + 3 ... + 99, excluding 88
x= 1
result = 0
while x <= 99:
if x % 2 == 1:
result += x
else :
if x != 88:
result -= x
x += 1
print(result)
x=1
s=0
while x <=99:
if x % 2 ==1 :
s = s + x
else:
if x != 88:
s = s - x
x= x+1
print(s)
# Check whether a sentence is a palindrome, i.e. reads the same forwards and
# backwards, e.g. 上海自来水来自海上 ("Shanghai's tap water comes from the sea")
# a = "上海自来水来自海上"
# content = input("请输⼊内容:") ⽤户输⼊:5+9或5+ 9或5 + 9,然后进⾏分
# 割再进⾏计算
#
# sum = 0
# content = input('请输入内容:').strip()
# print(content)
# s = content.split('+')
# print(s)
# for i in s:
# sum += int(i)
# print(sum)
# Count how many digit characters appear in the user's input.
# e.g. content = input("请输入内容:")  # e.g. fhdal234slfh98769fjdla
#
# content = input('请输入内容:')
# count = 0
# for i in content:
# if i.isdigit():
# count += 1
# else:continue
# print(count)
# 如:content = input("请输⼊内容:") ⽤户输⼊:5+9+6 +12+ 13,然后进⾏分割
# 再进⾏计算。
# sum = 0
# content = input('请输入:')
# count = content.rsplit('+')
# print(count)
# for i in count:
# sum = sum +int(i)
# print(sum)
#
# content =input('输入数字')
# count = 0
# for i in content:
# if i.isdigit():
# count +=1
# else:continue
# print(count)
#
# Write code for the following requirements (bonus exercise):
# The user can keep entering choices (use a while loop). Behaviour:
# Enter A: show "take the main road home", then ask the user to choose again:
# take the bus, or walk?
# Bus: show "home in 10 minutes" and exit the whole program.
# Walk: show "home in 20 minutes" and exit the whole program.
# Enter B: show "take the side road home" and exit the whole program.
# Enter C: show "take a detour home", then ask the user to choose again:
# hang out at the arcade, or go to the internet cafe?
# Arcade: show "home in an hour and a half; dad is home waiting with a stick"
# and re-prompt for the A/B/C options.
# Internet cafe: show "home in two hours; mom is ready for battle" and
# re-prompt for the A/B/C options
# Read a string and count how many uppercase letters, lowercase letters,
# digits, and other characters appear in it, then print the counts
# s =input('请输入:')
# count = 0
# for i in s :
# if i.islower():
# count += 1
# print(count)
# if i.isdigit():
# count += 1
# print(count)
# if i.isupper():
# count += 1
# print(count)
# if i.isalnum():
# count += 1
# print(count)
# Fun template program: wait for the user to enter a name, a place and a
# hobby, then display them in a sentence such as: "dear and beloved xxx most
# loves to do xxx at the place xxx"
# f ='敬爱可亲的{},最喜欢在{}的{}'
# name = input('姓名')
# hobby = input('爱好')  # an application of str.format()
# addr =input('地点')
#
# print(f.format(name,hobby,addr))
# a = '敬爱的{},喜欢{}干{}'
# name = input('姓名')
# hobby=input('爱好')
# addr=input('地点')
#
# print(a.format(name,hobby,addr))
# Read a string and count how many uppercase letters, lowercase letters,
# digits, and other characters appear in it, then print the counts
# upp=0
# low=0
# dig=0
# oth=0
# s = input('内容')
# for i in s :
# if i.upper() :
# upp+=1
# if i.lower():
# low+=1
# if i.isdigit() :
# dig+=1
# else:
# oth+=1
# print('大写{},小写{},数字{},其他{}'.format(upp,low,dig,oth))
#
# counter_upper = 0
# counter_lower = 0
# counter_digit = 0
# counter_other = 0
#
#
#
# s = input("input a string:")
# for x in s:
# if x.isdigit():
# counter_digit += 1
# elif x.isupper():
# counter_upper += 1
# elif x.islower():
# counter_lower += 1
# else:
# counter_other += 1
#
# print("大写:{},小写:{},数字:{},其他{}".format(counter_other,counter_upper,counter_digit,counter_lower))
#
# counter_upper = 0
# counter_lower = 0
# counter_digit = 0
# counter_other = 0
#
# s = input('内容:')
# for i in s:
# if i.upper():
# counter_upper += 1
# elif i.lower():
# counter_lower += 1
# elif i.isdigit():
# counter_digit += 1
# else:
# counter_other += 1
# print('大写{},小写{}, 数字{},其他{}'.format(counter_upper,counter_lower,counter_digit,counter_other))
|
[
"[email protected]"
] | |
bd5492b6bec1f6e3280cbd6bf2c71e883c29edac
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=69/sched.py
|
921e6e054d0d3ab3a119fcc51f90853d86f0b4cf
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
-X FMLP -Q 0 -L 3 103 300
-X FMLP -Q 0 -L 3 89 300
-X FMLP -Q 0 -L 3 81 400
-X FMLP -Q 1 -L 2 77 300
-X FMLP -Q 1 -L 2 64 400
-X FMLP -Q 2 -L 2 59 200
-X FMLP -Q 2 -L 2 54 175
-X FMLP -Q 3 -L 1 42 250
-X FMLP -Q 3 -L 1 39 250
32 150
29 150
28 175
28 125
26 175
24 250
20 300
16 150
13 125
|
[
"[email protected]"
] | |
06fde46f14519931629dcd804ce8b5d896403fd6
|
cef574422ec96cc972733812f78b8f777a934326
|
/first/drow_circle.py
|
976d039d5df39e66cf4054eae764bfd8de493bd7
|
[] |
no_license
|
ducksfrogs/pyGame2
|
6aa1f01743fc3bd8df4149f090a5ac63d72686a9
|
17fc545fa66a2d091127cfd4d5779b1e5d3385e4
|
refs/heads/main
| 2023-02-25T13:42:33.719568 | 2021-01-31T02:49:11 | 2021-01-31T02:49:11 | 323,764,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 744 |
py
|
"""Draw circle """
import sys
import pygame
from pygame.locals import QUIT, Rect
pygame.init()
SURFACE = pygame.display.set_mode((400,300))
FPSCLOCK = pygame.time.Clock()
def main():
""" Main routine """
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
SURFACE.fill((255,255,255))
pygame.draw.circle(SURFACE, (255,0,0), (50,50),20)
pygame.draw.circle(SURFACE, (255,0,0), (150,50), 20, 10)
pygame.draw.circle(SURFACE, (0,255,0), (50,150), 10)
pygame.draw.circle(SURFACE, (0,255,0), (150,150), 20)
pygame.display.update()
FPSCLOCK.tick(3)
if __name__ == '__main__':
main()
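# pygame.draw.circle(surface, color, center, radius, width=0) draws a filled
# circle when width is 0 (the default) and only an outline of the given
# thickness otherwise, which is why the four circles above render differently.
# FPSCLOCK.tick(3) caps the loop at roughly 3 frames per second.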
|
[
"[email protected]"
] |