1676720
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances


def gradient_descent(D, x0, loss_f, grad_f, lr, tol, max_iter):
    losses = np.zeros(max_iter)
    y_old = x0
    y = x0
    for i in range(max_iter):
        g = grad_f(D, y)
        y = y_old - lr * g
        stress = loss_f(D, y)
        losses[i] = stress
        if stress < tol:
            msg = "\riter: {0}, stress: {1:}".format(i, stress)
            print(msg, flush=True, end="\t")
            losses = losses[:i + 1]  # keep the final iteration's stress as well
            break
        if i % 50 == 0:
            msg = "\riter: {0}, stress: {1:}".format(i, stress)
            print(msg, flush=True, end="\t")
        y_old = y
        if i == max_iter - 1:
            msg = "\riter: {0}, stress: {1:}".format(i, stress)
            print(msg, flush=True, end="\t")
    print('\n')
    return y, losses


'''
Input data and output data are column vectors
(i.e. for X in R^{N x V}, X.shape = (V, N), not (N, V))
'''


class MDS:
    def __init__(self, n_dim=2, input_type='raw'):
        if input_type not in ['distance', 'raw']:
            raise RuntimeError('Unsupported input_type!')
        self.input_type = input_type
        self.n_dim = n_dim

    def fit(self, X,
            method='cmds',  # or 'stress'
            lr=0.5):
        if method == 'cmds':
            return self._cmds(X)
        else:
            return self._stress_based_mds(X, lr=lr)

    def _cmds(self, X):
        """
        Classical (linear) multidimensional scaling (MDS)

        Parameters
        ----------
        X: (d, n) array or (n, n) array
            input data. The data are placed in column-major order,
            that is, samples are placed in the matrix (X) as column vectors.
            d: dimension of points
            n: number of points
        n_dim: dimension of target space
        input_type: indicates whether the data are raw or distances
            - raw: raw data, a (d, n) array.
            - distance: precomputed distances between the data, an (n, n) array.

        Returns
        -------
        Y: (n_dim, n) array. Projected embeddings.
           The (n_dim,) eigenvalues and the corresponding eigenvectors (as
           column vectors) are stored on self.eigen_values / self.eigen_vectors.
        """
        if self.input_type == 'distance':
            D = X
        elif self.input_type == 'raw':
            Xt = X.T
            D = euclidean_distances(Xt, Xt)

        n = len(D)
        H = np.eye(n) - (1 / n) * np.ones((n, n))
        D = (D ** 2).astype(np.float64)
        D = np.nan_to_num(D)
        G = -(1 / 2) * (H.dot(D).dot(H))

        evals, evecs = np.linalg.eigh(G)
        index = evals.argsort()[::-1]
        evals = evals[index]
        evecs = evecs[:, index]
        evals = evals[:self.n_dim]
        evecs = evecs[:, :self.n_dim]

        self.eigen_vectors = evecs
        self.eigen_values = evals

        Y = np.diag(evals ** (1 / 2)) @ evecs.T
        assert Y.shape[0] == self.n_dim
        return Y

    def _loss_sammon(self, D, y):
        """
        Loss function (stress) - Sammon

        Parameters
        ----------
        D: (n, n) array. Distance matrix in the original space.
            This is a symmetric matrix.
        y: (d, n) array
            d is the dimensionality of the target space.
            n is the number of points.

        Returns
        -------
        stress: scalar stress
        """
        yt = y.T
        n = D.shape[0]
        Delta = euclidean_distances(yt, yt)

        stress = 0
        for i in range(n):
            f = 0
            s = 0
            for j in range(n):
                s += (D[i, j] - Delta[i, j]) ** 2
                f += Delta[i, j]
            stress += (s / f)
        return stress

    def _grad_sammon(self, D, y):
        """
        Gradient function (first derivative) - Sammon

        Parameters
        ----------
        D: (n, n) array. Distance matrix in the original space.
            This is a symmetric matrix.
        y: (d, n) array
            d is the dimensionality of the target space.
            n is the number of points.

        Returns
        -------
        g: (k, n) array. Gradient matrix.
            k is the dimensionality of the target space.
            n is the number of points.
        """
        D2 = euclidean_distances(y.T, y.T)
        n = len(D)

        def grid(k):
            s = np.zeros(y[:, k].shape)
            for j in range(n):
                if j != k:
                    s += (D2[k, j] - D[k, j]) * (y[:, k] - y[:, j]) / (D2[k, j])
            return s

        N = 1 / np.tril(D, -1).sum()
        g = np.zeros((y.shape[0], n))
        for i in range(n):
            g[:, i] = grid(i)
        return N * g

    def _stress_based_mds(self, x, lr, tol=1e-9, max_iter=6000):
        """
        Stress-based MDS

        Parameters
        ----------
        x: (d, n) array or (n, n) array
            If it is raw data -> (d, n) array; otherwise an (n, n) distance matrix.
            n is the number of points.
            d is the dimensionality of the original space.
        n_dim: dimensionality of the target space
        loss_f: loss function
        grad_f: gradient function
        input_type: 'raw' or 'distance'
        init: initialisation method
            random: initial y is set randomly
            fixed: initial y is set by pre-defined values
        max_iter: maximum number of optimization iterations

        Returns
        -------
        y: (n_dim, n) array. Embedded coordinates in the target space.
        losses: (max_iter,) history of stress
        """
        # obtain distances
        if self.input_type == 'raw':
            x_t = x.T
            D = euclidean_distances(x_t, x_t)
        elif self.input_type == 'distance':
            D = x
        else:
            raise ValueError('inappropriate input_type')

        # remaining initialisation
        N = x.shape[1]
        np.random.seed(10)

        # initialise y randomly
        y = np.random.normal(0.0, 1.0, [self.n_dim, N])

        # calculate optimal solution (embedded coordinates)
        y, _ = gradient_descent(D, y, self._loss_sammon, self._grad_sammon,
                                lr, tol, max_iter)
        return y
StarcoderdataPython
36700
<filename>slack_sdk/scim/v1/user.py
from typing import Optional, Any, List, Dict, Union

from .default_arg import DefaultArg, NotGiven
from .internal_utils import _to_dict_without_not_given, _is_iterable
from .types import TypeAndValue


class UserAddress:
    country: Union[Optional[str], DefaultArg]
    locality: Union[Optional[str], DefaultArg]
    postal_code: Union[Optional[str], DefaultArg]
    primary: Union[Optional[bool], DefaultArg]
    region: Union[Optional[str], DefaultArg]
    street_address: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        *,
        country: Union[Optional[str], DefaultArg] = NotGiven,
        locality: Union[Optional[str], DefaultArg] = NotGiven,
        postal_code: Union[Optional[str], DefaultArg] = NotGiven,
        primary: Union[Optional[bool], DefaultArg] = NotGiven,
        region: Union[Optional[str], DefaultArg] = NotGiven,
        street_address: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.country = country
        self.locality = locality
        self.postal_code = postal_code
        self.primary = primary
        self.region = region
        self.street_address = street_address
        self.unknown_fields = kwargs

    def to_dict(self) -> dict:
        return _to_dict_without_not_given(self)


class UserEmail(TypeAndValue):
    pass


class UserPhoneNumber(TypeAndValue):
    pass


class UserRole(TypeAndValue):
    pass


class UserGroup:
    display: Union[Optional[str], DefaultArg]
    value: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        *,
        display: Union[Optional[str], DefaultArg] = NotGiven,
        value: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.display = display
        self.value = value
        self.unknown_fields = kwargs

    def to_dict(self) -> dict:
        return _to_dict_without_not_given(self)


class UserMeta:
    created: Union[Optional[str], DefaultArg]
    location: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        created: Union[Optional[str], DefaultArg] = NotGiven,
        location: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.created = created
        self.location = location
        self.unknown_fields = kwargs

    def to_dict(self) -> dict:
        return _to_dict_without_not_given(self)


class UserName:
    family_name: Union[Optional[str], DefaultArg]
    given_name: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        family_name: Union[Optional[str], DefaultArg] = NotGiven,
        given_name: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.family_name = family_name
        self.given_name = given_name
        self.unknown_fields = kwargs

    def to_dict(self) -> dict:
        return _to_dict_without_not_given(self)


class UserPhoto:
    type: Union[Optional[str], DefaultArg]
    value: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        type: Union[Optional[str], DefaultArg] = NotGiven,
        value: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.type = type
        self.value = value
        self.unknown_fields = kwargs

    def to_dict(self) -> dict:
        return _to_dict_without_not_given(self)


class User:
    active: Union[Optional[bool], DefaultArg]
    addresses: Union[Optional[List[UserAddress]], DefaultArg]
    display_name: Union[Optional[str], DefaultArg]
    emails: Union[Optional[List[TypeAndValue]], DefaultArg]
    external_id: Union[Optional[str], DefaultArg]
    groups: Union[Optional[List[UserGroup]], DefaultArg]
    id: Union[Optional[str], DefaultArg]
    meta: Union[Optional[UserMeta], DefaultArg]
    name: Union[Optional[UserName], DefaultArg]
    nick_name: Union[Optional[str], DefaultArg]
    phone_numbers: Union[Optional[List[TypeAndValue]], DefaultArg]
    photos: Union[Optional[List[UserPhoto]], DefaultArg]
    profile_url: Union[Optional[str], DefaultArg]
    roles: Union[Optional[List[TypeAndValue]], DefaultArg]
    schemas: Union[Optional[List[str]], DefaultArg]
    timezone: Union[Optional[str], DefaultArg]
    title: Union[Optional[str], DefaultArg]
    user_name: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]

    def __init__(
        self,
        *,
        active: Union[Optional[bool], DefaultArg] = NotGiven,
        addresses: Union[
            Optional[List[Union[UserAddress, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        display_name: Union[Optional[str], DefaultArg] = NotGiven,
        emails: Union[
            Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        external_id: Union[Optional[str], DefaultArg] = NotGiven,
        groups: Union[
            Optional[List[Union[UserGroup, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        id: Union[Optional[str], DefaultArg] = NotGiven,
        meta: Union[Optional[Union[UserMeta, Dict[str, Any]]], DefaultArg] = NotGiven,
        name: Union[Optional[Union[UserName, Dict[str, Any]]], DefaultArg] = NotGiven,
        nick_name: Union[Optional[str], DefaultArg] = NotGiven,
        phone_numbers: Union[
            Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        photos: Union[
            Optional[List[Union[UserPhoto, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        profile_url: Union[Optional[str], DefaultArg] = NotGiven,
        roles: Union[
            Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        schemas: Union[Optional[List[str]], DefaultArg] = NotGiven,
        timezone: Union[Optional[str], DefaultArg] = NotGiven,
        title: Union[Optional[str], DefaultArg] = NotGiven,
        user_name: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.active = active
        self.addresses = (  # type: ignore
            [a if isinstance(a, UserAddress) else UserAddress(**a) for a in addresses]
            if _is_iterable(addresses)
            else addresses
        )
        self.display_name = display_name
        self.emails = (  # type: ignore
            [a if isinstance(a, TypeAndValue) else TypeAndValue(**a) for a in emails]
            if _is_iterable(emails)
            else emails
        )
        self.external_id = external_id
        self.groups = (  # type: ignore
            [a if isinstance(a, UserGroup) else UserGroup(**a) for a in groups]
            if _is_iterable(groups)
            else groups
        )
        self.id = id
        self.meta = (  # type: ignore
            UserMeta(**meta) if meta is not None and isinstance(meta, dict) else meta
        )
        self.name = (  # type: ignore
            UserName(**name) if name is not None and isinstance(name, dict) else name
        )
        self.nick_name = nick_name
        self.phone_numbers = (  # type: ignore
            [
                a if isinstance(a, TypeAndValue) else TypeAndValue(**a)
                for a in phone_numbers
            ]
            if _is_iterable(phone_numbers)
            else phone_numbers
        )
        self.photos = (  # type: ignore
            [a if isinstance(a, UserPhoto) else UserPhoto(**a) for a in photos]
            if _is_iterable(photos)
            else photos
        )
        self.profile_url = profile_url
        self.roles = (  # type: ignore
            [a if isinstance(a, TypeAndValue) else TypeAndValue(**a) for a in roles]
            if _is_iterable(roles)
            else roles
        )
        self.schemas = schemas
        self.timezone = timezone
        self.title = title
        self.user_name = user_name
        self.unknown_fields = kwargs

    def to_dict(self):
        return _to_dict_without_not_given(self)

    def __repr__(self):
        return f"<slack_sdk.scim.{self.__class__.__name__}: {self.to_dict()}>"
StarcoderdataPython
8130
def insert_metatable():
    """Return a templated (Jinja) SQL statement that inserts a metadata record
    into the target table on the DB."""
    return """
        INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }}
        VALUES ('{{ params.schema }}',
                '{{ params.table }}',
                {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }},
                current_timestamp(),
                '{{ params.type }}');
    """
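
# --- Usage sketch (added for illustration; not part of the original file) ---
# The {{ params.* }} and ti.xcom_pull placeholders suggest this string is
# rendered by Airflow. A minimal sketch, assuming the classic Airflow 1.x
# HiveOperator whose `hql` field is templated; the task id and every params
# value below are invented examples, not values from the original project.
from airflow.operators.hive_operator import HiveOperator

load_meta = HiveOperator(
    task_id='insert_metatable',
    hql=insert_metatable(),
    params={
        'target_schema': 'audit',
        'target_table': 'load_log',
        'schema': 'staging',
        'table': 'orders',
        'count_inserts': 'count_inserts_task',
        'type': 'full',
    },
)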
StarcoderdataPython
44973
#!/usr/bin/env python

"""
    nearest_cloud.py - Version 1.0 2013-07-28

    Compute the COG of the nearest object in x-y-z space and publish as a
    PoseStamped message.

    Relies on PCL ROS nodelets in the launch file to pre-filter the cloud
    on the x, y and z dimensions.

    Based on the follower application by <NAME> at:
    http://ros.org/wiki/turtlebot_follower

    Created for the Pi Robot Project: http://www.pirobot.org
    Copyright (c) 2013 <NAME>. All rights reserved.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details at:

    http://www.gnu.org/licenses/gpl.html
"""

import rospy
from roslib import message
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from geometry_msgs.msg import Point, PoseStamped, Quaternion
from tf.transformations import quaternion_from_euler
import numpy as np
import cv2
from math import pi, radians


class NearestCloud():
    def __init__(self):
        rospy.init_node("nearest_cloud")

        self.min_points = rospy.get_param("~min_points", 25)
        self.z_percentile = rospy.get_param("~z_percentile", 100)

        # Define the target publisher
        self.target_pub = rospy.Publisher('target_pose', PoseStamped)

        rospy.Subscriber('point_cloud', PointCloud2, self.get_nearest_cloud)

        # Wait for the pointcloud topic to become available
        rospy.wait_for_message('point_cloud', PointCloud2)

    def get_nearest_cloud(self, msg):
        points = list()
        points_xy = list()

        # Get all the points in the visible cloud (may be prefiltered by other nodes)
        for point in point_cloud2.read_points(msg, skip_nans=True):
            points.append(point[:3])
            points_xy.append(point[:2])

        # Convert to a numpy array
        points_arr = np.float32([p for p in points]).reshape(-1, 1, 3)

        # Compute the COG
        cog = np.mean(points_arr, 0)

        # Convert to a Point
        cog_point = Point()
        cog_point.x = cog[0][0]
        cog_point.y = cog[0][1]
        cog_point.z = cog[0][2]
        #cog_point.z = 0.35

        # Abort if we get an NaN in any component
        if np.isnan(np.sum(cog)):
            return

        # If we have enough points, find the best fit ellipse around them
        try:
            # build the array first so the rectangle branch can use it too
            points_xy_arr = np.float32([p for p in points_xy]).reshape(-1, 1, 2)
            if len(points_xy) > 6:
                track_box = cv2.fitEllipse(points_xy_arr)
            else:
                # Otherwise, find the best fitting rectangle
                track_box = cv2.boundingRect(points_xy_arr)

            angle = pi - radians(track_box[2])
        except:
            return

        #print angle

        # Convert the rotation angle to a quaternion
        q_angle = quaternion_from_euler(0, angle, 0, axes='sxyz')
        q = Quaternion(*q_angle)

        q.x = 0.707
        q.y = 0
        q.z = 0.707
        q.w = 0

        # Publish the COG and orientation
        target = PoseStamped()
        target.header.stamp = rospy.Time.now()
        target.header.frame_id = msg.header.frame_id
        target.pose.position = cog_point
        target.pose.orientation = q

        # Publish the movement command
        self.target_pub.publish(target)


if __name__ == '__main__':
    try:
        NearestCloud()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Nearest cloud node terminated.")
StarcoderdataPython
112998
import colorsys

import numpy as np


def random_colors(N, bright=True):
    brightness = 1.0 if bright else 0.7
    hsv = [(i / N, 1, brightness) for i in range(N)]
    colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
    return colors


def apply_mask(image, mask, color, alpha=0.5):
    for i in range(3):
        image[:, :, i] = np.where(mask == 1,
                                  image[:, :, i] * (1 - alpha) + alpha * color[i] * 255,
                                  image[:, :, i])
    return image


def apply_contour(image, mask, color, thickness=4):
    t = thickness // 2
    mask = mask.copy()
    mask1 = mask[thickness:, thickness:]
    mask2 = mask[:-thickness, :-thickness]
    mask[t:-t, t:-t] -= mask1 * mask2
    mask = np.where(mask == 0, 0., 1.)
    image = apply_mask(image, mask, color, alpha=1.)
    return image
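
# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal example of overlaying a mask and its contour on a dummy image.
# The image size, float dtype and the square mask are assumptions about the
# intended inputs; nothing here comes from the original project.
if __name__ == '__main__':
    img = np.zeros((64, 64, 3), dtype=np.float32)   # blank RGB image
    msk = np.zeros((64, 64), dtype=np.float32)
    msk[16:48, 16:48] = 1.                          # square mask region
    color = random_colors(1)[0]
    img = apply_mask(img, msk, color, alpha=0.5)    # translucent fill
    img = apply_contour(img, msk, color)            # opaque outline
    print(img.max(), img.min())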
StarcoderdataPython
153905
<gh_stars>1-10
t = int(input())
for _ in range(t):
    n, k = map(int, input().split())
    arr = list(map(int, input().split()))
    arr.sort()
    if k > arr[0]:
        print(abs(k - arr[0]))
    else:
        print(0)
StarcoderdataPython
1687686
<reponame>Oumourin/Book-Manager-System
from . import Book
from . import Order
from .. import db


class OrderItem(db.Model):
    __tablename__ = 'order_item'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False, index=True)
    order_id = db.Column(db.Integer, db.ForeignKey(Order.id), nullable=False)
    isbn = db.Column(db.String(13), db.ForeignKey(Book.isbn), nullable=False)
    amount = db.Column(db.Integer, nullable=False)  # quantity of a given book within one order

    def __init__(self, order_id, isbn, amount):
        self.order_id = order_id
        self.isbn = isbn
        self.amount = amount

    def __repr__(self):
        return '<OrderItem#{}:{}:{}:{}>'.format(self.id, self.order_id, self.isbn, self.amount)
StarcoderdataPython
196536
<reponame>Costopoulos/DeliveryApp<filename>src/store/forms.py
from driver.models import order
from django import forms
from datetimewidget.widgets import DateTimeWidget


class OrderForm(forms.ModelForm):
    time_to_pickup = forms.CharField(
        label='ฮ—ฮผฮตฯฮฟฮผฮทฮฝฮฏฮฑ ฮบฮฑฮน ฯŽฯฮฑ ฯ€ฮฑฯฮฌฮดฮฟฯƒฮทฯ‚',
        widget=forms.DateTimeInput(
            attrs={
                "placeholder": "Yฮฅฮฅฮฅ-ฮœฮœ-DD HH:MM",
                "cols": 20,
                "rows": 20
            }),
        required=True)

    adress_to = forms.CharField(
        required=True,
        label='ฮ”ฮนฮตฯฮธฯ…ฮฝฯƒฮท ฯ€ฮฑฯฮฌฮดฮฟฯƒฮทฯ‚',
        widget=forms.Textarea(
            attrs={
                "placeholder": "ฮ’ฮฌฮปฯ„ฮต ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท ฮบฮฑฮน ฯƒฯ‡ฯŒฮปฮนฮฑ ฯ€ฮฑฯฮฑฮณฮณฮตฮปฮฏฮฑฯ‚",
                # "class": "new-class-name two",
                # "id": "my-id-for-textarea",
                # "rows": 5,
                'cols': 120
            }
        )
    )

    price = forms.DecimalField(
        required=True,
        label='ฮฃฯฮฝฮฟฮปฮฟ',
        widget=forms.NumberInput(
            attrs={
                "placeholder": "ฮฃฮต ฮผฮฟฯฯ†ฮฎ ฮ•ฯ…ฯฯŽ.Cents",
            }
        )
    )

    isPaid = forms.BooleanField(
        required=False,
        label='ฮˆฯ‡ฮตฮน ฯ€ฯฮฟฯ€ฮปฮทฯฯ‰ฮธฮตฮฏ',
        # widget=forms.NumberInput(
        #     attrs={
        #         "placeholder": "ฮฃฮต ฮผฮฟฯฯ†ฮฎ ฮ•ฯ…ฯฯŽ.Cents",
        #     }
        # )
    )

    class Meta:
        model = order
        fields = [
            'time_to_pickup',
            'adress_to',
            'price',
            'isPaid'
        ]
        # widgets = {
        #     'datetime': DateTimeWidget(attrs={'id': ""}, usel10n=True, bootstrap_version=3)
        # }
StarcoderdataPython
3264545
import unittest

from uvm.base.uvm_report_object import UVMReportObject
from uvm.base.uvm_object_globals import (
    UVM_INFO, UVM_ERROR, UVM_LOG, UVM_DISPLAY, UVM_COUNT, UVM_MEDIUM, UVM_HIGH)


class TestUVMReportObject(unittest.TestCase):
    """ Unit tests for UVMReportObject """

    def test_verbosity(self):
        obj = UVMReportObject('rpt')
        rh = obj.get_report_handler()
        verb = rh.get_verbosity_level(UVM_INFO, "")
        self.assertEqual(verb, UVM_MEDIUM)
        self.assertEqual(obj.uvm_report_enabled(UVM_MEDIUM, UVM_INFO), True)

    def test_report_enabled(self):
        obj = UVMReportObject('rpt')
        is_en = obj.uvm_report_enabled(UVM_MEDIUM, UVM_INFO, id="")
        self.assertEqual(is_en, True)
        is_en = obj.uvm_report_enabled(UVM_HIGH, UVM_INFO, id="")
        self.assertEqual(is_en, False)

    def test_report_severity(self):
        obj = UVMReportObject('rpt')
        act = obj.get_report_action(UVM_ERROR, id="")
        self.assertEqual(act, UVM_DISPLAY | UVM_COUNT)
        act = obj.get_report_action(UVM_INFO, id="")
        self.assertEqual(act, UVM_DISPLAY)
        obj.set_report_severity_action(UVM_ERROR, UVM_LOG)
        act = obj.get_report_action(UVM_ERROR, id="")
        self.assertEqual(act, UVM_LOG)


if __name__ == '__main__':
    unittest.main()
StarcoderdataPython
4805850
#!/usr/bin/env python3

from distutils.core import setup

setup(name='Pyromania',
      version='0.1',
      description='Python facade for a variety of tree models from R',
      url='https://github.com/trygvebw/pyromania',
      packages=['pyromania'],
      install_requires=[
          'pandas',
          'numpy',
          'scipy',
          'rpy2',
          'scikit-learn',
      ],
      python_requires='>=3.4')
StarcoderdataPython
1679740
<filename>backend/bugs/models.py
from django.db import models
from django.contrib.auth.models import User


class Bug(models.Model):
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    description = models.TextField()
    resolved = models.BooleanField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.title
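
# --- Usage sketch (added for illustration; not part of the original file) ---
# Creating and resolving a Bug via the Django ORM. Kept as comments because it
# needs a configured Django project to run; all field values are invented.
# from backend.bugs.models import Bug
# user = User.objects.first()
# bug = Bug.objects.create(user=user, title="Crash on save",
#                          description="Steps to reproduce...", resolved=False)
# bug.resolved = True
# bug.save()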
StarcoderdataPython
3367599
<reponame>mrTavas/owasp-fstm-auto
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#

from qiling.os.const import *
from ..const import *
from ..fncc import *
from ..ProcessorBind import *
from ..UefiBaseType import *

# @file: MdePkg\Include\Protocol\SmmSwDispatch2.h
class EFI_SMM_SW_REGISTER_CONTEXT(STRUCT):
    _fields_ = [
        ('SwSmiInputValue', UINTN)
    ]

# @file: MdePkg\Include\Pi\PiMmCis.h
EFI_SMM_HANDLER_ENTRY_POINT2 = FUNCPTR(EFI_STATUS, EFI_HANDLE, PTR(VOID), PTR(VOID), PTR(UINTN))

class EFI_SMM_SW_DISPATCH2_PROTOCOL(STRUCT):
    EFI_SMM_SW_DISPATCH2_PROTOCOL = STRUCT
    _fields_ = [
        ('Register', FUNCPTR(EFI_STATUS, PTR(EFI_SMM_SW_DISPATCH2_PROTOCOL), EFI_SMM_HANDLER_ENTRY_POINT2, PTR(EFI_SMM_SW_REGISTER_CONTEXT), PTR(EFI_HANDLE))),
        ('UnRegister', FUNCPTR(EFI_STATUS, PTR(EFI_SMM_SW_DISPATCH2_PROTOCOL), EFI_HANDLE)),
        ('MaximumSwiValue', UINTN)
    ]

@dxeapi(params={
    "This":             POINTER,
    "DispatchFunction": POINTER,
    "RegisterContext":  POINTER,
    "DispatchHandle":   POINTER
})
def hook_Register(ql, address, params):
    # Let's save the dispatch params, so they can be triggered if needed.
    ql.loader.smm_context.swsmi_handlers.append(params)
    return EFI_SUCCESS

@dxeapi(params={
    "This":           POINTER,
    "DispatchHandle": POINTER
})
def hook_UnRegister(ql, address, params):
    return EFI_SUCCESS

descriptor = {
    "guid":   "18a3c6dc-5eea-48c8-a1c1-b53389f98999",
    "struct": EFI_SMM_SW_DISPATCH2_PROTOCOL,
    "fields": (
        ("Register",   hook_Register),
        ("UnRegister", hook_UnRegister)
    )
}
StarcoderdataPython
1753318
#encoding=utf-8
import numpy as np
import tensorflow as tf
import time

import util
import config
from dataset_utils import int64_feature, float_feature, bytes_feature, convert_to_example


class SynthTextDataFetcher():
    def __init__(self, mat_path, root_path):
        self.mat_path = mat_path
        self.root_path = root_path
        self._load_mat()

    @util.dec.print_calling
    def _load_mat(self):
        data = util.io.load_mat(self.mat_path)
        self.image_paths = data['imnames'][0]
        self.image_bbox = data['wordBB'][0]
        self.txts = data['txt'][0]
        self.num_images = len(self.image_paths)

    def get_image_path(self, idx):
        image_path = util.io.join_path(self.root_path, self.image_paths[idx][0])
        return image_path

    def get_num_words(self, idx):
        try:
            return np.shape(self.image_bbox[idx])[2]
        except:  # error caused by dataset
            return 1

    def get_word_bbox(self, img_idx, word_idx):
        boxes = self.image_bbox[img_idx]
        if len(np.shape(boxes)) == 2:  # error caused by dataset
            boxes = np.reshape(boxes, (2, 4, 1))

        xys = boxes[:, :, word_idx]
        assert(np.shape(xys) == (2, 4))
        return np.float32(xys)

    def normalize_bbox(self, xys, width, height):
        xs = xys[0, :]
        ys = xys[1, :]

        min_x = min(xs)
        min_y = min(ys)
        max_x = max(xs)
        max_y = max(ys)

        # bound them in the valid range
        min_x = max(0, min_x)
        min_y = max(0, min_y)
        max_x = min(width, max_x)
        max_y = min(height, max_y)

        # check the w, h and area of the rect
        w = max_x - min_x
        h = max_y - min_y
        is_valid = True

        if w < 10 or h < 10:
            is_valid = False

        if w * h < 100:
            is_valid = False

        xys[0, :] = xys[0, :] / width
        xys[1, :] = xys[1, :] / height

        return is_valid, min_x / width, min_y / height, max_x / width, max_y / height, xys

    def get_txt(self, image_idx, word_idx):
        txts = self.txts[image_idx]
        clean_txts = []
        for txt in txts:
            clean_txts += txt.split()
        return str(clean_txts[word_idx])

    def fetch_record(self, image_idx):
        image_path = self.get_image_path(image_idx)
        if not (util.io.exists(image_path)):
            return None
        img = util.img.imread(image_path)
        h, w = img.shape[0:-1]
        num_words = self.get_num_words(image_idx)
        rect_bboxes = []
        full_bboxes = []
        txts = []
        for word_idx in range(num_words):
            xys = self.get_word_bbox(image_idx, word_idx)
            is_valid, min_x, min_y, max_x, max_y, xys = self.normalize_bbox(xys, width=w, height=h)
            if not is_valid:
                continue
            rect_bboxes.append([min_x, min_y, max_x, max_y])
            xys = np.reshape(np.transpose(xys), -1)
            full_bboxes.append(xys)
            txt = self.get_txt(image_idx, word_idx)
            txts.append(txt)
        if len(rect_bboxes) == 0:
            return None
        return image_path, img, txts, rect_bboxes, full_bboxes


def cvt_to_tfrecords(output_path, data_path, gt_path, records_per_file=50000):
    fetcher = SynthTextDataFetcher(root_path=data_path, mat_path=gt_path)
    image_idxes = range(fetcher.num_images)
    np.random.shuffle(image_idxes)
    record_count = 0
    for image_idx in image_idxes:
        if record_count % records_per_file == 0:
            fid = record_count / records_per_file
            tfrecord_writer = tf.python_io.TFRecordWriter(output_path % (fid))
        print "converting image %d/%d" % (record_count, fetcher.num_images)
        record = fetcher.fetch_record(image_idx)
        if record is None:
            print '\nimage %d does not exist' % (image_idx + 1)
            continue
        record_count += 1
        image_path, image, txts, rect_bboxes, oriented_bboxes = record
        labels = []
        for txt in txts:
            if len(txt) < 3:
                labels.append(config.ignore_label)
            else:
                labels.append(config.text_label)
        image_data = tf.gfile.FastGFile(image_path, 'r').read()
        shape = image.shape
        image_name = str(util.io.get_filename(image_path).split('.')[0])
        example = convert_to_example(image_data, image_name, labels, txts,
                                     rect_bboxes, oriented_bboxes, shape)
        tfrecord_writer.write(example.SerializeToString())


if __name__ == "__main__":
    mat_path = util.io.get_absolute_path('~/dataset/SynthText/gt.mat')
    root_path = util.io.get_absolute_path('~/dataset/SynthText/')
    output_dir = util.io.get_absolute_path('~/dataset/pixel_link/SynthText/')
    util.io.mkdir(output_dir)
    cvt_to_tfrecords(output_path=util.io.join_path(output_dir, 'SynthText_%d.tfrecord'),
                     data_path=root_path, gt_path=mat_path)
StarcoderdataPython
1777476
from Chef import Chef
from ChineseChef import ChineseChef

myChef = Chef()
myChef.make_special_dish()

myChineseChef = ChineseChef()
myChineseChef.make_fried_rice()
StarcoderdataPython
136986
# %%
import sys
sys.path.append("..")

from data import handling as dth
import pandas as pd
import numpy as np
from pathlib import Path

%matplotlib inline
import quantstats as qs
import bar_chart_race as bcr
import seaborn as sns

# extend pandas functionality with metrics, etc.
qs.extend_pandas()

# %%
######
# SETUP
######
render_vids = False

######
# PREPROCESSING
######
# Load stocksdata
stocksdata_fp = Path.cwd().parent / "data" / "stocksdata_all.csv"
stocksdata_all_df = dth.load_data(stocksdata_fp)
*_, stocksdata_df = dth.train_val_test_split(stocksdata_all_df)

# Get tested dates out of stocksdata
dates = stocksdata_df.index.get_level_values(level="date").unique().tolist()

# Get stock prices close and open
stockprices_close_ser = stocksdata_df["close"].copy()
stockprices_close_ser.name = "close"
stockprices_open_ser = stocksdata_df["open"].copy()
stockprices_open_ser.name = "open"

# Get stocks and exchanges
with open(Path.cwd().parent / "etl" / "stocks.txt") as file:
    symb_exchange = file.read().splitlines()
stocks_df = pd.DataFrame([item.split(".") for item in symb_exchange],
                         columns=["symbol", "exchange"]
                         ).set_index("symbol")["exchange"].apply(lambda x: "EU" if x != "US" else x)

# Load indices data
indices_fp = Path.cwd().parent / "data" / "indices_performance.csv"
indices_df = dth.load_data(indices_fp)
*_, indices_df = dth.train_val_test_split(indices_df)

######
# READ TEST RESULTS
######
# Generate Multiindex DF for all test results over all experiments over all algos
results_dir = Path.cwd().parent / "results"
tests = []
exp_args = []
algo_dirs = [algo_dir for algo_dir in results_dir.iterdir() if algo_dir.is_dir()]
for algo_dir in algo_dirs:
    exp_dirs = [exp_dir for exp_dir in algo_dir.iterdir() if exp_dir.is_dir()]
    for exp_idx, exp_dir in enumerate(exp_dirs):
        exp_args.append(pd.read_json(path_or_buf=exp_dir.joinpath("run_args.json"), typ="ser"))
        test_files = [test_file for test_file in exp_dir.rglob("env_info/test/*.json") if test_file.is_file()]
        for test_idx, test_file in enumerate(test_files):
            test_file_df = pd.read_json(test_file)
            data = {
                "algo": algo_dir.name,
                "exp": exp_idx,
                "test": test_idx,
                "date": dates,
                "totalValues": test_file_df["totalValues"],
                "cashes": test_file_df["cashes"],
                "numShares": test_file_df["numShares"]
            }
            tests.append(pd.DataFrame(data=data))

# concatenate all tests to one df
tests_df = pd.concat(tests).set_index(["algo", "exp", "test", "date"])
# and all exp_args to one df
exp_args_df = pd.DataFrame(exp_args)
exp_args_df["exp_idx"] = exp_args_df.groupby(by="algo").cumcount()
exp_args_df = exp_args_df.set_index(["algo", "exp_idx"])

###
# Get the best Experiment by Sharpe Ratio
# Get Sharpe ratio for all tests
tests_sharpe = tests_df["totalValues"].groupby(level=["algo", "exp", "test"]).apply(qs.stats.sharpe).rename("sharpe")
# Get mean sharpe over the tests for each run
tests_sharpe_mean = tests_sharpe.groupby(level=["algo", "exp"]).mean()
# save best exp idx to a pandas series
best_exp_idx = pd.Series(dict(tests_sharpe_mean.groupby(level=["algo"]).idxmax().values.tolist())).rename("best_exp_idx")

###
# Creating the best exps_args_df to show it later
temp = [exp_args_df.loc[(algo_name, exp_idx), slice(None)] for algo_name, exp_idx in best_exp_idx.items()]
best_exp_args_df = pd.DataFrame(temp).reset_index()
best_exp_args_df[['algo', 'exp_idx']] = pd.DataFrame(best_exp_args_df["index"].tolist(), index=best_exp_args_df.index)
best_exp_args_df = best_exp_args_df.drop(columns="index").set_index(["algo", "exp_idx"])

######
# BUILD PORTFOLIO DF
######
# Inside the best experiment, calculate the mean totalValue by date over all tests.
temp = [tests_df["totalValues"].loc[(algo_name, exp_idx, slice(None), slice(None))]
        for algo_name, exp_idx in best_exp_idx.items()]
best_exp_totalValues_df = pd.concat(temp).reset_index(level="exp", drop=True).reorder_levels(["algo", "date", "test"])
# Now we get our data to make graphs and other metrics.
best_exp_totalValues_df = best_exp_totalValues_df.groupby(level=["algo", "date"]).mean().unstack(level="algo")

### Create indices series
stoxx50e_ser = indices_df[indices_df.index.get_level_values("symbol") == "stoxx50e"]["adjusted_close"]
stoxx50e_ser.index = stoxx50e_ser.index.droplevel(1)
stoxx50e_ser = stoxx50e_ser.pct_change()
stoxx50e_ser = stoxx50e_ser.fillna(0)
stoxx50e_ser = stoxx50e_ser.rename(index="dly_ret").rename_axis("Date")
stoxx50e_ser.name = "stoxx50e"

dji_ser = indices_df[indices_df.index.get_level_values("symbol") == "dji"]["adjusted_close"]
dji_ser.index = dji_ser.index.droplevel(1)
dji_ser = dji_ser.pct_change()
dji_ser = dji_ser.fillna(0)
dji_ser = dji_ser.rename(index="dly_ret").rename_axis("Date")
dji_ser.name = "dji"

# Create an ETF portfolio
etf_df = pd.DataFrame(index=dates)
etf_df = etf_df.join([dji_ser, stoxx50e_ser]).fillna(0)

# Calculate the weighted daily returns of our ETF portfolio
etf_df["dji"] = etf_df["dji"] * (1 - (36 / 63))
etf_df["stoxx50e"] = etf_df["stoxx50e"] * (36 / 63)
etf_df["portf_ret"] = etf_df["dji"] + etf_df["stoxx50e"]

# Calculate the portfolio value
etf_df["totalValue"] = etf_df["portf_ret"].add(1).cumprod().mul(1e6)
etf_ser = etf_df["totalValue"]
etf_ser.name = "ETF"

###
# Get all TotalValues in one DF
portfolios_df = best_exp_totalValues_df.join(etf_ser)

# %%
######
# SHOW RESULTS
######
# Setup the order
drl_algos = ["DDPG", "PPO", "A2C"]
basic_algos = ["BUYHOLD", "RANDOM"]

# Matrix of Mean Sharpes (Experiment times Algorithms)
mean_sharpes_df = tests_sharpe_mean.unstack(level=["algo"])

# Color Gradient
color_gradient = sns.light_palette("green", as_cmap=True)

# Apply color gradient to columns of mean_sharpes_df
mean_sharpes_df[drl_algos].T.style.background_gradient(cmap=color_gradient, axis=1).set_precision(2)

# %%
# export to .tex file
mean_sharpes_df[drl_algos].to_latex(
    buf="mean_sharpes.tex", float_format="%.2f", index_names=False, decimal=",",
    caption="Sharpe-Ratio Mittelwerte aller Experimente รผber 25 Testlรคufe.\\Quelle: Eigene Darstellung.",
    label="tab:exp-mean-sharpes")

# %%
# Showing the variant args of the best experiments
variant_args = ["cagr", "episodic", "num_stacks", "trainsampling"]
best_exp_args_df[variant_args].loc[drl_algos]

# %%
best_exp_args_df[variant_args].loc[drl_algos].droplevel("exp_idx").to_latex(
    buf="best_exp_args.tex", float_format="%.2f", index_names=False, decimal=",",
    caption="Umgebungsparameter des besten Experiments je Algorithmus.\\\\Quelle: Eigene Darstellung.",
    label="tab:best-exp-args")

# %%
# Table for evaluation of best experiments for algos and Bench with:
# Comp. overall Return
# Comp. annual growth rate
# Sharpe ratio
# Volatility (Standard deviation p.a.)
metrics_df = pd.DataFrame(index=["Total return (%)", "CAGR (%)", "Sharpe ratio", "Std. Dev. (%)"])
for column in portfolios_df.columns:
    sharpe = qs.stats.sharpe(qs.utils.to_returns(portfolios_df[column]))
    ret = qs.stats.comp(qs.utils.to_returns(portfolios_df[column]))
    cagr = qs.stats.cagr(qs.utils.to_returns(portfolios_df[column]))
    vol = qs.stats.volatility(qs.utils.to_returns(portfolios_df[column]), annualize=False)
    metrics_df[column] = [ret * 100, cagr * 100, sharpe, vol * 100]

# Highlight the best algorithm
show_portfolios = ["DDPG", "PPO", "BUYHOLD", "A2C", "RANDOM", "ETF"]
metrics_df[show_portfolios].style.background_gradient(cmap=color_gradient, axis=1).set_precision(3)

###
# Random is better than ETF?
# This is because of survivorship bias:
# we chose only stocks that existed from 2000 to the end of 2019,
# while the ETF also contains the losers.

# %%
metrics_df[show_portfolios].to_latex(
    buf="best_exp_metrics.tex", float_format="%.2f", index_names=False, decimal=",",
    caption="Darstellung der PerformancemaรŸe fรผr alle DRL-Algorithmen und die Benchmarks.\\\\Quelle: Eigene Darstellung.",
    label="tab:best-exp-metrics")

# %%
# Colorblind-friendly colormap from https://gist.github.com/thriveth/8560036
colors = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628',
          '#984ea3', '#999999', '#e41a1c', '#dede00']
drl_colors = colors[0:2] + [colors[3]]

# Plot a linechart for all portfolios
portfolios_df[show_portfolios].plot(
    title=None, legend=True, xlabel="", ylabel=None,
    color=colors).figure.savefig("img/all_line_totalValues.pdf", bbox_inches="tight")

# %%
# Bar chart race for Portfolio values
if render_vids:
    bcr.bar_chart_race(df=portfolios_df.resample("2W").mean(), dpi=330,
                       cmap=[colors[1]] + [colors[0]] + colors[2:],
                       filename="vids/all_bcr_totalValues.mp4", orientation="v",
                       fixed_order=True, period_length=1000, interpolate_period=True,
                       fixed_max=True, steps_per_period=7,
                       title="Portfolio Values over Time")

# %%
######
# INVESTIGATIONS
######
# We can investigate the following underlying numbers:
# Number and Value of Shares
# Portfolio structure grouped by exchanges (US, EU) and Cash
# Trading Costs

######
# NUMBER AND VALUE OF SHARES
# Get numShares in a multilevel DF
temp = [tests_df["numShares"].loc[(algo_name, exp_idx, slice(None), slice(None))]
        for algo_name, exp_idx in best_exp_idx.items()]
best_exp_numshares_df = pd.concat(temp).reset_index(level="exp", drop=True).reorder_levels(["algo", "date", "test"]).to_frame()
# explode list fields to columns
best_exp_numshares_df = pd.DataFrame(
    data=best_exp_numshares_df["numShares"].values.tolist(),
    columns=stockprices_close_ser.index.get_level_values(level="symbol").unique().tolist(),
    index=best_exp_numshares_df.index)
# swap axis of test (index) and algo (column)
best_exp_numshares_df = best_exp_numshares_df.unstack(level="test").stack(level=None)
# reorder index levels
best_exp_numshares_df.index.names = ["algo", "date", "symbol"]
# get the mean over tests, round it to full numbers and convert it to int
best_exp_numshares_df = best_exp_numshares_df.mean(axis=1).round().astype("int").unstack(level="algo")

# copy to a new df, because we want to have share values additionally
best_exp_sharesvalues_df = best_exp_numshares_df.copy()
# Get closing price and calculate the stock value in portfolio
best_exp_sharesvalues_df = best_exp_sharesvalues_df.join(stockprices_close_ser)
for algo in drl_algos + basic_algos:
    best_exp_sharesvalues_df[algo] = best_exp_sharesvalues_df[algo] * best_exp_sharesvalues_df["close"]
best_exp_sharesvalues_df = best_exp_sharesvalues_df.drop(columns="close")

# %%
for idx, algo in enumerate(drl_algos):
    if render_vids:
        # Bar chart race for separate symbol values
        bcr.bar_chart_race(df=best_exp_sharesvalues_df[algo].unstack("symbol").resample("2W").mean(), dpi=330,
                           filename="vids/" + algo + "_bcr_sharesvalues.mp4", orientation="v",
                           interpolate_period=True, fixed_order=True, period_length=1000,
                           filter_column_colors=True, fixed_max=True, steps_per_period=7,
                           n_bars=10, cmap=[drl_colors[idx]],
                           title="Separate Stock Values of " + algo + " over Time")
        # Bar chart race for separate symbol number of shares
        bcr.bar_chart_race(df=best_exp_numshares_df[algo].unstack("symbol").resample("2W").mean(), dpi=330,
                           filename="vids/" + algo + "_bcr_numshares.mp4", orientation="v",
                           interpolate_period=True, fixed_order=True, period_length=1000,
                           filter_column_colors=True, fixed_max=True, steps_per_period=7,
                           n_bars=10, cmap=[drl_colors[idx]],
                           title="Separate Stock numbers of " + algo + " over Time")
    # Show the mean of each numshares and plot the 10 largest.
    best_exp_numshares_df[algo].unstack("symbol").mean().nlargest(10).plot(
        title="Mean count of Shares in " + algo + " Portfolio", ylabel="Count", xlabel="Symbol",
        kind="bar", color=drl_colors[idx]).figure.savefig(
        "img/" + algo + "_bar_top10_shares_counts.pdf", bbox_inches="tight")

# %%
######
# PORTFOLIO STRUCTURE
# cash
# Get the Cashes of the Algorithms
temp = [tests_df["cashes"].loc[(algo_name, exp_idx, slice(None), slice(None))]
        for algo_name, exp_idx in best_exp_idx.items()]
best_exp_cashes_df = pd.concat(temp).reset_index(level="exp", drop=True).reorder_levels(["algo", "date", "test"])
best_exp_cashes_df = best_exp_cashes_df.groupby(level=["algo", "date"]).mean().unstack(level="algo")

######
# Structure
exchange_values_df = best_exp_sharesvalues_df.stack().copy()
exchange_values_df.index.names = ["date", "symbol", "algo"]
exchange_values_df.name = "value"
temp = best_exp_cashes_df.stack().copy()
temp.name = ("value", "Cash")
exchange_values_df = exchange_values_df.reset_index(["date", "algo"]).join(stocks_df).reset_index() \
    .set_index(["date", "algo", "exchange"]).drop(columns=["symbol"]).sort_index() \
    .groupby(["algo", "date", "exchange"]).sum().unstack("exchange").join(temp)
exchange_values_df.columns = exchange_values_df.columns.droplevel(None)

# Include ETF portfolio structure
etf_portf_df = pd.concat([dji_ser, stoxx50e_ser], axis=1).fillna(0)
etf_portf_df["EU"] = 1e6 * (36 / 63)
etf_portf_df["US"] = 1e6 * (1 - (36 / 63))
etf_portf_df["EU"] = etf_portf_df["stoxx50e"].add(1).cumprod() * etf_portf_df["EU"]
etf_portf_df["US"] = etf_portf_df["dji"].add(1).cumprod() * etf_portf_df["US"]
etf_portf_df["Cash"] = 0
etf_portf_df = etf_portf_df.drop(columns=["dji", "stoxx50e"])
etf_portf_df["algo"] = "ETF"
etf_portf_df = etf_portf_df.reset_index().set_index(["algo", "Date"])
etf_portf_df.index.names = ["algo", "date"]
etf_portf_df.columns.names = ["exchange"]

exchange_values_df = exchange_values_df.append(etf_portf_df).sort_index()

# Show absolute Structure of Portfolios
for algo in exchange_values_df.index.get_level_values(level="algo").unique().tolist():
    exchange_values_df.loc[(algo, slice(None)), slice(None)].droplevel("algo").plot(
        kind="area", title="Portfolio Structure of " + algo, ylabel="Value", xlabel="Date",
        legend="reverse", color=colors[::-1]).figure.savefig(
        "img/" + algo + "_area_value_portfolio_structure.pdf", bbox_inches="tight")

# %%
# And for better comparison the percentage of total structure
exchange_pct_df = exchange_values_df.copy()
exchange_pct_df["Total"] = exchange_pct_df["US"] + exchange_pct_df["EU"] + exchange_pct_df["Cash"]
for column in exchange_pct_df.columns:
    exchange_pct_df[column] = exchange_pct_df.apply(lambda x: x[column] / x["Total"], axis=1)
exchange_pct_df = exchange_pct_df.drop(columns="Total")

# Show it...
for algo in exchange_pct_df.index.get_level_values(level="algo").unique().tolist():
    exchange_pct_df.loc[(algo, slice(None)), slice(None)].droplevel("algo").plot(
        kind="area", title=None, ylabel="", xlabel="", legend="reverse",
        color=colors[::-1]).figure.savefig(
        "img/" + algo + "_area_pct_portfolio_structure.pdf", bbox_inches="tight")

# %%
######
# TRADING COSTS
######
# A completely different question concerns the costs produced:
# which algorithm traded the most cost-effectively?
# Firstly we need the trades
best_exp_trades_df = best_exp_numshares_df.groupby(by="symbol").diff().fillna(0)
# and secondly we need the costs
best_exp_costs_df = best_exp_trades_df.copy()
best_exp_costs_df = best_exp_costs_df.join(stockprices_open_ser)
for algo in drl_algos + basic_algos:
    best_exp_costs_df[algo] = best_exp_costs_df[algo].abs() * best_exp_costs_df["open"] \
        * best_exp_args_df.loc[(algo, slice(None)), "fee"].values[0]
best_exp_costs_df = best_exp_costs_df.drop(columns="open")

# Show the marginal costs per trade over time.
# Marginal costs indicate how cost-effectively the trades have been made.
(best_exp_costs_df.groupby(by="date").sum().cumsum()
 / best_exp_trades_df.abs().groupby(by="date").sum().cumsum())[show_portfolios[:-1]].plot(
    ylabel="", title=None, color=colors, xlabel="", legend=False).figure.savefig(
    "img/all_line_marginal_trading_costs.pdf", bbox_inches="tight")

# %%
StarcoderdataPython
3326700
<filename>web/addons/website_hr/__openerp__.py
{
    'name': 'Team Page',
    'category': 'Website',
    'summary': 'Present Your Team',
    'version': '1.0',
    'description': """
Our Team Page
=============

        """,
    'author': 'OpenERP SA',
    'depends': ['website', 'hr'],
    'demo': [
        'data/website_hr_demo.xml',
    ],
    'data': [
        'data/website_hr_data.xml',
        'views/website_hr.xml',
        'security/ir.model.access.csv',
        'security/website_hr.xml',
    ],
    'qweb': ['static/src/xml/*.xml'],
    'installable': True,
}
StarcoderdataPython
1603436
class TestInit(object):
    def test_init(self):
        assert 1 == True
StarcoderdataPython
1649813
from hallo.inc.input_parser import InputParser


def test_no_args():
    p = InputParser("blah blah")
    assert p.remaining_text == "blah blah"
    assert len(p.args_dict) == 0


def test_multiple_simple_args():
    p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
    assert p.remaining_text == "blah blah"
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"
    assert p.args_dict["arg3"] == "val3"


def test_quoted_args_quoted_values():
    p = InputParser("yo 'base unit'=\"hello world\"")
    assert p.remaining_text == "yo"
    assert p.args_dict["base unit"] == "hello world"


def test_quoted_args_unquoted_values():
    p = InputParser("yo 'base unit'=hello world")
    assert p.remaining_text == "yo world"
    assert p.args_dict["base unit"] == "hello"


def test_unquoted_args_quoted_values():
    p = InputParser('yo base unit="hello world"')
    assert p.remaining_text == "yo base"
    assert p.args_dict["unit"] == "hello world"


def test_unquoted_args_unquoted_values():
    p = InputParser("yo base unit=hello world")
    assert p.remaining_text == "yo base world"
    assert p.args_dict["unit"] == "hello"


def test_mismatched_quotes():
    p = InputParser('yo \'base unit"="hello world"')
    assert p.remaining_text == "yo 'base"
    assert p.args_dict['unit"'] == "hello world"
    p = InputParser("yo 'base unit'=\"hello's world\"")
    assert p.remaining_text == "yo"
    assert p.args_dict["base unit"] == "hello's world"


def test_all_types():
    p = InputParser(
        "yo 'base unit'=\"hello world\" arg1='value 1' 'arg 2'=val2 arg3=val3"
    )
    assert p.remaining_text == "yo"
    assert p.args_dict["base unit"] == "hello world"
    assert p.args_dict["arg1"] == "value 1"
    assert p.args_dict["arg 2"] == "val2"
    assert p.args_dict["arg3"] == "val3"


def test_remaining_text_start_and_end():
    p = InputParser("blah blah arg1=val1 arg2=val2 hey")
    assert p.remaining_text == "blah blah hey"
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"


def test_unstripped_input():
    p = InputParser(" blah blah ")
    assert p.remaining_text == "blah blah"


def test_get_arg_by_names():
    p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
    assert p.remaining_text == "blah blah"
    assert p.get_arg_by_names(["arg2"]) == "val2"
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"
    assert p.args_dict["arg3"] == "val3"


def test_get_arg_by_names_no_match():
    p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
    assert p.remaining_text == "blah blah"
    assert p.get_arg_by_names(["arg4"]) is None
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"
    assert p.args_dict["arg3"] == "val3"


def test_get_arg_by_names_one_match():
    p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
    assert p.remaining_text == "blah blah"
    assert p.get_arg_by_names(["arg4", "arg5", "arg3"]) == "val3"
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"
    assert p.args_dict["arg3"] == "val3"


def test_get_arg_by_names_first_match():
    p = InputParser("blah blah arg1=val1 arg2=val2 arg3=val3")
    assert p.remaining_text == "blah blah"
    assert p.get_arg_by_names(["arg1", "arg2"]) == "val1"
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"
    assert p.args_dict["arg3"] == "val3"


def test_parse_string_no_numbers():
    p = InputParser("blah bloo blee")
    assert p.remaining_text == "blah bloo blee"
    assert len(p.args_dict) == 0
    assert len(p.string_words) == 3
    assert len(p.number_words) == 0
    assert p.string_words == ["blah", "bloo", "blee"]


def test_parse_string_all_numbers():
    p = InputParser("5 421 8916 34.5 -3")
    assert p.remaining_text == "5 421 8916 34.5 -3"
    assert len(p.args_dict) == 0
    assert len(p.string_words) == 0
    assert len(p.number_words) == 5
    assert p.number_words == [5, 421, 8916, 34.5, -3]


def test_parse_string_mix_of_numbers_and_args():
    p = InputParser("blah blah arg1=val1 arg2=val2 5")
    assert p.remaining_text == "blah blah 5"
    assert p.args_dict["arg1"] == "val1"
    assert p.args_dict["arg2"] == "val2"
    assert p.string_words == ["blah", "blah"]
    assert p.number_words == [5]
StarcoderdataPython
1792296
from antarest.study.storage.rawstudy.model.filesystem.config.model import (
    FileStudyTreeConfig,
)
from antarest.study.storage.rawstudy.model.filesystem.folder_node import (
    FolderNode,
)
from antarest.study.storage.rawstudy.model.filesystem.inode import TREE
from antarest.study.storage.rawstudy.model.filesystem.root.input.areas.item.item import (
    InputAreasItem,
)
from antarest.study.storage.rawstudy.model.filesystem.root.input.areas.list import (
    InputAreasList,
)
from antarest.study.storage.rawstudy.model.filesystem.root.input.areas.sets import (
    InputAreasSets,
)


class InputAreas(FolderNode):
    def build(self) -> TREE:
        children: TREE = {
            a: InputAreasItem(self.context, self.config.next_file(a))
            for a in self.config.area_names()
        }
        children["list"] = InputAreasList(
            self.context, self.config.next_file("list.txt")
        )
        children["sets"] = InputAreasSets(
            self.context, self.config.next_file("sets.ini")
        )
        return children
StarcoderdataPython
27020
<reponame>mingyuexc/huluxia_woman_meitui<filename>tmp/keyword_get.py
#!/usr/bin/python3
# coding = utf-8
"""
@author:m1n9yu3
@file:keyword_get.py
@time:2021/01/13
"""
from get_data import *
import threading
from urllib import parse


def multi_thread(idlist, path):
    """Thread control: run up to 1000 threads per batch."""
    # for i in range(start_id, step+start_id):
    #     parse_json(url, start_id+i)
    threads = []
    for i in idlist:
        threads.append(threading.Thread(target=get_images_url, args=(i, path)))

    for i in threads:
        i.start()

    for i in threads:
        i.join()


def ask_url(url, path, number=10):
    i = 0
    post_ids = []
    js = get_json(url.format(i))
    while True:
        # exit when 'posts' has no content
        if not js['posts']:
            break
        for post_id_i in js['posts']:
            post_ids.append(post_id_i['postID'])
        i += 1
        # limit the number of pages to crawl
        # print(post_ids)
        number -= 1
        if number % 10 == 0:
            multi_thread(idlist=post_ids, path=path)
        if number == 0:
            break
        post_ids = []
        js = get_json(url.format(js['start']))

    print("Crawl finished, {} posts in total".format(i))


def search_key(keyword):
    # a valid _key must be supplied here: <KEY>
    # _key = input("Enter _key: ")
    _key = "<KEY>"
    url = "http://floor.huluxia.com/post/search/ANDROID/2.1?platform=2&market_id=tool_baidu&_key" \
          "=%s&start=1&count=20&cat_id=56&keyword=%s&flag=0" % (_key, parse.quote(keyword))
    # print(url)
    ask_url(url, 'search_result/')


if __name__ == '__main__':
    pass
StarcoderdataPython
1703263
# Generated by Django 2.0.5 on 2018-09-27 03:39

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('notes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='NoteBook',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, default='ๆ–ฐ็ฌ”่ฎฐ', max_length=100, verbose_name='ๆ ‡้ข˜')),
                ('content', models.TextField(blank=True, default='', verbose_name='ๅ†…ๅฎน')),
                ('created_datetime', models.DateTimeField(auto_now_add=True, verbose_name='ๅˆ›ๅปบๆ—ถ้—ด')),
                ('update_datetime', models.DateTimeField(auto_now=True, verbose_name='ไฟฎๆ”นๆ—ถ้—ด')),
                ('is_archived', models.BooleanField(default=False, verbose_name='ๆ˜ฏๅฆๅฝ’ๆกฃ')),
                ('cover', models.TextField(blank=True, default='', verbose_name='ๅฐ้ข')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='็”จๆˆท')),
            ],
            options={
                'db_table': 'NoteBook',
                'ordering': ('update_datetime',),
            },
        ),
        migrations.AddField(
            model_name='note',
            name='render_type',
            field=models.IntegerField(choices=[(0, 'ไธๆธฒๆŸ“'), (1, 'Markdown ๆธฒๆŸ“')], default=1, verbose_name='ๆธฒๆŸ“็ฑปๅž‹'),
        ),
        migrations.AlterField(
            model_name='note',
            name='title',
            field=models.CharField(blank=True, default='ๆ–ฐ็ฌ”่ฎฐ', max_length=100, verbose_name='ๆ ‡้ข˜'),
        ),
        migrations.AlterModelTable(
            name='note',
            table='Notes',
        ),
        migrations.AddField(
            model_name='note',
            name='notebook',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='notes.NoteBook', verbose_name='็ฌ”่ฎฐๆœฌ'),
        ),
    ]
StarcoderdataPython
1758405
<gh_stars>1-10
# Imports
from nltk.corpus.reader.wordnet import WordNetError
from nltk.corpus import wordnet
from sqlalchemy import create_engine, Table, Column, BigInteger, Integer, String, Text, DateTime, MetaData, ForeignKey
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.schema import Sequence
from sqlalchemy.exc import IntegrityError
from optparse import OptionParser
import datetime
import uuid

# Options Parser (note: deprecated since 2.7)
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
                  help="file to parse", metavar="FILE")
parser.add_option("-s", "--source", dest="source",
                  help="defined alignment source", metavar="SOURCE", default="wordnet30")
parser.add_option("-t", "--through", dest="through",
                  help="defined through translation", metavar="SOURCE", default=None)
parser.add_option("-n", "--factor", dest="factor",
                  help="syn_set factor for database", metavar="FACTOR", default=1)
parser.add_option("-m", "--factor2", dest="parent_factor",
                  help="parent syn_set factor for database", metavar="FACTOR", default=1)
parser.add_option("-l", "--lang", dest="lang",
                  help="define locale", metavar="LANG", default="en")
parser.add_option("-u", "--uri", dest="uri",
                  help="define instance uri location", metavar="URI", default="")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose")

# FILENAME
# options.filename sample
# $HOME/andreord-public/lib/import/pwn_data/FinWN/corewn-fiwn-sensemap-pwnssids.tsv

# SOURCES
# options.source samples
# wordnet30
# dannet21

# FACTOR
# options.factor sample
# DanNet 1000

(options, args) = parser.parse_args()

if options.source == options.through:
    parser.error("Source cannot be the same as Through Source")

# Variables
delimiter = "\t"
PWN_id = "wordnet30"

# Database session
engine = create_engine('postgresql://andreord@localhost/wordties-dannet', echo=False)
Session = sessionmaker(bind=engine)
metadata = MetaData(bind=engine)

# Alignments Table
syn_sets = Table('syn_sets', metadata,
                 Column('id', BigInteger, primary_key=True),
                 Column('label', Text, nullable=False),
                 Column('gloss', Text),
                 Column('usage', Text),
                 Column('hyponym_count', Integer)
                 )

instances = Table('instances', metadata,
                  Column('id', Integer, Sequence('instances_id_seq'), primary_key=True),
                  Column('uri', String(255)),
                  Column('last_uptime', DateTime, onupdate=datetime.datetime.now())
                  )

sources = Table('sources', metadata,
                Column('id', String(255), primary_key=True, unique=True),
                Column('instance_id', Integer, ForeignKey('instances.id')),
                Column('lang', String(2), default="en")
                )

alignments = Table('alignments', metadata,
                   Column('id', Integer, Sequence('alignments_id_seq'), primary_key=True),
                   Column('source_id', String(255), ForeignKey('sources.id'), nullable=False),
                   Column('lemma', Text, nullable=False),
                   Column('definition', Text, nullable=False),
                   Column('synonyms', Text, nullable=False),
                   Column('key', Text, nullable=False),
                   Column('relation_type_name', Text, nullable=False),
                   Column('syn_set_id', BigInteger, ForeignKey('syn_sets.id'), nullable=False),
                   Column('through_source_id', Text, ForeignKey('sources.id')),
                   Column('ext_syn_set_id', Integer),
                   UniqueConstraint('source_id', 'key', 'syn_set_id', 'ext_syn_set_id',
                                    'relation_type_name', name='alignments_uniq')
                   )

# Create Alignments, Instances, Sources tables if they don't exist
metadata.create_all(engine)
session = Session()

# Add Source and Instance records
nextId = engine.execute(Sequence('instances_id_seq'))
if options.source == PWN_id:
    engine.execute(instances.insert().values(id=nextId))
    engine.execute(sources.insert().values(id=options.source, instance_id=nextId, lang=options.lang))
else:
    engine.execute(instances.insert().values(id=nextId, uri=options.uri,
                                             last_uptime=datetime.datetime.now()))
    engine.execute(sources.insert().values(id=options.source, instance_id=nextId, lang=options.lang))

# Open file
f = open(options.filename)

for line in f.readlines():
    record = line.split(delimiter)
    relation = record[2] if options.source == PWN_id else record[4]

    if relation in ('eq_synonym', 'eq_has_hyponym', 'eq_has_hyperonym', 'eq_near_synonym'):
        if options.source == PWN_id:
            target, source, parent_source = record[0], record[1], -1
        else:
            target, source, parent_source = record[0], record[1], record[2]

        if target.startswith("EN"):
            pass
        else:
            try:
                # load data from NLTK Wordnet package
                if options.source == PWN_id:
                    lemma = wordnet.lemma_from_key(target)
                    synset = lemma.synset
                    definition, lemmas, lemma_name = synset.definition, "; ".join(synset.lemma_names), lemma.name
                # load data from source file
                else:
                    lemmas, definition, lemma_name = record[3], record[5], record[3].split(";")[0].strip()

                #print "input factor: ", options.parent_factor
                if parent_source > -1:
                    ext_syn_set_id = int(parent_source) * int(options.parent_factor)
                else:
                    ext_syn_set_id = None

                if options.verbose:
                    print "Inserting alignment for CorePWN key (" + target + ") with {relation} " + relation, " {parent_source} ", parent_source, " {ext id} ", ext_syn_set_id

                # insert statement
                ins = alignments.insert().values(source_id=options.source,
                                                 lemma=lemma_name,
                                                 definition=definition,
                                                 synonyms=lemmas,
                                                 key=target,
                                                 relation_type_name=relation,
                                                 syn_set_id=int(source) * int(options.factor),
                                                 through_source_id=options.through,
                                                 ext_syn_set_id=ext_syn_set_id
                                                 )
                engine.execute(ins)
            except IntegrityError as ie:
                print ie
            except WordNetError as e:
                print e
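
# --- Usage sketch (added for illustration; not part of the original script) ---
# The script is driven entirely by the optparse options defined above. A typical
# invocation might look like the following; the factors and locale are examples
# based on the sample comments, and the script name "import_alignments.py" is an
# assumption, since the dataset does not record the original file name.
#
#   python import_alignments.py \
#       -f corewn-fiwn-sensemap-pwnssids.tsv \
#       -s fiwn -t wordnet30 -n 1000 -m 1000 -l fi -v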
StarcoderdataPython
168174
<gh_stars>1-10
# Generated by Django 2.2 on 2019-04-24 14:09

import DjangoUeditor.models
import datetime
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('shop', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='ๅ•†ๅ“ๅ็งฐ')),
                ('price', models.FloatField(verbose_name='ๅ•ไปท')),
                ('num', models.IntegerField(verbose_name='ๆ•ฐ้‡')),
                ('freight', models.IntegerField(default=0, verbose_name='่ฟ่ดน')),
                ('origin', models.CharField(max_length=20, verbose_name='ไบงๅœฐ')),
                ('pro_type', models.CharField(choices=[('ncp', 'ๅ†œไบงๅ“'), ('sg', 'ๆฐดๆžœ'), ('gyp', 'ๅทฅ่‰บๅ“'), ('fsp', 'ๅ‰ฏ้ฃŸๅ“')], default='ncp', max_length=3, verbose_name='ๅˆ†็ฑป')),
                ('buyers', models.IntegerField(default=0, verbose_name='่ดญไนฐไบบๆ•ฐ')),
                ('comments', models.IntegerField(default=0, verbose_name='่ฏ„่ฎบไบบๆ•ฐ')),
                ('details', DjangoUeditor.models.UEditorField(default='', verbose_name='ๅ•†ๅ“่ฏฆๆƒ…')),
                ('mainimg', models.ImageField(default='', upload_to='product/mainimg/%Y/%m', verbose_name='ๅ•†ๅ“ไธปๅ›พ')),
                ('remind', models.CharField(blank=True, max_length=20, null=True, verbose_name='ๆ้†’')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='ๆทปๅŠ ๆ—ถ้—ด')),
            ],
            options={
                'verbose_name': 'ๅ•†ๅ“ไฟกๆฏ',
                'verbose_name_plural': 'ๅ•†ๅ“ไฟกๆฏ',
            },
        ),
        migrations.CreateModel(
            name='ProPic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='product/%Y/%m', verbose_name='ๅ•†ๅ“ๅ›พ')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='ๆทปๅŠ ๆ—ถ้—ด')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Product', verbose_name='ๅ•†ๅ“')),
            ],
            options={
                'verbose_name': 'ๅ•†ๅ“ๅ›พ็‰‡',
                'verbose_name_plural': 'ๅ•†ๅ“ๅ›พ็‰‡',
            },
        ),
    ]
# test/sp_layers_test.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import utils
from sp_layers import SPLayer


def fbank_test():
    conf = {
        "feature_type": "fbank",
        "sample_rate": 16000,
        "num_mel_bins": 40,
        "use_energy": False
    }
    fn = "file:testdata/100-121669-0000.wav"
    pipe = "pipe:flac -c -d -s testdata/103-1240-0005.flac |"
    sample_rate, waveform1 = utils.load_wave(fn)
    sample_rate, waveform2 = utils.load_wave(pipe)
    waveform1 = torch.from_numpy(waveform1)
    waveform2 = torch.from_numpy(waveform2)
    lengths = [waveform1.shape[0], waveform2.shape[0]]
    max_length = max(lengths)
    padded_waveforms = torch.zeros(2, max_length)
    padded_waveforms[0, :lengths[0]] += waveform1
    padded_waveforms[1, :lengths[1]] += waveform2
    layer = SPLayer(conf)
    features, feature_lengths = layer(padded_waveforms, lengths)
    print(features)
    print(feature_lengths)


def specaug_fbank_test():
    conf = {
        "feature_type": "fbank",
        "sample_rate": 16000,
        "num_mel_bins": 80,
        "use_energy": False,
        "spec_aug": {
            "freq_mask_num": 2,
            "freq_mask_width": 27,
            "time_mask_num": 2,
            "time_mask_width": 100,
        }
    }
    fn = "file:testdata/100-121669-0000.wav"
    pipe = "pipe:flac -c -d -s testdata/103-1240-0005.flac |"
    sample_rate, waveform1 = utils.load_wave(fn)
    sample_rate, waveform2 = utils.load_wave(pipe)
    waveform1 = torch.from_numpy(waveform1)
    waveform2 = torch.from_numpy(waveform2)
    lengths = [waveform1.shape[0], waveform2.shape[0]]
    max_length = max(lengths)
    print(lengths)
    padded_waveforms = torch.zeros(2, max_length)
    padded_waveforms[0, :lengths[0]] += waveform1
    padded_waveforms[1, :lengths[1]] += waveform2
    layer = SPLayer(conf)
    features, feature_lengths = layer(padded_waveforms, lengths)
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.imshow(features[1].numpy())
    plt.savefig("test.png")
    # print(features)
    # print(feature_lengths)


def specaug_test():
    featconf = {
        "feature_type": "fbank",
        "sample_rate": 16000,
        "num_mel_bins": 40,
        "use_energy": False
    }
    augconf = {
        "feature_type": "fbank",
        "sample_rate": 16000,
        "num_mel_bins": 40,
        "use_energy": False,
        "spec_aug": {
            "freq_mask_width": 10,
            "freq_mask_num": 2,
            "time_mask_width": 100,
            "time_mask_num": 2
        }
    }
    fn = "file:testdata/100-121669-0000.wav"
    pipe = "pipe:flac -c -d -s testdata/103-1240-0005.flac |"
    sample_rate, waveform1 = utils.load_wave(fn)
    sample_rate, waveform2 = utils.load_wave(pipe)
    waveform1 = torch.from_numpy(waveform1)
    waveform2 = torch.from_numpy(waveform2)
    lengths = [waveform1.shape[0], waveform2.shape[0]]
    max_length = max(lengths)
    padded_waveforms = torch.zeros(2, max_length)
    padded_waveforms[0, :lengths[0]] += waveform1
    padded_waveforms[1, :lengths[1]] += waveform2
    splayer = SPLayer(featconf)
    auglayer = SPLayer(augconf)
    features, feature_lengths = splayer(padded_waveforms, lengths)
    features2, feature_lengths2 = auglayer(padded_waveforms, lengths)
    print("Before augmentation")
    print(features)
    print("After augmentation")
    print(features2)


if __name__ == "__main__":
    fbank_test()
    specaug_test()
    specaug_fbank_test()
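# All three tests above repeat the same zero-padding pattern; a minimal helper
# sketch that factors it out, assuming 1-D float waveform tensors:
def pad_batch(waveforms):
    # Zero-pad a list of 1-D tensors to the length of the longest one,
    # returning the (batch, max_length) tensor and the original lengths.
    lengths = [w.shape[0] for w in waveforms]
    padded = torch.zeros(len(waveforms), max(lengths))
    for i, w in enumerate(waveforms):
        padded[i, :lengths[i]] += w
    return padded, lengths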
from unittest import TestCase

from readconfig import ReadConfig


class TestReadConfig(TestCase):

    def test_ReadConfig_with_file(self):
        file = "config.yml"
        cf = ReadConfig(file)
        assert cf.file == file

    def test_ReadConfig_with_None(self):
        file = "./config/config.yml"
        cf = ReadConfig(None)
        assert cf.file == file

    def test_read_config_file(self):
        test_data = "Abbracadabra"
        file = "config.yml"
        cf = ReadConfig(file)
        assert cf.config['data']['test'] == test_data
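# A minimal sketch of the config.yml fixture these tests assume, inferred from
# the asserts above (the real fixture may contain more keys):
#
#   data:
#     test: Abbracadabra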
# sheriffbarrow/production-ecommerce
from django.shortcuts import render, HttpResponseRedirect, redirect, HttpResponse
from django.contrib.auth.models import User, auth
from django.views import generic
from django.conf import settings
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView, View, TemplateView
from django.utils import timezone
from ecommerce.forms import (ContactForm, ProductForm, Quick_ServiceForm, VendorForm,
                             VendorImageForm, HouseImageForm, CarImageForm, RentCarForm,
                             ProductImageForm, RentHouseForm, OrderFoodForm)
from . import forms
from django.contrib.auth import login, authenticate
from ecommerce.models import (Vendor, VendorImage, Product, ProductImage, Quick_Service,
                              FoodImage, CarImage, HouseImage, RentCar, RentHouse)
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import CreateView
from django.forms import modelformset_factory
from django.core import serializers
from account.models import UserVendor
from django.core.mail import send_mail, EmailMessage
from django import template
from django.template.loader import get_template
from django.contrib import messages
import json
from hitcount.views import HitCountDetailView

# Missing in the original file but required by the code below:
import random
import string
import stripe

# Also referenced below but never imported here: Order, OrderItem, Item,
# Address, Coupon, Payment, Refund, UserProfile, Article and the
# CheckoutForm/CouponForm/PaymentForm/RefundForm classes (presumably defined
# in this project's models/forms modules).


def create_ref_code():
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))


def tr(request):
    return render(request, 'ecommerce/base2.html')


class Form_view(TemplateView):
    template_name = 'ecommerce/sell.html'

    def get(self, request):
        form = ProductForm()
        return render(request, self.template_name, {'form': form})


class HomeView(ListView):
    model = Product
    template_name = 'ecommerce/index.html'
    context_object_name = 'items'
    ordering = ['-posted_date']


def account_fitler(request):
    items = Product.objects.filter(vendor=request.user).order_by('-posted_date')
    context = {
        'items': items,
    }
    return render(request, 'account/account_settings.html', context)


def admin(request):
    return render(request, 'admin/base_site.html')


def slider(request):
    # change the time field in all models to posted date for uniformity
    items = Product.objects.all()[:8]
    slides = Vendor.objects.all().order_by('-date')[:15]
    vendor = Vendor.objects.all().order_by('-date')
    cars = RentCar.objects.all().order_by('-pub_date')[:8]
    house = RentHouse.objects.all().order_by('-post_date')[:8]
    context = {
        'house': house,
        'cars': cars,
        'slides': slides,
        'items': items,
        'vendor': vendor,
    }
    return render(request, "ecommerce/index.html", context)


def any_view(request):
    posts = Article.objects.all().order_by("-pub_date")[:5]
    return render(request, "show/temp.html", {'posts': posts})


class SellMessage(SuccessMessageMixin, CreateView):
    template_name = 'ecommerce/sell.html'
    form_class = ProductForm
    success_url = 'ecommerce:home'
    success_message = 'be ready to meet your buyers soon!'


class VendorView(ListView):
    model = Vendor
    template_name = 'ecommerce/index.html'
    paginate_by = 10
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.all()


@login_required(login_url='/account/login-required/')
def sell(request):
    if request.method == 'POST':
        form = ProductForm(request.POST)
        imageform = forms.ProductImageForm(request.POST, request.FILES)
        img = request.FILES.getlist('productimages')
        if form.is_valid() and imageform.is_valid():
            instance = form.save(commit=False)
            instance.vendor = request.user
            instance.save()
            for f in img:
                image_instance = ProductImage(image=f, vendor=instance)
                image_instance.save()
            messages.success(request, "Product uploaded successfully and it's ready for purchase!!")
        else:
            messages.error(request, "Error!!, due to internal error or internet connectivity")
            form = VendorForm()
            imageform = ProductImageForm()
    return render(request, 'ecommerce/sell.html', {'form': ProductForm(), 'imageform': ProductImageForm()})


@login_required(login_url='/account/login-required/')
def vendor_sell(request):
    if request.method == 'POST':
        form = VendorForm(request.POST)
        file_form = forms.VendorImageForm(request.POST, request.FILES)
        files = request.FILES.getlist('images')
        if form.is_valid() and file_form.is_valid():
            instance = form.save(commit=False)
            instance.vendor = request.user
            instance.save()
            for f in files:
                file_instance = VendorImage(file=f, vendor=instance)
                file_instance.save()
            messages.success(request, "Vendor created successfully, Good luck...!!")
        else:
            messages.error(request, "Error, due to internal or internet connectivity!!")
            form = VendorForm()
            file_form = VendorImageForm()
    return render(request, 'ecommerce/vendor_profile.html', {'form': VendorForm(), 'file_form': VendorImageForm()})


@login_required(login_url='/account/login-required/')
def vendor_sell_multiple(request):
    # number of images to be allowed
    ImageFormSet = modelformset_factory(VendorImage, fields=('file',), extra=5)
    if request.method == "POST":
        form = VendorForm(request.POST)
        formset = ImageFormSet(request.POST or None, request.FILES or None)
        if form.is_valid() and formset.is_valid():
            vendor = form.save(commit=False)
            vendor.vendor = request.user
            vendor.save()
            for f in formset:
                try:
                    photo = VendorImage(vendor=vendor, file=f.cleaned_data['file'])
                    photo.save()
                except Exception as e:
                    break
            messages.success(request, 'successfully')
            return redirect('ecommerce:vendor_options')
    # this helps to not crash if the user
    # does not upload up to the max.
    else:
        form = VendorForm()
        formset = ImageFormSet(queryset=VendorImage.objects.none())
    context = {
        'form': form,
        'formset': formset,
    }
    return render(request, 'ecommerce/vendor_profile.html', {'form': form, 'formset': formset})


@login_required(login_url='/account/login-required/')
def rent_car_multiple(request):
    # number of images to be allowed
    ImageFormSet = modelformset_factory(CarImage, fields=('image',), extra=5)
    if request.method == "POST":
        form = RentCarForm(request.POST)
        formset = ImageFormSet(request.POST or None, request.FILES or None)
        if form.is_valid() and formset.is_valid():
            vendor = form.save(commit=False)
            vendor.vendor = request.user
            vendor.save()
            for f in formset:
                try:
                    photo = CarImage(vendor=vendor, image=f.cleaned_data['image'])
                    photo.save()
                except Exception as e:
                    break
            messages.success(request, 'successfully')
            return redirect('ecommerce:home')
    # this helps to not crash if the user
    # does not upload up to the max.
    else:
        form = RentCarForm()
        formset = ImageFormSet(queryset=CarImage.objects.none())
    context = {
        'form': form,
        'formset': formset,
    }
    return render(request, 'ecommerce/rent_car.html', {'form': form, 'formset': formset})


@login_required(login_url='/account/login-required/')
def rent_car(request):
    if request.method == 'POST':
        form = RentCarForm(request.POST)
        carimageform = forms.CarImageForm(request.POST, request.FILES)
        img = request.FILES.getlist('carimages')
        if form.is_valid() and carimageform.is_valid():
            instance = form.save(commit=False)
            instance.vendor = request.user
            instance.save()
            for f in img:
                image_instance = CarImage(image=f, vendor=instance)
                image_instance.save()
            messages.success(
                request, "Your car has been successfully uploaded and it's ready for buyers!!")
    else:
        form = RentCarForm()
        carimageform = CarImageForm()
    return render(request, 'ecommerce/rent_car.html', {'form': RentCarForm(), 'carimageform': CarImageForm()})


@login_required(login_url='/account/login-required/')
def rent_house(request):
    if request.method == 'POST':
        form = RentHouseForm(request.POST)
        houseimageform = forms.HouseImageForm(request.POST, request.FILES)
        img = request.FILES.getlist('houseimages')
        if form.is_valid() and houseimageform.is_valid():
            instance = form.save(commit=False)
            instance.vendor = request.user
            instance.save()
            for f in img:
                image_instance = HouseImage(image=f, vendor=instance)
                image_instance.save()
            messages.success(
                request, "Your house/apartments has been successfully uploaded and it's ready for buyers!!")
    else:
        form = RentHouseForm()
        houseimageform = HouseImageForm()
    return render(request, 'ecommerce/rent_house.html', {'form': RentHouseForm(), 'houseimageform': HouseImageForm()})


@login_required(login_url='/account/login-required/')
def food(request):
    if request.method == 'POST':
        form = forms.OrderFoodForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.vendor = request.user
            instance.save()  # save to db
            return redirect('ecommerce:home')
    else:
        form = forms.OrderFoodForm()
    return render(request, 'ecommerce/food.html', {'form': form})


class Plumber(ListView):
    model = Vendor
    template_name = 'ecommerce/plumbing.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='PLUMBING')


class Electrical(ListView):
    model = Vendor
    template_name = 'ecommerce/electrical.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='ELECTRICAL')


class Cleaning(ListView):
    model = Vendor
    template_name = 'ecommerce/cleaning.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='CLEANING')


class Garden(ListView):
    model = Vendor
    template_name = 'ecommerce/garden.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='GARDEN')


class Tilin(ListView):
    model = Vendor
    template_name = 'ecommerce/tilin.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='TILING')


class Laundry(ListView):
    model = Vendor
    template_name = 'ecommerce/laundry.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='LAUNDRY')


class Carpentry(ListView):
    model = Vendor
    template_name = 'ecommerce/carpentry.html'
    context_object_name = 'vendor'

    def get_queryset(self):
        return Vendor.objects.filter(category__iexact='CARPENTRY')


class ServiceDetail(HitCountDetailView):
    model = Vendor
    template_name = 'ecommerce/plumber-detail.html'
    context_object_name = 'detail'
    pk_url_kwarg = 'vendor_pk'
    # set to True to count the hit
    count_hit = True

    def get_context_data(self, **kwargs):
        context = super(ServiceDetail, self).get_context_data(**kwargs)
        context.update({
            'related_products': Vendor.objects.order_by('-hit_count_generic__hits'),
        })
        return context


class CarDetail(HitCountDetailView):
    model = RentCar
    template_name = 'ecommerce/cardetails.html'
    context_object_name = 'cardetail'
    pk_url_kwarg = 'pk'
    # set to True to count the hit
    count_hit = True

    def get_context_data(self, **kwargs):
        context = super(CarDetail, self).get_context_data(**kwargs)
        context.update({
            'related_products': RentCar.objects.order_by('-hit_count_generic__hits'),
        })
        return context


class HouseDetail(HitCountDetailView):
    model = RentHouse
    template_name = 'ecommerce/housedetails.html'
    context_object_name = 'housedetail'
    pk_url_kwarg = 'pk'
    # set to True to count the hit
    count_hit = True

    def get_context_data(self, **kwargs):
        context = super(HouseDetail, self).get_context_data(**kwargs)
        context.update({
            'related_products': RentHouse.objects.order_by('-hit_count_generic__hits'),
        })
        return context


# NOTE: shadows django.core.mail.send_mail imported above
def send_mail(request):
    if not request.user.is_authenticated:
        return redirect("account:login")
    Contact_Form = ContactForm
    if request.method == 'POST':
        form = Contact_Form(request.POST)
        if form.is_valid():
            name = request.POST.get('name')
            service = request.POST.get('service')
            email = request.POST.get('email')
            phone = request.POST.get('phone')
            duration = request.POST.get('duration')
            comment = request.POST.get('comment')
            template = get_template('ecommerce/contact_form.txt')
            context = {
                'name': name,
                'service': service,
                'email': email,
                'phone': phone,
                'duration': duration,
                'comment': comment,
            }
            content = template.render(context)
            email = EmailMessage(
                "Request for Service",
                content,
                "company name" + '',
                ['<EMAIL>'],
                headers={'Reply to': email}
            )
            email.send()
            messages.success(
                request, "Request made successfully, our correspondent will contact you shortly..!!")
        else:
            messages.error(request, "Error!! Check your internet connection and try again!!..")
    form = Contact_Form(
        initial={
            'email': request.user.email,
            'client-name': request.detail.username,
            'name': request.user.trade_name,
        }
    )
    return render(request, 'ecommerce/contact.html', {'form': Contact_Form})


class ProductDetail(HitCountDetailView):
    model = Product
    template_name = 'ecommerce/product.html'
    context_object_name = 'details'
    pk_url_kwarg = 'item_pk'
    # set to True to count the hit
    count_hit = True

    def get_context_data(self, **kwargs):
        context = super(ProductDetail, self).get_context_data(**kwargs)
        context.update({
            'related_products': Product.objects.order_by('-hit_count_generic__hits')[:3],
        })
        return context


class Shop(ListView):
    model = Product
    template_name = 'ecommerce/shop.html'
    context_object_name = 'item'
    ordering = ['-posted_date']


def electrical(request):
    return render(request, 'ecommerce/electrical.html')


def cleaning(request):
    return render(request, 'ecommerce/cleaning.html')


def garden(request):
    return render(request, 'ecommerce/garden.html')


def laundry(request):
    return render(request, 'ecommerce/laundry.html')


def tilin(request):
    return render(request, 'ecommerce/tilin.html')


def carpentry(request):
    return render(request, 'ecommerce/carpentry.html')


def vendor_profile(request):
    return render(request, 'ecommerce/vendor_profile.html')


def profile(request):
    if 'username' in request.session:
        posts = request.session['username']
        query = User.objects.filter(username=posts)
        return render(request, 'vendorprofile/profile.html', {"query": query})
    else:
        return render(request, 'vendorprofile/help.html', {})


def vendor_options(request):
    return render(request, 'ecommerce/vendor_options.html')


class Vendor_option(ListView):
    model = Quick_Service
    template_name = 'ecommerce/vendor_options.html'
    context_object_name = 'Quick_Service'

    def get_queryset(self):
        return Quick_Service.objects.all()


def accountSettings(request):
    context = {}
    return render(request, 'ecommerce/account_settings.html', context)


class Vendor_Detail(DetailView):
    model = Vendor
    template_name = 'ecommerce/'


def shop(request):
    return render(request, 'ecommerce/shop.html')


def forget(request):
    return render(request, 'ecommerce/forgot-password.html')


def products(request):
    context = {
        'items': Item.objects.all()
    }
    return render(request, "ecommerce/products.html", context)


def is_valid_form(values):
    valid = True
    for field in values:
        if field == '':
            valid = False
    return valid


class CheckoutView(View):
    def get(self, *args, **kwargs):
        try:
            order = Order.objects.get(user=self.request.user, ordered=False)
            form = CheckoutForm()
            context = {
                'form': form,
                'couponform': CouponForm(),
                'order': order,
                'DISPLAY_COUPON_FORM': True
            }
            shipping_address_qs = Address.objects.filter(
                user=self.request.user,
                address_type='S',
                default=True
            )
            if shipping_address_qs.exists():
                context.update(
                    {'default_shipping_address': shipping_address_qs[0]})
            billing_address_qs = Address.objects.filter(
                user=self.request.user,
                address_type='B',
                default=True
            )
            if billing_address_qs.exists():
                context.update(
                    {'default_billing_address': billing_address_qs[0]})
            return render(self.request, "checkout.html", context)
        except ObjectDoesNotExist:
            messages.info(self.request, "You do not have an active order")
            return redirect("ecommerce:checkout")

    def post(self, *args, **kwargs):
        form = CheckoutForm(self.request.POST or None)
        try:
            order = Order.objects.get(user=self.request.user, ordered=False)
            if form.is_valid():
                use_default_shipping = form.cleaned_data.get(
                    'use_default_shipping')
                if use_default_shipping:
                    print("Using the default shipping address")
                    address_qs = Address.objects.filter(
                        user=self.request.user,
                        address_type='S',
                        default=True
                    )
                    if address_qs.exists():
                        shipping_address = address_qs[0]
                        order.shipping_address = shipping_address
                        order.save()
                    else:
                        messages.info(
                            self.request, "No default shipping address available")
                        return redirect('ecommerce:checkout')
                else:
                    print("User is entering a new shipping address")
                    shipping_address1 = form.cleaned_data.get(
                        'shipping_address')
                    shipping_address2 = form.cleaned_data.get(
                        'shipping_address2')
                    shipping_country = form.cleaned_data.get(
                        'shipping_country')
                    shipping_zip = form.cleaned_data.get('shipping_zip')
                    if is_valid_form([shipping_address1, shipping_country, shipping_zip]):
                        shipping_address = Address(
                            user=self.request.user,
                            street_address=shipping_address1,
                            apartment_address=shipping_address2,
                            country=shipping_country,
                            zip=shipping_zip,
                            address_type='S'
                        )
                        shipping_address.save()
                        order.shipping_address = shipping_address
                        order.save()
                        set_default_shipping = form.cleaned_data.get(
                            'set_default_shipping')
                        if set_default_shipping:
                            shipping_address.default = True
                            shipping_address.save()
                    else:
                        messages.info(
                            self.request, "Please fill in the required shipping address fields")

                use_default_billing = form.cleaned_data.get(
                    'use_default_billing')
                same_billing_address = form.cleaned_data.get(
                    'same_billing_address')
                if same_billing_address:
                    billing_address = shipping_address
                    billing_address.pk = None
                    billing_address.save()
                    billing_address.address_type = 'B'
                    billing_address.save()
                    order.billing_address = billing_address
                    order.save()
                elif use_default_billing:
                    print("Using the default billing address")
                    address_qs = Address.objects.filter(
                        user=self.request.user,
                        address_type='B',
                        default=True
                    )
                    if address_qs.exists():
                        billing_address = address_qs[0]
                        order.billing_address = billing_address
                        order.save()
                    else:
                        messages.info(
                            self.request, "No default billing address available")
                        return redirect('ecommerce:checkout')
                else:
                    print("User is entering a new billing address")
                    billing_address1 = form.cleaned_data.get(
                        'billing_address')
                    billing_address2 = form.cleaned_data.get(
                        'billing_address2')
                    billing_country = form.cleaned_data.get(
                        'billing_country')
                    billing_zip = form.cleaned_data.get('billing_zip')
                    if is_valid_form([billing_address1, billing_country, billing_zip]):
                        billing_address = Address(
                            user=self.request.user,
                            street_address=billing_address1,
                            apartment_address=billing_address2,
                            country=billing_country,
                            zip=billing_zip,
                            address_type='B'
                        )
                        billing_address.save()
                        order.billing_address = billing_address
                        order.save()
                        set_default_billing = form.cleaned_data.get(
                            'set_default_billing')
                        if set_default_billing:
                            billing_address.default = True
                            billing_address.save()
                    else:
                        messages.info(
                            self.request, "Please fill in the required billing address fields")

                payment_option = form.cleaned_data.get('payment_option')
                if payment_option == 'S':
                    return redirect('ecommerce:payment', payment_option='stripe')
                elif payment_option == 'P':
                    return redirect('ecommerce:payment', payment_option='paypal')
                else:
                    messages.warning(
                        self.request, "Invalid payment option selected")
                    return redirect('ecommerce:checkout')
        except ObjectDoesNotExist:
            messages.warning(self.request, "You do not have an active order")
            return redirect("ecommerce:order-summary")


class PaymentView(View):
    def get(self, *args, **kwargs):
        order = Order.objects.get(user=self.request.user, ordered=False)
        if order.billing_address:
            context = {
                'order': order,
                'DISPLAY_COUPON_FORM': False
            }
            userprofile = self.request.user.userprofile
            if userprofile.one_click_purchasing:
                # fetch the users card list
                cards = stripe.Customer.list_sources(
                    userprofile.stripe_customer_id,
                    limit=3,
                    object='card'
                )
                card_list = cards['data']
                if len(card_list) > 0:
                    # update the context with the default card
                    context.update({
                        'card': card_list[0]
                    })
            return render(self.request, "payment.html", context)
        else:
            messages.warning(
                self.request, "You have not added a billing address")
            return redirect("ecommerce:checkout")

    def post(self, *args, **kwargs):
        order = Order.objects.get(user=self.request.user, ordered=False)
        form = PaymentForm(self.request.POST)
        userprofile = UserProfile.objects.get(user=self.request.user)
        if form.is_valid():
            token = form.cleaned_data.get('stripeToken')
            save = form.cleaned_data.get('save')
            use_default = form.cleaned_data.get('use_default')

            if save:
                if userprofile.stripe_customer_id != '' and userprofile.stripe_customer_id is not None:
                    customer = stripe.Customer.retrieve(
                        userprofile.stripe_customer_id)
                    customer.sources.create(source=token)
                else:
                    customer = stripe.Customer.create(
                        email=self.request.user.email,
                    )
                    customer.sources.create(source=token)
                    userprofile.stripe_customer_id = customer['id']
                    userprofile.one_click_purchasing = True
                    userprofile.save()

            amount = int(order.get_total() * 100)

            try:
                if use_default or save:
                    # charge the customer because we cannot charge the token more than once
                    charge = stripe.Charge.create(
                        amount=amount,  # cents
                        currency="usd",
                        customer=userprofile.stripe_customer_id
                    )
                else:
                    # charge once off on the token
                    charge = stripe.Charge.create(
                        amount=amount,  # cents
                        currency="usd",
                        source=token
                    )

                # create the payment
                payment = Payment()
                payment.stripe_charge_id = charge['id']
                payment.user = self.request.user
                payment.amount = order.get_total()
                payment.save()

                # assign the payment to the order
                order_items = order.items.all()
                order_items.update(ordered=True)
                for item in order_items:
                    item.save()

                order.ordered = True
                order.payment = payment
                order.ref_code = create_ref_code()
                order.save()

                messages.success(self.request, "Your order was successful!")
                return redirect("/")

            except stripe.error.CardError as e:
                body = e.json_body
                err = body.get('error', {})
                messages.warning(self.request, f"{err.get('message')}")
                return redirect("/")

            except stripe.error.RateLimitError as e:
                # Too many requests made to the API too quickly
                messages.warning(self.request, "Rate limit error")
                return redirect("/")

            except stripe.error.InvalidRequestError as e:
                # Invalid parameters were supplied to Stripe's API
                print(e)
                messages.warning(self.request, "Invalid parameters")
                return redirect("/")

            except stripe.error.AuthenticationError as e:
                # Authentication with Stripe's API failed
                # (maybe you changed API keys recently)
                messages.warning(self.request, "Not authenticated")
                return redirect("/")

            except stripe.error.APIConnectionError as e:
                # Network communication with Stripe failed
                messages.warning(self.request, "Network error")
                return redirect("/")

            except stripe.error.StripeError as e:
                # Display a very generic error to the user, and maybe send
                # yourself an email
                messages.warning(
                    self.request, "Something went wrong. You were not charged. Please try again.")
                return redirect("/")

            except Exception as e:
                # send an email to ourselves
                messages.warning(
                    self.request, "A serious error occurred. We have been notified.")
                return redirect("/")

        messages.warning(self.request, "Invalid data received")
        return redirect("/payment/stripe/")


class OrderSummaryView(LoginRequiredMixin, View):
    def get(self, *args, **kwargs):
        try:
            order = Order.objects.get(user=self.request.user, ordered=False)
            context = {
                'object': order
            }
            return render(self.request, 'order_summary.html', context)
        except ObjectDoesNotExist:
            messages.warning(self.request, "You do not have an active order")
            return redirect("/")


class ItemDetailView(DetailView):
    model = Product
    template_name = "ecommerce/product.html"


@login_required
def add_to_cart(request, slug):
    item = get_object_or_404(Item, slug=slug)
    order_item, created = OrderItem.objects.get_or_create(
        item=item,
        user=request.user,
        ordered=False
    )
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    if order_qs.exists():
        order = order_qs[0]
        # check if the order item is in the order
        if order.items.filter(item__slug=item.slug).exists():
            order_item.quantity += 1
            order_item.save()
            messages.info(request, "This item quantity was updated.")
            return redirect("ecommerce:order-summary")
        else:
            order.items.add(order_item)
            messages.info(request, "This item was added to your cart.")
            return redirect("ecommerce:order-summary")
    else:
        ordered_date = timezone.now()
        order = Order.objects.create(
            user=request.user, ordered_date=ordered_date)
        order.items.add(order_item)
        messages.info(request, "This item was added to your cart.")
        return redirect("ecommerce:order-summary")


@login_required
def remove_from_cart(request, slug):
    item = get_object_or_404(Item, slug=slug)
    order_qs = Order.objects.filter(
        user=request.user,
        ordered=False
    )
    if order_qs.exists():
        order = order_qs[0]
        # check if the order item is in the order
        if order.items.filter(item__slug=item.slug).exists():
            order_item = OrderItem.objects.filter(
                item=item,
                user=request.user,
                ordered=False
            )[0]
            order.items.remove(order_item)
            messages.info(request, "This item was removed from your cart.")
            return redirect("ecommerce:order-summary")
        else:
            messages.info(request, "This item was not in your cart")
            return redirect("ecommerce:product", slug=slug)
    else:
        messages.info(request, "You do not have an active order")
        return redirect("ecommerce:product", slug=slug)


@login_required
def remove_single_item_from_cart(request, slug):
    item = get_object_or_404(Item, slug=slug)
    order_qs = Order.objects.filter(
        user=request.user,
        ordered=False
    )
    if order_qs.exists():
        order = order_qs[0]
        # check if the order item is in the order
        if order.items.filter(item__slug=item.slug).exists():
            order_item = OrderItem.objects.filter(
                item=item,
                user=request.user,
                ordered=False
            )[0]
            if order_item.quantity > 1:
                order_item.quantity -= 1
                order_item.save()
            else:
                order.items.remove(order_item)
            messages.info(request, "This item quantity was updated.")
            return redirect("ecommerce:order-summary")
        else:
            messages.info(request, "This item was not in your cart")
            return redirect("ecommerce:product", slug=slug)
    else:
        messages.info(request, "You do not have an active order")
        return redirect("ecommerce:product", slug=slug)


def get_coupon(request, code):
    try:
        coupon = Coupon.objects.get(code=code)
        return coupon
    except ObjectDoesNotExist:
        messages.info(request, "This coupon does not exist")
        return redirect("ecommerce:checkout")


class AddCouponView(View):
    def post(self, *args, **kwargs):
        form = CouponForm(self.request.POST or None)
        if form.is_valid():
            try:
                code = form.cleaned_data.get('code')
                order = Order.objects.get(
                    user=self.request.user, ordered=False)
                order.coupon = get_coupon(self.request, code)
                order.save()
                messages.success(self.request, "Successfully added coupon")
                return redirect("ecommerce:checkout")
            except ObjectDoesNotExist:
                messages.info(self.request, "You do not have an active order")
                return redirect("ecommerce:checkout")


class RequestRefundView(View):
    def get(self, *args, **kwargs):
        form = RefundForm()
        context = {
            'form': form
        }
        return render(self.request, "request_refund.html", context)

    def post(self, *args, **kwargs):
        form = RefundForm(self.request.POST)
        if form.is_valid():
            ref_code = form.cleaned_data.get('ref_code')
            message = form.cleaned_data.get('message')
            email = form.cleaned_data.get('email')
            # edit the order
            try:
                order = Order.objects.get(ref_code=ref_code)
                order.refund_requested = True
                order.save()

                # store the refund
                refund = Refund()
                refund.order = order
                refund.reason = message
                refund.email = email
                refund.save()

                messages.info(self.request, "Your request was received.")
                return redirect("ecommerce:request-refund")

            except ObjectDoesNotExist:
                messages.info(self.request, "This order does not exist.")
                return redirect("ecommerce:request-refund")
import asyncio
import itertools
import logging
from xml.etree import ElementTree

import serial_asyncio
from serial import SerialException

from . import emu2_entities

_LOGGER = logging.getLogger(__name__)


class Emu2:

    def __init__(self, device):
        self._device = device
        self._connected = False
        self._callback = None
        self._writer = None
        self._reader = None
        self._writer_lock = asyncio.Lock()
        self._data = {}

    def get_data(self, klass):
        _LOGGER.debug("Requesting data %s", klass)
        return self._data.get(klass.tag_name())

    def register_process_callback(self, callback):
        self._callback = callback

    def connected(self) -> bool:
        return self._connected

    async def test_available(self) -> bool:
        if await self.open() == False:
            return False
        await self.close()
        return True

    async def wait_connected(self, timeout) -> bool:
        count = 0
        while self._connected == False:
            await asyncio.sleep(1)
            count += 1
            if (count > timeout):
                return False
        return True

    async def close(self) -> None:
        if self._writer is not None:
            self._writer.close()
            await self._writer.wait_closed()
        self._connected = False

    async def open(self) -> bool:
        if self._connected == True:
            return True
        try:
            self._reader, self._writer = await serial_asyncio.open_serial_connection(
                url=self._device,
                baudrate=115200
            )
        except SerialException as ex:
            _LOGGER.error(ex)
            return False
        return True

    async def serial_read(self):
        _LOGGER.info("Starting serial_read loop")
        if await self.open() == False:
            return
        response = ''
        while True:
            try:
                line = await self._reader.readline()
            except SerialException as ex:
                _LOGGER.error(ex)
                self._connected = False
                break
            line = line.decode("utf-8").strip()
            _LOGGER.debug("received %s", line)
            response += line
            if line.startswith('</'):
                try:
                    self._connected = True
                    self._process_reply(response)
                    response = ''
                except Exception as ex:
                    _LOGGER.error("something went wrong: %s", ex)

    async def issue_command(self, command, params=None) -> bool:
        if self._connected == False:
            _LOGGER.error("issued command while not connected")
            return False

        root = ElementTree.Element('Command')
        name_field = ElementTree.SubElement(root, 'Name')
        name_field.text = command
        if params is not None:
            for k, v in params.items():
                if v is not None:
                    field = ElementTree.SubElement(root, k)
                    field.text = v
        bin_string = ElementTree.tostring(root)
        _LOGGER.debug("XML write %s", bin_string)

        try:
            async with self._writer_lock:
                self._writer.write(bin_string)
                await self._writer.drain()
                # Throttle time between writes
                await asyncio.sleep(1)
        except SerialException as ex:
            _LOGGER.error(ex)
            return False
        return True

    def _process_reply(self, xml_str: str) -> None:
        try:
            wrapped = itertools.chain('<Root>', xml_str, '</Root>')
            root = ElementTree.fromstringlist(wrapped)
        except ElementTree.ParseError:
            _LOGGER.debug("Malformed XML: %s", xml_str)
            return

        for tree in root:
            response_type = tree.tag
            klass = emu2_entities.Entity.tag_to_class(response_type)
            if klass is None:
                _LOGGER.debug("Unsupported tag: %s", response_type)
                continue
            self._data[response_type] = klass(tree)
            # trigger callback
            if self._callback is not None:
                _LOGGER.debug("serial_read callback for response %s", response_type)
                self._callback(response_type, klass(tree))

    # Convert boolean to Y/N for commands
    def _format_yn(self, value):
        if value is None:
            return None
        if value:
            return 'Y'
        else:
            return 'N'

    # Convert an integer into a hex string
    def _format_hex(self, num, digits=8):
        return "0x{:0{digits}x}".format(num, digits=digits)

    # Check if an event is a valid value
    def _check_valid_event(self, event, allow_none=True):
        enum = ['time', 'summation', 'billing_period', 'block_period',
                'message', 'price', 'scheduled_prices', 'demand']
        if allow_none:
            enum.append(None)
        if event not in enum:
            raise ValueError('Invalid event specified')

    # The following are convenience methods for sending commands. Commands
    # can also be sent manually using the generic issue_command method.

    #################################
    #         Raven Commands        #
    #################################

    async def restart(self):
        return await self.issue_command('restart')

    async def get_connection_status(self):
        return await self.issue_command('get_connection_status')

    async def get_device_info(self):
        return await self.issue_command('get_device_info')

    async def get_schedule(self, mac=None, event=None):
        self._check_valid_event(event)
        opts = {'MeterMacId': mac, 'Event': event}
        return await self.issue_command('get_schedule', opts)

    async def set_schedule(self, mac=None, event=None, frequency=10, enabled=True):
        self._check_valid_event(event, allow_none=False)
        opts = {
            'MeterMacId': mac,
            'Event': event,
            'Frequency': self._format_hex(frequency),
            'Enabled': self._format_yn(enabled)
        }
        return await self.issue_command('set_schedule', opts)

    async def set_schedule_default(self, mac=None, event=None):
        self._check_valid_event(event)
        opts = {'MeterMacId': mac, 'Event': event}
        return await self.issue_command('set_schedule_default', opts)

    async def get_meter_list(self):
        return await self.issue_command('get_meter_list')

    ##########################
    #      Meter Commands    #
    ##########################

    async def get_meter_info(self, mac=None):
        opts = {'MeterMacId': mac}
        return await self.issue_command('get_meter_info', opts)

    async def get_network_info(self):
        return await self.issue_command('get_network_info')

    async def set_meter_info(self, mac=None, nickname=None, account=None, auth=None, host=None, enabled=None):
        opts = {
            'MeterMacId': mac,
            'NickName': nickname,
            'Account': account,
            'Auth': auth,
            'Host': host,
            'Enabled': self._format_yn(enabled)
        }
        return await self.issue_command('set_meter_info', opts)

    ############################
    #       Time Commands      #
    ############################

    async def get_time(self, mac=None, refresh=True):
        opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
        return await self.issue_command('get_time', opts)

    async def get_message(self, mac=None, refresh=True):
        opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
        return await self.issue_command('get_message', opts)

    async def confirm_message(self, mac=None, message_id=None):
        if message_id is None:
            raise ValueError('Message id is required')
        opts = {'MeterMacId': mac, 'Id': self._format_hex(message_id)}
        return await self.issue_command('confirm_message', opts)

    #########################
    #     Price Commands    #
    #########################

    async def get_current_price(self, mac=None):
        opts = {'MeterMacId': mac}
        return await self.issue_command('get_current_price', opts)

    # Price is in cents, w/ decimals (e.g. "24.373")
    async def set_current_price(self, mac=None, price="0.0"):
        parts = price.split(".", 1)
        if len(parts) == 1:
            trailing = 2
            price = int(parts[0])
        else:
            trailing = len(parts[1]) + 2
            price = int(parts[0] + parts[1])
        opts = {
            'MeterMacId': mac,
            'Price': self._format_hex(price),
            'TrailingDigits': self._format_hex(trailing, digits=2)
        }
        return await self.issue_command('set_current_price', opts)

    ###############################
    #   Simple Metering Commands  #
    ###############################

    async def get_instantaneous_demand(self, mac=None, refresh=True):
        opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
        return await self.issue_command('get_instantaneous_demand', opts)

    async def get_current_summation_delivered(self, mac=None, refresh=True):
        opts = {'MeterMacId': mac, 'Refresh': self._format_yn(refresh)}
        return await self.issue_command('get_current_summation_delivered', opts)

    async def get_current_period_usage(self, mac=None):
        opts = {'MeterMacId': mac}
        return await self.issue_command('get_current_period_usage', opts)

    async def get_last_period_usage(self, mac=None):
        opts = {'MeterMacId': mac}
        return await self.issue_command('get_last_period_usage', opts)

    async def close_current_period(self, mac=None):
        opts = {'MeterMacId': mac}
        return await self.issue_command('close_current_period', opts)

    async def set_fast_poll(self, mac=None, frequency=4, duration=20):
        opts = {
            'MeterMacId': mac,
            'Frequency': self._format_hex(frequency, digits=4),
            'Duration': self._format_hex(duration, digits=4)
        }
        return await self.issue_command('set_fast_poll', opts)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
models
----------------------------------
The basic models
"""

import os


class Concentration(object):
    """ the model holding the charge balance input values """

    chemical_amount = None

    def __init__(self):
        super(Concentration, self).__init__()

        #: the chemical map from normalized values to chemical representation
        self.chemical_map = {
            'calcium': 'ca',
            'dissolved calcium': 'ca',
            'dissolved magnesium': 'mg',
            'dissolved potassium': 'k',
            'dissolved sodium': 'na',
            'magnesium': 'mg',
            'potassium': 'k',
            'sodium': 'na',
            'sodium plus potassium': 'na+k',
            'total calcium': 'ca',
            'total magnesium': 'mg',
            'total potassium': 'k',
            'total sodium': 'na',
            'bicarbonate': 'hco3',
            'bicarbonate as hco3': 'hco3',
            'carbonate': 'co3',
            'carbonate (co3)': 'co3',
            'carbonate as co3': 'co3',
            'chloride': 'cl',
            'sulfate': 'so4',
            'nitrate': 'no3',
            'dissolved nitrate: no3': 'no3',
            'nitrite': 'no2',
            'dissolved nitrite: no2': 'no2',
            'sulfate as so4': 'so4',
            'bicarbonate based on alkalinity': 'hco3',
            'carbonate based on alkalinity': 'co3',
            'nitrate and nitrite as no3': 'no3',
            'sulphate': 'so4'
        }

        #: the tracked chemicals for a charge balance and their concentration
        self.chemical_amount = {
            'ca': None, 'mg': None, 'na': None, 'k': None, 'cl': None,
            'hco3': None, 'co3': None, 'so4': None, 'no2': None,
            'no3': None, 'na+k': None
        }

    @property
    def calcium(self):
        return self._get_summed_value('ca')

    @property
    def magnesium(self):
        return self._get_summed_value('mg')

    @property
    def chloride(self):
        return self._get_summed_value('cl')

    @property
    def bicarbonate(self):
        return self._get_summed_value('hco3')

    @property
    def sulfate(self):
        return self._get_summed_value('so4')

    @property
    def carbonate(self):
        return self._get_summed_value('co3')

    @property
    def nitrate(self):
        return self._get_summed_value('no3')

    @property
    def nitrite(self):
        return self._get_summed_value('no2')

    @property
    def sodium(self):
        k = self._get_summed_value('k')
        na = self._get_summed_value('na')
        na_k = self._get_summed_value('na+k')

        if na_k is not None and k is not None and na is None:
            return na_k - k
        return na

    @property
    def potassium(self):
        k = self._get_summed_value('k')
        na = self._get_summed_value('na')
        na_k = self._get_summed_value('na+k')

        if na_k is not None and na is not None and k is None:
            return na_k - na
        return k

    @property
    def sodium_plus_potassium(self):
        nak = self._get_summed_value('na+k')
        k = self._get_summed_value('k')
        na = self._get_summed_value('na')

        if nak is not None and na is not None or k is not None:
            return 0
        return nak

    @property
    def has_major_params(self):
        """ determines if the concentration can be used in a charge balance """
        valid_chemicals = 5
        num_of_chemicals = 0

        if self.calcium is not None:
            num_of_chemicals += 1
        if self.magnesium is not None:
            num_of_chemicals += 1
        if self.chloride is not None:
            num_of_chemicals += 1
        if self.bicarbonate is not None:
            num_of_chemicals += 1
        if self.sulfate is not None:
            num_of_chemicals += 1

        valid = num_of_chemicals == valid_chemicals
        return valid and (
            self.sodium is not None or self.sodium_plus_potassium is not None)

    def append(self, concentration, detect_cond=None):
        """ merges the values of two concentrations into one """
        if not concentration.chemical_amount:
            return
        new = concentration.chemical_amount
        for key in new.keys():
            self._set(key, new[key], detect_cond)

    def _set(self, chemical, amount, detect_cond=None):
        """ sets the chemical value. Handles duplicate chemicals """
        # there was a problem with the sample; disregard
        if detect_cond:
            return

        # return if amount or chemical is None
        if amount is None or chemical is None:
            return

        chemical = chemical.lower()

        # do we care about this chemical?
        if chemical in self.chemical_map.keys():
            chemical = self.chemical_map[chemical]
        if chemical not in self.chemical_amount.keys():
            return

        # there is more than one sample for this chemical
        if self.chemical_amount[chemical] is not None:
            try:
                self.chemical_amount[chemical].append(amount)
            except AttributeError:
                # turn into a list for summing
                self.chemical_amount[chemical] = [
                    self.chemical_amount[chemical], amount]
            return

        self.chemical_amount[chemical] = amount

    def _get_summed_value(self, key):
        """ gets value from number or the average of a list of numbers """
        value = self.chemical_amount[key]
        try:
            return sum(value) / float(len(value))
        except TypeError:
            # value is not an array
            pass
        return value


class Field(object):
    """ a field model for taking the data in gdoc
    and transforming it into the data for the addfield gp tool
    """

    #: the field name to add to the feature class
    field_name = None
    #: the field's alias name
    field_alias = None
    #: the field type
    field_type = None
    #: the length of the field. Only useful for type String
    field_length = None
    #: the source of the field mapping
    field_source = None
    #: the field length default if none is set
    length_default = 50

    def __init__(self, arg):
        """ arg should be a set of field options (column, alias, type, ?length) """
        self.field_name = arg['destination']
        self.field_alias = arg['alias']
        self.field_type = self._etl_type(arg['type'])
        self.field_source = arg['source']

        if self.field_type == 'TEXT':
            try:
                self.field_length = arg['length']
            except KeyError:
                pass
                # print ('{} is of type text and '.format(self.field_name) +
                #        'has no limit set.'
                #        ' Defaulting to {}'.format(self.length_default))

    def _etl_type(self, field_type):
        """Turn schema types into arcpy field types"""
        # arcpy wants field types upper case
        field_type = field_type.upper()
        # field names are pretty similar if you remove int
        field_type = field_type.replace('INT', '').strip()

        if field_type == 'STRING':
            return 'TEXT'
        elif field_type == 'TIME':
            return 'DATE'
        else:
            return field_type


class Schema(object):
    """ The schema for the gdb as well as the ETL mapping """

    # NOTE: the original source aliased Lon_X as 'Latitude' and Lat_Y as
    # 'Longitude'; the aliases are swapped here to match the field names.
    station_gdoc_schema = [
        {'destination': 'OrgId', 'source': 'OrganizationIdentifier', 'alias': 'Organization Id', 'type': 'String', 'length': 20, 'index': 0},
        {'destination': 'OrgName', 'source': 'OrganizationFormalName', 'alias': 'Organization Name', 'type': 'String', 'length': 100, 'index': 1},
        {'destination': 'StationId', 'source': 'MonitoringLocationIdentifier', 'alias': 'Monitoring Location Id', 'type': 'String', 'length': 100, 'index': 2},
        {'destination': 'StationName', 'source': 'MonitoringLocationName', 'alias': 'Monitoring Location Name', 'type': 'String', 'length': 100, 'index': 3},
        {'destination': 'StationType', 'source': 'MonitoringLocationTypeName', 'alias': 'Monitoring Location Type', 'type': 'String', 'length': 100, 'index': 4},
        {'destination': 'StationComment', 'source': 'MonitoringLocationDescriptionText', 'alias': 'Monitoring Location Description', 'type': 'String', 'length': 1500, 'index': 5},
        {'destination': 'HUC8', 'source': 'HUCEightDigitCode', 'alias': 'HUC 8 Digit Code', 'type': 'String', 'length': 8, 'index': 6},
        {'destination': 'Lon_X', 'source': 'LongitudeMeasure', 'alias': 'Longitude', 'type': 'Double', 'index': 7},
        {'destination': 'Lat_Y', 'source': 'LatitudeMeasure', 'alias': 'Latitude', 'type': 'Double', 'index': 8},
        {'destination': 'HorAcc', 'source': 'HorizontalAccuracyMeasure/MeasureValue', 'alias': 'Horizontal Accuracy', 'type': 'Double', 'index': 9},
        {'destination': 'HorAccUnit', 'source': 'HorizontalAccuracyMeasure/MeasureUnitCode', 'alias': 'Horizontal Accuracy Unit', 'type': 'String', 'length': 10, 'index': 10},
        {'destination': 'HorCollMeth', 'source': 'HorizontalCollectionMethodName', 'alias': 'Horizontal Collection Method', 'type': 'String', 'length': 100, 'index': 11},
        {'destination': 'HorRef', 'source': 'HorizontalCoordinateReferenceSystemDatumName', 'alias': 'Horizontal Reference Datum', 'type': 'String', 'length': 10, 'index': 12},
        {'destination': 'Elev', 'source': 'VerticalMeasure/MeasureValue', 'alias': 'Elevation', 'type': 'Double', 'index': 13},
        {'destination': 'ElevUnit', 'source': 'VerticalMeasure/MeasureUnitCode', 'alias': 'Elevation Unit', 'type': 'String', 'length': 15, 'index': 14},
        {'destination': 'ElevAcc', 'source': 'VerticalAccuracyMeasure/MeasureValue', 'alias': 'Elevation Accuracy', 'type': 'Double', 'index': 15},
        {'destination': 'ElevAccUnit', 'source': 'VerticalAccuracyMeasure/MeasureUnitCode', 'alias': 'Elevation Accuracy Units', 'type': 'String', 'length': 4, 'index': 16},
        {'destination': 'ElevMeth', 'source': 'VerticalCollectionMethodName', 'alias': 'Elevation Collection Method', 'type': 'String', 'length': 100, 'index': 17},
        {'destination': 'ElevRef', 'source': 'VerticalCoordinateReferenceSystemDatumName', 'alias': 'Elevation Reference Datum', 'type': 'String', 'length': 12, 'index': 18},
        {'destination': 'StateCode', 'source': 'StateCode', 'alias': 'State Code', 'type': 'Short Int', 'index': 19},
        {'destination': 'CountyCode', 'source': 'CountyCode', 'alias': 'County Code', 'type': 'Short Int', 'index': 20},
        {'destination': 'Aquifer', 'source': 'AquiferName', 'alias': 'Aquifer', 'type': 'String', 'length': 100, 'index': 21},
        {'destination': 'FmType', 'source': 'FormationTypeText', 'alias': 'Formation Type', 'type': 'String', 'length': 100, 'index': 22},
        {'destination': 'AquiferType', 'source': 'AquiferTypeName', 'alias': 'Aquifer Type', 'type': 'String', 'length': 100, 'index': 23},
        {'destination': 'ConstDate', 'source': 'ConstructionDateText', 'alias': 'Construction Date', 'type': 'Date', 'length': 8, 'index': 24},
        {'destination': 'Depth', 'source': 'WellDepthMeasure/MeasureValue', 'alias': 'Well Depth', 'type': 'Double', 'index': 25},
        {'destination': 'DepthUnit', 'source': 'WellDepthMeasure/MeasureUnitCode', 'alias': 'Well Depth Units', 'type': 'String', 'length': 10, 'index': 26},
        {'destination': 'HoleDepth', 'source': 'WellHoleDepthMeasure/MeasureValue', 'alias': 'Hole Depth', 'type': 'Double', 'index': 27},
        {'destination': 'HoleDUnit', 'source': 'WellHoleDepthMeasure/MeasureUnitCode', 'alias': 'Hole Depth Units', 'type': 'String', 'length': 10, 'index': 28},
        {'destination': 'demELEVm', 'source': None, 'alias': 'DEM Elevation m', 'type': 'Double', 'index': 29},
        {'destination': 'DataSource', 'source': None, 'alias': 'Database Source', 'type': 'String', 'length': 20, 'index': 30},
        {'destination': 'WIN', 'source': None, 'alias': 'WR Well Id', 'type': 'Long Int', 'index': 31}
    ]

    result_gdoc_schema = [
        {'destination': 'AnalysisDate', 'source': 'AnalysisStartDate', 'alias': 'Analysis Start Date', 'type': 'Date', 'index': 0},
        {'destination': 'AnalytMeth', 'source': 'ResultAnalyticalMethod/MethodName', 'alias': 'Analytical Method Name', 'type': 'Text', 'length': 150, 'index': 1},
        {'destination': 'AnalytMethId', 'source': 'ResultAnalyticalMethod/MethodIdentifier', 'alias': 'Analytical Method Id', 'type': 'Text', 'length': 50, 'index': 2},
        {'destination': 'AutoQual', 'source': None, 'alias': 'Auto Quality Check', 'type': 'Text', 'index': 3},
        {'destination': 'CAS_Reg', 'source': None, 'alias': 'CAS Registry', 'type': 'Text', 'length': 50, 'index': 4},
        {'destination': 'Chrg', 'source': None, 'alias': 'Charge', 'type': 'Float', 'index': 5},
        {'destination': 'DataSource', 'source': None, 'alias': 'Database Source', 'type': 'Text', 'index': 6},
        {'destination': 'DetectCond', 'source': 'ResultDetectionConditionText', 'alias': 'Result Detection Condition', 'type': 'Text', 'length': 50, 'index': 7},
        {'destination': 'IdNum', 'source': None, 'alias': 'Unique Id', 'type': 'Long Int', 'index': 8},
        {'destination': 'LabComments', 'source': 'ResultLaboratoryCommentText', 'alias': 'Laboratory Comment', 'type': 'Text', 'length': 500, 'index': 9},
        {'destination': 'LabName', 'source': 'LaboratoryName', 'alias': 'Laboratory Name', 'type': 'Text', 'length': 100, 'index': 10},
        {'destination': 'Lat_Y', 'source': None, 'alias': 'Latitude', 'type': 'Double', 'index': 11},
        {'destination': 'LimitType', 'source': 'DetectionQuantitationLimitTypeName', 'alias': 'Detection Limit Type', 'type': 'Text', 'length': 250, 'index': 12},
        {'destination': 'Lon_X', 'source': None, 'alias': 'Longitude', 'type': 'Double', 'index': 13},
        {'destination': 'MDL', 'source': 'DetectionQuantitationLimitMeasure/MeasureValue', 'alias': 'Detection Quantitation Limit', 'type': 'Double', 'index': 14},
        {'destination': 'MDLUnit', 'source': 'DetectionQuantitationLimitMeasure/MeasureUnitCode', 'alias': 'Detection Quantitation Limit Unit', 'type': 'Text', 'length': 50, 'index': 15},
        {'destination': 'MethodDescript', 'source': 'MethodDescriptionText', 'alias': 'Method Description', 'type': 'Text', 'length': 100, 'index': 16},
        {'destination': 'OrgId', 'source': 'OrganizationIdentifier', 'alias': 'Organization Id', 'type': 'Text', 'length': 50, 'index': 17},
        {'destination': 'OrgName', 'source': 'OrganizationFormalName', 'alias': 'Organization Name', 'type': 'Text', 'length': 150, 'index': 18},
        {'destination': 'Param', 'source': 'CharacteristicName', 'alias': 'Parameter', 'type': 'Text', 'length': 500, 'index': 19},
        {'destination': 'ParamGroup', 'source': None, 'alias': 'Parameter Group', 'type': 'Text', 'index': 20},
        {'destination': 'ProjectId', 'source': 'ProjectIdentifier', 'alias': 'Project Id', 'type': 'Text', 'length': 50, 'index': 21},
        {'destination': 'QualCode', 'source': 'MeasureQualifierCode', 'alias': 'Measure Qualifier Code', 'type': 'Text', 'length': 50, 'index': 22},
        {'destination': 'ResultComment', 'source': 'ResultCommentText', 'alias': 'Result Comment', 'type': 'Text', 'length': 1500, 'index': 23},
        {'destination': 'ResultStatus', 'source': 'ResultStatusIdentifier', 'alias': 'Result Status', 'type': 'Text', 'length': 50, 'index': 24},
        {'destination': 'ResultValue', 'source': 'ResultMeasureValue', 'alias': 'Result Measure Value', 'type': 'Double', 'index': 25},
        {'destination': 'SampComment', 'source': 'ActivityCommentText', 'alias': 'Sample Comment', 'type': 'Text', 'index': 26, 'length': 500},
        {'destination': 'SampDepth', 'source': 'ActivityDepthHeightMeasure/MeasureValue', 'alias': 'Sample Depth', 'type': 'Double', 'index': 27},
        {'destination': 'SampDepthRef', 'source': 'ActivityDepthAltitudeReferencePointText', 'alias': 'Sample Depth Reference', 'type': 'Text', 'length': 50, 'index': 28},
        {'destination': 'SampDepthU', 'source': 'ActivityDepthHeightMeasure/MeasureUnitCode', 'alias': 'Sample Depth Units', 'type': 'Text', 'length': 50, 'index': 29},
        {'destination': 'SampEquip', 'source': 'SampleCollectionEquipmentName', 'alias': 'Collection Equipment', 'type': 'Text', 'length': 75, 'index': 30},
        {'destination': 'SampFrac', 'source': 'ResultSampleFractionText', 'alias': 'Result Sample Fraction', 'type': 'Text', 'length': 50, 'index': 31},
        {'destination': 'SampleDate', 'source': 'ActivityStartDate', 'alias': 'Sample Date', 'type': 'Date', 'index': 32},
        {'destination': 'SampleTime', 'source': 'ActivityStartTime/Time', 'alias': 'Sample Time', 'type': 'Time', 'index': 33},
        {'destination': 'SampleId', 'source': 'ActivityIdentifier', 'alias': 'Sample Id', 'type': 'Text', 'length': 100, 'index': 34},
        {'destination': 'SampMedia', 'source': 'ActivityMediaSubdivisionName', 'alias': 'Sample Media', 'type': 'Text', 'length': 50, 'index': 35},
        {'destination': 'SampMeth', 'source': 'SampleCollectionMethod/MethodIdentifier', 'alias': 'Collection Method', 'type': 'Text', 'length': 50, 'index': 36},
        {'destination': 'SampMethName', 'source': 'SampleCollectionMethod/MethodName', 'alias': 'Collection Method Name', 'type': 'Text', 'length': 75, 'index': 37},
        {'destination': 'SampType', 'source': 'ActivityTypeCode', 'alias': 'Sample Type', 'type': 'Text', 'length': 75, 'index': 38},
        {'destination': 'StationId', 'source': 'MonitoringLocationIdentifier', 'alias': 'Station Id', 'type': 'Text', 'length': 50, 'index': 39},
        {'destination': 'Unit', 'source': 'ResultMeasure/MeasureUnitCode', 'alias': 'Result Measure Unit', 'type': 'Text', 'length': 50, 'index': 40},
        {'destination': 'USGSPCode', 'source': 'USGSPCode', 'alias': 'USGS P Code', 'type': 'Text', 'length': 50, 'index': 41}
    ]

    @property
    def station(self):
        return self.station_gdoc_schema

    @property
    def result(self):
        return self.result_gdoc_schema


class TableInfo(object):

    def __init__(self, location, name):
        self.location = os.path.join(location, name)
        self.name = name
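# A minimal usage sketch of Concentration with made-up readings; `append`
# merges another Concentration, and duplicate values for a chemical are
# averaged on read:
if __name__ == '__main__':
    sample = Concentration()
    reading = Concentration()
    reading._set('Dissolved Calcium', 52.3)  # normalized to 'ca' via chemical_map
    reading._set('Sulphate', 12.1)           # normalized to 'so4'
    sample.append(reading)
    sample._set('Calcium', 48.7)             # duplicate -> stored as a list
    print(sample.calcium)                    # (52.3 + 48.7) / 2 = 50.5
    print(sample.sulfate)                    # 12.1
    print(sample.has_major_params)           # False: mg, cl, hco3 still missing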
<gh_stars>1-10 """Checkers list.""" from padpo.checkers.empty import EmptyChecker from padpo.checkers.fuzzy import FuzzyChecker from padpo.checkers.glossary import GlossaryChecker from padpo.checkers.grammalecte import GrammalecteChecker from padpo.checkers.linelength import LineLengthChecker from padpo.checkers.nbsp import NonBreakableSpaceChecker checkers = [ EmptyChecker(), FuzzyChecker(), GrammalecteChecker(), GlossaryChecker(), LineLengthChecker(), NonBreakableSpaceChecker(), ]
# Copyright (c) 2018 Dolphin Emulator Website Contributors
# SPDX-License-Identifier: MIT

from django.conf import settings
from django.db import models


class NewsArticle(models.Model):
    """A news article which can be linked to a forum post for comments"""

    title = models.CharField(max_length=64)
    slug = models.SlugField()
    author = models.CharField(max_length=64)
    posted_on = models.DateTimeField(auto_now_add=True)
    forum_pid = models.IntegerField(null=True)
    text = models.TextField()
    published = models.BooleanField(default=False)

    def __str__(self):
        return self.title

    @property
    def forum_url(self):
        return settings.FORUM_URL + 'showthread.php?tid=%d' % self.forum_pid

    @models.permalink
    def get_absolute_url(self):
        return ('news_article', [self.slug])
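# `models.permalink` was removed in Django 2.1. A minimal sketch of the same
# method on modern Django, assuming a URL pattern named 'news_article' that
# takes the slug as its only argument:
#
#     from django.urls import reverse
#
#     def get_absolute_url(self):
#         return reverse('news_article', args=[self.slug])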
from django.core.management.base import NoArgsCommand
from django.db import connection
from django.core.management import call_command


class Command(NoArgsCommand):
    help = "Deletes all tables in the 'default' database."
    option_list = NoArgsCommand.option_list + tuple()

    def handle_noargs(self, **options):
        cursor = connection.cursor()
        tables = connection.introspection.django_table_names(
            only_existing=True)
        for table in tables:
            command = "DROP TABLE %s CASCADE;" % table
            cursor.execute(command)
            self.stderr.write("Executed ... %s" % command)
        cursor.execute("COMMIT;")
        self.stderr.write("Running syncdbmigrate ...")
        call_command("syncdbmigrate", **options)
StarcoderdataPython
4822353
class Solution:
    def isPalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        if s is None or len(s) < 2:
            return True
        i, j = 0, len(s) - 1
        while True:
            while i < len(s) and not self.isAlpha(s[i]):
                i += 1
            while j >= 0 and not self.isAlpha(s[j]):
                j -= 1
            if i < j and s[i].lower() != s[j].lower():
                return False
            i += 1
            j -= 1
            if i >= j:
                break
        return True

    def isAlpha(self, c):
        if (ord(c) >= ord('a') and ord(c) <= ord('z')) or (ord(c) >= ord('A') and ord(c) <= ord('Z')) or (ord(c) >= ord('0') and ord(c) <= ord('9')):
            return True
        return False


solution = Solution()
print(solution.isPalindrome("0P"))
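
# Illustrative alternative (not part of the original solution): the same check
# written with Python's built-in str.isalnum instead of manual ord() ranges.
def is_palindrome(s):
    filtered = [c.lower() for c in s if c.isalnum()]
    return filtered == filtered[::-1]

assert is_palindrome("A man, a plan, a canal: Panama")
assert not is_palindrome("0P")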
StarcoderdataPython
3292292
from nose2.tests._common import FunctionalTestCase


class TestDunderTestPlugin(FunctionalTestCase):
    def test_dunder(self):
        proc = self.runIn(
            'scenario/dundertest_attribute',
            '-v')
        self.assertTestRunOutputMatches(proc, stderr='Ran 0 tests')
StarcoderdataPython
46332
# -*- coding: utf-8 -*-

# Copyright (c) 2016, KOL
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from urlparse import urlparse
from updater import Updater

PREFIX = '/video/torrserver'

ART = 'art-default.jpg'
ICON = 'icon-default.png'
TITLE = 'TorrServer'

SERVER_RE = Regex('^https?://[^/:]+(:[0-9]{1,5})?$')


def Start():
    HTTP.CacheTime = 0


def ValidatePrefs():
    if (ValidateServer()):
        return MessageContainer(
            header=u'%s' % L('Success'),
            message=u'%s' % L('Preferences was changed')
        )
    else:
        return MessageContainer(
            header=u'%s' % L('Error'),
            message=u'%s' % L('Bad server address')
        )


def ValidateServer():
    return Prefs['server'] and SERVER_RE.match(Prefs['server'])


def GetLink(file):
    if 'Play' in file:
        return file['Play']
    return file['Link']


@handler(PREFIX, TITLE, thumb=ICON)
def MainMenu():
    oc = ObjectContainer(title2=TITLE, no_cache=True)

    Updater(PREFIX+'/update', oc)

    if not ValidateServer():
        return MessageContainer(
            header=u'%s' % L('Error'),
            message=u'%s' % L('Please specify server address in plugin preferences')
        )

    items = GetItems()
    if not items:
        return NoContents()

    server = GetServerUrl()
    for item in items:
        if len(item['Files']) > 1:
            oc.add(DirectoryObject(
                key=Callback(List, hash=item['Hash']),
                title=u'%s' % item['Name']
            ))
        else:
            file = item['Files'][0]
            oc.add(GetVideoObject(server+GetLink(file), file['Name']))

    return oc


@route(PREFIX + '/list')
def List(hash):
    found = False
    items = GetItems()
    if not items:
        return NoContents()

    for item in items:
        if item['Hash'] == hash:
            found = True
            break

    if not found:
        return NoContents()

    oc = ObjectContainer(
        title2=u'%s' % item['Name'],
        replace_parent=False,
    )

    server = GetServerUrl()
    for file in item['Files']:
        oc.add(GetVideoObject(server+GetLink(file), file['Name']))

    if not len(oc):
        return NoContents()

    return oc


@route(PREFIX + '/play')
def VideoPlay(uri, title, **kwargs):
    return ObjectContainer(
        objects=[GetVideoObject(uri, title)],
        content=ContainerContent.GenericVideos
    )


def GetVideoObject(uri, title):
    uri = u'%s' % uri
    title = u'%s' % title
    return VideoClipObject(
        key=Callback(VideoPlay, uri=uri, title=title),
        rating_key=uri,
        title=title,
        source_title=TITLE,
        items=[
            MediaObject(
                parts=[PartObject(key=uri)],
                container=Container.MP4,
                video_codec=VideoCodec.H264,
                audio_codec=AudioCodec.AAC,
                optimized_for_streaming=True
            )
        ]
    )


def GetItems():
    try:
        res = JSON.ObjectFromURL(GetServerUrl()+'/torrent/list', method='POST')
    except Exception as e:
        Log.Error(u'%s' % e)
        return None

    if not len(res):
        return None

    return res


def GetServerUrl():
    url = Prefs['server']
    if url[-1] == '/':
        return url[0:-1]
    return url


def NoContents():
    return ObjectContainer(
        header=u'%s' % L('Error'),
        message=u'%s' % L('No entries found')
    )
StarcoderdataPython
51785
<reponame>andreasala98/pykeen # -*- coding: utf-8 -*- """Inductive models in PyKEEN."""
StarcoderdataPython
28696
<reponame>chelunike/trust_me_i_am_an_engineer # -*- encoding:utf-8 -*- impar = lambda n : 2 * n - 1 header = """ Demostrar que es cierto: 1 + 3 + 5 + ... + (2*n)-1 = n ^ 2 Luego con este programa se busca probar dicha afirmacion. """ def suma_impares(n): suma = 0 for i in range(1, n+1): suma += impar(i) return suma def main(): print(header) num = int(input('Numero: ')) suma = suma_impares(num) cuadrado = num ** 2 print('Suma de los ', num, ' primeros impares = ', suma) print('Cuadrado del numero: ', cuadrado) if suma == cuadrado: print('Son iguales, luego se cumple la afirmacion') else: print('No son iguales, luego no se cumple la afirmacion') if __name__ == '__main__': main()
StarcoderdataPython
1655425
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt

# Hyper parameters
n_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
interval = 100

# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../../_data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../../_data',
                                          train=False,
                                          transform=transforms.ToTensor(),
                                          download=True)

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)


def display_multiple_img(images, predictions, rows=1, cols=1):
    figure, ax = plt.subplots(nrows=rows, ncols=cols)
    for i, image in enumerate(images):
        ax[i].imshow(image[0])
        ax[i].set_title(f"{predictions[i]}")
        ax[i].set_axis_off()
    plt.tight_layout()
    plt.show()


# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = nn.Linear(7*7*32, num_classes)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out


model = ConvNet(num_classes)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)
for epoch in range(n_epochs):
    loss_batch = 0
    for i, (images, labels) in enumerate(train_loader):
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_batch += loss.item()
        print(f'\rEpoch [{epoch+1}/{n_epochs}], loss: {loss_batch}', end="")

# Get accuracy (counters must start at zero before accumulating)
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Show network predictions for a batch of test images
for images, labels in test_loader:
    outputs = model(images)
    _, predicted = torch.max(outputs.data, 1)
    maximum = min(10, len(images))
    display_multiple_img(images[:maximum], predicted[:maximum], 1, maximum)
    break
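
# Illustrative sketch (not part of the original script): persisting the trained
# weights with PyTorch's state_dict API and restoring them into a fresh model.
# The file name 'convnet_mnist.pt' is an arbitrary placeholder.
torch.save(model.state_dict(), 'convnet_mnist.pt')
restored = ConvNet(num_classes)
restored.load_state_dict(torch.load('convnet_mnist.pt'))
restored.eval()  # inference mode, so BatchNorm uses its running statistics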
StarcoderdataPython
169358
# -*- coding: utf-8 -*-

#    Visigoth: A lightweight Python3 library for rendering data visualizations in SVG
#    Copyright (C) 2020-2021  Visigoth Developers
#
#    Permission is hereby granted, free of charge, to any person obtaining a copy of this software
#    and associated documentation files (the "Software"), to deal in the Software without
#    restriction, including without limitation the rights to use, copy, modify, merge, publish,
#    distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
#    Software is furnished to do so, subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be included in all copies or
#    substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
#    BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#    DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import random
import sys
import math


class Silhouette(object):
    """
    Silhouette Cluster Evaluation

    Arguments:
        data(list) : list of (lon,lat) pairs to evaluate
        model : a cluster model implementing the score method
    """

    def __init__(self, data, model):
        self.data = data
        self.model = model

    def computeMeanDistance(self, point, otherpoints):
        totaldist = 0.0
        if len(otherpoints) == 0:
            return 0.0
        for otherpoint in otherpoints:
            totaldist += self.model.computeDistance(point, otherpoint)
        return totaldist / len(otherpoints)

    def compute(self):
        clusters = self.model.getClusterCount()
        assignments = {cluster: [] for cluster in range(clusters)}
        for point in self.data:
            cluster = self.model.score(point)
            assignments[cluster].append(point)

        s_tot = 0
        for point in self.data:
            cluster = self.model.score(point)
            sameclusterpoints = assignments[cluster][:]
            sameclusterpoints.remove(point)
            if not len(sameclusterpoints):
                continue
            sil_a = self.computeMeanDistance(point, sameclusterpoints)
            otherclusters = list(range(clusters))
            otherclusters.remove(cluster)
            sil_b = None
            for othercluster in otherclusters:
                otherclusterpoints = assignments[othercluster]
                b_cand = self.computeMeanDistance(point, otherclusterpoints)
                if b_cand > 0.0 and (sil_b == None or b_cand < sil_b):
                    sil_b = b_cand
            s_tot += (sil_b - sil_a) / max(sil_a, sil_b)
        return s_tot / len(self.data)
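
# Illustrative usage sketch (not part of the original module): Silhouette only
# needs an object exposing getClusterCount(), score(point) and
# computeDistance(a, b), so a tiny Euclidean nearest-center stub suffices.
if __name__ == "__main__":
    class _StubModel(object):
        centers = [(0.0, 0.0), (10.0, 10.0)]

        def getClusterCount(self):
            return 2

        def computeDistance(self, p1, p2):
            return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

        def score(self, point):
            dists = [self.computeDistance(point, c) for c in self.centers]
            return dists.index(min(dists))

    points = [(0.1, 0.2), (0.3, -0.1), (9.8, 10.1), (10.2, 9.9)]
    print(Silhouette(points, _StubModel()).compute())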
StarcoderdataPython
144562
<filename>tronn/interpretation/inference.py<gh_stars>1-10 """description: wrappers for inference runs """ import os import h5py import json from tronn.datalayer import setup_data_loader from tronn.models import setup_model_manager from tronn.util.utils import DataKeys def _setup_input_skip_keys(args): """reduce tensors pulled from data files to save time/space """ if (args.subcommand_name == "dmim") or (args.subcommand_name == "synergy") or (args.subcommand_name == "mutatemotifs"): skip_keys = [ DataKeys.ORIG_SEQ_SHUF, DataKeys.ORIG_SEQ_ACTIVE_SHUF, DataKeys.ORIG_SEQ_PWM_SCORES, DataKeys.ORIG_SEQ_PWM_SCORES_THRESH, DataKeys.ORIG_SEQ_SHUF_PWM_SCORES, DataKeys.ORIG_SEQ_PWM_DENSITIES, DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES, DataKeys.WEIGHTED_SEQ_SHUF, DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF, DataKeys.WEIGHTED_SEQ_PWM_HITS, DataKeys.WEIGHTED_SEQ_PWM_SCORES, DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH, DataKeys.WEIGHTED_SEQ_SHUF_PWM_SCORES, DataKeys.MUT_MOTIF_ORIG_SEQ, "{}.string".format(DataKeys.MUT_MOTIF_ORIG_SEQ), DataKeys.MUT_MOTIF_WEIGHTED_SEQ, DataKeys.WEIGHTED_PWM_SCORES_POSITION_MAX_VAL_MUT, DataKeys.WEIGHTED_PWM_SCORES_POSITION_MAX_IDX_MUT, DataKeys.MUT_MOTIF_POS, DataKeys.MUT_MOTIF_PRESENT, DataKeys.MUT_MOTIF_WEIGHTED_SEQ_CI, DataKeys.MUT_MOTIF_WEIGHTED_SEQ_CI_THRESH, DataKeys.MUT_MOTIF_LOGITS, DataKeys.MUT_MOTIF_LOGITS_SIG, DataKeys.MUT_MOTIF_LOGITS_MULTIMODEL, DataKeys.DFIM_SCORES, DataKeys.DFIM_SCORES_DX, DataKeys.DMIM_SCORES, DataKeys.DMIM_SCORES_SIG, DataKeys.FEATURES] for key in skip_keys: prevent_null_skip = [ "{}.string".format(DataKeys.MUT_MOTIF_ORIG_SEQ), DataKeys.MUT_MOTIF_POS] if key in prevent_null_skip: continue if "motif_mut" in key: skip_keys.append(key.replace("motif_mut", "null_mut")) elif (args.subcommand_name == "scanmotifs"): skip_keys = [] else: skip_keys = [] return skip_keys def _setup_output_skip_keys(args): """reduce tensors pulled from data files to save time/space """ if (args.subcommand_name == "dmim") or (args.subcommand_name == "synergy"): skip_keys = [] elif (args.subcommand_name == "mutatemotifs"): skip_keys = [ DataKeys.OR<KEY>, DataKeys.MUT_MOTIF_ORIG_SEQ, DataKeys.MUT_MOTIF_POS, DataKeys.MUT_MOTIF_MASK] for key in skip_keys: if "motif_mut" in key: skip_keys.append(key.replace("motif_mut", "null_mut")) elif args.subcommand_name == "scanmotifs": skip_keys = [ DataKeys.ORIG_SEQ_SHUF, DataKeys.ORIG_SEQ_ACTIVE_SHUF, DataKeys.ORIG_SEQ_PWM_SCORES, DataKeys.WEIGHTED_SEQ_SHUF, DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF, DataKeys.WEIGHTED_SEQ_PWM_SCORES, DataKeys.WEIGHTED_SEQ_PWM_HITS, DataKeys.FEATURES, DataKeys.LOGITS_SHUF] if args.lite: # add other tensors to skip lite_keys = [ DataKeys.IMPORTANCE_GRADIENTS, DataKeys.WEIGHTED_SEQ, DataKeys.ORIG_SEQ_PWM_SCORES_THRESH, DataKeys.ORIG_SEQ_PWM_HITS, DataKeys.ORIG_SEQ_PWM_DENSITIES, DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES, DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH, DataKeys.WEIGHTED_SEQ_PWM_HITS] skip_keys += lite_keys elif args.subcommand_name == "simulategrammar": skip_keys = [ DataKeys.ORIG_SEQ_SHUF, DataKeys.ORIG_SEQ_ACTIVE_SHUF, DataKeys.ORIG_SEQ_PWM_SCORES, DataKeys.ORIG_SEQ_PWM_SCORES_THRESH, DataKeys.ORIG_SEQ_PWM_HITS, DataKeys.WEIGHTED_SEQ_SHUF, DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF, DataKeys.WEIGHTED_SEQ_PWM_SCORES, DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH, DataKeys.WEIGHTED_SEQ_PWM_HITS, DataKeys.FEATURES, DataKeys.LOGITS_SHUF, DataKeys.ORIG_SEQ_PWM_DENSITIES, DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES] elif args.subcommand_name == "analyzevariants": skip_keys = [ DataKeys.ORIG_SEQ_SHUF, DataKeys.ORIG_SEQ_ACTIVE_SHUF, 
DataKeys.ORIG_SEQ_PWM_SCORES, DataKeys.ORIG_SEQ_PWM_SCORES_THRESH, DataKeys.WEIGHTED_SEQ_SHUF, DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF, DataKeys.WEIGHTED_SEQ_PWM_SCORES, DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH, DataKeys.WEIGHTED_SEQ_PWM_HITS, DataKeys.FEATURES, DataKeys.LOGITS_SHUF, DataKeys.ORIG_SEQ_PWM_DENSITIES, DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES] elif args.subcommand_name == "buildtracks": skip_keys = [ DataKeys.ORIG_SEQ, DataKeys.ORIG_SEQ_ACTIVE, DataKeys.PROBABILITIES, DataKeys.LOGITS_CI, DataKeys.LOGITS_CI_THRESH, DataKeys.LOGITS_SHUF, DataKeys.LOGITS_MULTIMODEL, DataKeys.LOGITS_MULTIMODEL_NORM, DataKeys.IMPORTANCE_GRADIENTS, #DataKeys.WEIGHTED_SEQ, DataKeys.WEIGHTED_SEQ_ACTIVE_CI, DataKeys.WEIGHTED_SEQ_ACTIVE_CI_THRESH, DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF, DataKeys.WEIGHTED_SEQ_THRESHOLDS, DataKeys.ORIG_SEQ_ACTIVE_SHUF, DataKeys.FEATURES] else: skip_keys = [] return skip_keys def run_inference(args, warm_start=False): """convenience wrapper to mask away multi model vs single model runs """ # adjust prefix and inference fn if doing a prediction warm start if warm_start: out_prefix = "{}/{}.{}.prediction_sample".format( args.out_dir, args.prefix, args.subcommand_name) real_inference_fn = args.inference_params["inference_fn_name"] args.inference_params["inference_fn_name"] = "empty_net" # now adjust the prefix if there was a prediction sample # this way, since file exists, inference won't run if args.prediction_sample is not None: out_prefix = args.prediction_sample.split(".h5")[0] else: out_prefix = None # run all files together or run rotation of models if args.model["name"] == "kfold_models": inference_files = run_multi_model_inference( args, out_prefix=out_prefix, positives_only=True) else: inference_files = run_single_model_inference( args, out_prefix=out_prefix, positives_only=True) if warm_start: args.model["params"]["prediction_sample"] = inference_files[0] args.inference_params["inference_fn_name"] = real_inference_fn args.inference_params["prediction_sample"] = inference_files[0] # TODO fix this later return inference_files def run_single_model_inference( args, out_prefix=None, positives_only=True, kfold=False, yield_single_examples=False): # adjust here for single vs batch output """wrapper for inference """ # set up out_file if out_prefix is None: out_file = "{}/{}.{}.h5".format( args.out_dir, args.prefix, args.subcommand_name) else: out_file = "{}.h5".format(out_prefix) # set up dataloader data_loader = setup_data_loader(args) # set up model, add to inference params model_manager = setup_model_manager(args) args.inference_params.update({"model": model_manager}) # adjust for positives (normally only run inference on positives) if positives_only: data_loader = data_loader.setup_positives_only_dataloader() # adjust for kfold in model (normally no) if kfold: keep_chromosomes = model_manager.model_dataset["test"] data_loader = data_loader.filter_for_chromosomes( keep_chromosomes) # data file adjustments done, set up input fn input_fn = data_loader.build_input_fn( args.batch_size, shuffle=not args.fifo if args.fifo is not None else True, targets=args.targets, target_indices=args.target_indices, filter_targets=args.filter_targets, examples_subset=args.dataset_examples, use_queues=True, skip_keys=_setup_input_skip_keys(args)) # skip some outputs args.inference_params.update( {"skip_outputs": _setup_output_skip_keys(args)}) # also check if processed inputs, if processed then remove model # and replace with empty net (just send tensors through) if args.processed_inputs: args.model["name"] 
= "empty_net" args.inference_params.update({"model_reuse": False}) model_manager = setup_model_manager(args) else: args.inference_params.update({"model_reuse": True}) # set up inference generator inference_generator = model_manager.infer( input_fn, args.out_dir, args.inference_params, checkpoint=model_manager.model_checkpoint, yield_single_examples=yield_single_examples) # run inference and save out if not os.path.isfile(out_file): model_manager.infer_and_save_to_h5( inference_generator, out_file, args.sample_size, batch_size=args.batch_size, h5_saver_batch_size=args.h5_saver_batch_size, yield_single_examples=yield_single_examples, debug=args.debug) # get chrom tags and transfer in with h5py.File(out_file, "a") as hf: hf["/"].attrs["chromosomes"] = data_loader.get_chromosomes() return [out_file] def run_multi_model_inference( args, out_prefix=None, positives_only=True): """run inference on one model """ if out_prefix is None: out_prefix = "{}/{}.{}".format( args.out_dir, args.prefix, args.subcommand_name) # get the model jsons model_jsons = args.model["params"]["models"] out_files = [] for model_idx in xrange(len(model_jsons)): # load the model json into args.model model_json = model_jsons[model_idx] with open(model_json, "r") as fp: args.model = json.load(fp) # generate the out file out_file = "{}.model-{}.h5".format(out_prefix, model_idx) out_files.append(out_file) # run inference run_inference( args, out_file=out_file, positives_only=positives_only, kfold=True) return out_files
StarcoderdataPython
1784705
<gh_stars>1-10 from intake.tests.base_testcases import ( IntakeDataTestCase, ALL_APPLICATION_FIXTURES) from user_accounts import models, exceptions from user_accounts.tests import mock class TestOrganization(IntakeDataTestCase): fixtures = ALL_APPLICATION_FIXTURES def test_has_a_pdf(self): self.assertTrue(self.sf_pubdef.has_a_pdf()) self.assertFalse(self.cc_pubdef.has_a_pdf()) def test_get_referral_emails_even_if_no_users(self): expected_email = "<EMAIL>" # we need an org org = models.Organization(name="Acme Nonprofit Services Inc.") org.save() user = mock.fake_superuser() models.Invitation.create( expected_email, organization=org, inviter=user) emails = org.get_referral_emails() self.assertListEqual(emails, [expected_email]) def test_get_referral_emails_raises_error_with_no_emails(self): org = models.Organization(name="Acme Nonprofit Services Inc.") org.save() with self.assertRaises(exceptions.NoEmailsForOrgError): org.get_referral_emails() def test_transfer_partners_returns_correct_org(self): ebclc = self.ebclc a_pubdef = self.a_pubdef self.assertIn(a_pubdef, ebclc.transfer_partners.all()) self.assertIn(ebclc, a_pubdef.transfer_partners.all()) def test_get_transfer_org_returns_none(self): sf_pubdef = self.sf_pubdef cc_pubdef = self.cc_pubdef self.assertFalse(sf_pubdef.transfer_partners.all()) self.assertFalse(cc_pubdef.transfer_partners.all())
StarcoderdataPython
4837545
import logging
import re
from copy import copy
from typing import Optional

import xlsxwriter
from RISparser import readris
from RISparser.config import TAG_KEY_MAPPING

from . import utils


class RisImporter:
    @classmethod
    def get_mapping(cls):
        mapping = copy(TAG_KEY_MAPPING)
        mapping.update(
            {"AT": "accession_type", "PM": "pubmed_id", "N2": "abstract2", "SV": "serial_volume"}
        )
        return mapping

    @classmethod
    def file_readable(cls, f):
        # ensure that file can be successfully parsed
        try:
            reader = readris(f, mapping=cls.get_mapping())
            [content for content in reader]
            f.seek(0)
            return True
        except IOError as err:
            logging.warning(err)
            return False

    def __init__(self, f):
        if isinstance(f, str):
            f = open(f, "r")
        else:
            f = f
        reader = readris(f, mapping=self.get_mapping())
        contents = [content for content in reader]
        f.close()
        self.raw_references = contents

    @property
    def references(self):
        if not hasattr(self, "_references"):
            self._references = self._format()
        return self._references

    def _format(self):
        formatted_content = []
        for content in self.raw_references:
            parser = ReferenceParser(content)
            formatted_content.append(parser.format())
        return formatted_content

    def to_excel(self, fn):
        header = ReferenceParser.EXTRACTED_FIELDS
        data_rows = []
        for ref in self.references:
            data_rows.append([ref[fld] for fld in header])

        wb = xlsxwriter.Workbook(fn)
        ws = wb.add_worksheet()
        bold = wb.add_format({"bold": True})

        for c, txt in enumerate(header):
            ws.write(0, c, txt, bold)

        for r, row in enumerate(data_rows):
            for c, txt in enumerate(row):
                try:
                    ws.write(r + 1, c, txt)
                except AttributeError:
                    ws.write(r + 1, c, txt)

        wb.close()


class ReferenceParser:
    PLACEHOLDER_TEXT = "<ADD>"

    # field types
    TITLE_FIELDS = (
        "translated_title", "title", "primary_title",
        "secondary_title", "tertiary_title", "short_title",
    )
    AUTHOR_LIST_FIELDS = (
        "authors", "first_authors", "secondary_authors",
        "tertiary_authors", "subsidiary_authors",
    )
    ABSTRACT_FIELDS = ("abstract", "abstract2")
    YEAR_FIELDS = ("year", "publication_year")

    # Extract the scopus EID
    re_scopus_eid = re.compile(r"eid=([-\.\w]+)(?:&|$)", flags=re.UNICODE)

    EXTRACTED_FIELDS = [
        "authors_short", "authors", "title", "year", "citation",
        "abstract", "PMID", "doi", "accession_number", "accession_db",
        "reference_type", "id", "json",
    ]

    # Match any number (some PMIDs contain number and/or URL)
    re_pmid = re.compile(r"([\d]+)")

    def __init__(self, content):
        self.content = content

    def format(self):
        if not hasattr(self, "_formatted"):
            self._formatted = dict(
                authors_short=self._get_authors_short(),
                authors=self._authors,
                title=self._get_field(self.TITLE_FIELDS, self.PLACEHOLDER_TEXT),
                year=self._get_field(self.YEAR_FIELDS, None),
                citation=self._get_citation(),
                abstract=self._get_field(self.ABSTRACT_FIELDS, ""),
                PMID=self._get_pmid(),
                doi=self.content.get("doi", None),
                accession_number=self._get_accession_number(),
                accession_db=self.content.get("name_of_database", None),
                reference_type=self.content.get("type_of_reference", None),
                id=utils.try_int(self.content["id"]),
                json=self.content,
            )
        return self._formatted

    def _get_field(self, fields, default):
        for fld in fields:
            if fld in self.content:
                return self.content.get(fld)
        return default

    def _get_pmid(self) -> Optional[int]:
        # get PMID if specified in that field
        if "pubmed_id" in self.content:
            pubmed_id = self.content["pubmed_id"]
            if type(pubmed_id) is int:
                return pubmed_id
            else:
                m = self.re_pmid.findall(pubmed_id)
                if len(m) > 0:
                    # no try/catch req'd; return first matching int
                    return int(m[0])

        # get value if the accession number database is NLM
        if self.content.get("name_of_database", "") == "NLM" and "accession_number" in self.content:
            try:
                return int(self.content["accession_number"])
            except ValueError:
                pass

        return None

    def _get_accession_number(self):
        number = self.content.get("accession_number", None)

        # extract the Scopus EID
        if number and isinstance(number, str) and "eid=" in number:
            m = self.re_scopus_eid.findall(number)
            if len(m) > 0:
                number = m[0]

        return number

    def _clean_authors(self):
        authors = []
        for fld in self.AUTHOR_LIST_FIELDS:
            if fld in self.content:
                authors.extend([author for author in self.content[fld]])
        self._authors = utils.normalize_authors(authors)

    def _get_authors_short(self):
        if not hasattr(self, "_authors"):
            self._clean_authors()
        return utils.get_author_short_text(self._authors)

    def _get_journal_citation(self):
        # volume is sometimes blank; only add separator if non-blank
        volume = str(self.content.get("volume", ""))
        if len(volume) > 0:
            volume = f"; {volume}"

        # issue is sometimes blank; only add parens if non-blank
        issue = str(self.content.get("note", ""))
        if len(issue) > 0:
            issue = f" ({issue})"

        # pages is sometimes blank; only add colon if non-blank
        pages = str(self.content.get("start_page", ""))
        if len(pages) > 0:
            pages = f":{pages}"

        sec_title = str(self.content.get("secondary_title", ""))  # journal
        year = self.content.get("year", "")  # year
        return f"{sec_title} {year}{volume}{issue}{pages}"

    def _get_book_citation(self):
        vals = []
        if "secondary_title" in self.content:
            vals.append(f"{self.content['secondary_title']}.")
        if "year" in self.content:
            vals.append(f"{self.content['year']}.")
        if "start_page" in self.content:
            vals.append(f"Pages {self.content['start_page']}.")
        if "issn" in self.content:
            vals.append(f"{self.content['issn']}")
        return " ".join(vals)

    def _get_citation(self):
        refType = self.content.get("type_of_reference", "")
        citation = self.PLACEHOLDER_TEXT
        if refType in ("JFULL", "JOUR"):
            citation = self._get_journal_citation()
        elif refType in ("BOOK", "CHAP"):
            citation = self._get_book_citation()
        elif refType == "SER":
            citation = self.content.get("alternate_title1", "")
        elif refType == "CONF":
            citation = self.content.get("short_title", "")
        else:
            id_ = self.content.get("id", None)
            logging.warning(f'Unknown type: "{refType}", id="{id_}"')
        return citation
StarcoderdataPython
1604884
<reponame>AsM0DeUz/leapp-repository import os import shutil import sys from leapp.libraries.common.utils import makedirs from leapp.libraries.stdlib import api LEAPP_HOME = '/root/tmp_leapp_py3' def _get_python_dirname(): # NOTE: I thought about the static value: python2.7 for el7, python3.6 for # el8; but in the end I've ratcher switched to this generic solution. return 'python{}.{}'.format(sys.version_info.major, sys.version_info.minor) def _get_orig_leapp_path(): return os.path.join('/usr/lib', _get_python_dirname(), 'site-packages/leapp') def apply_python3_workaround(): py3_leapp = os.path.join(LEAPP_HOME, 'leapp3') if os.path.exists(LEAPP_HOME): try: shutil.rmtree(LEAPP_HOME) except OSError as e: api.current_logger().error('Could not remove {} directory: {}'.format(LEAPP_HOME, str(e))) makedirs(LEAPP_HOME) leapp_lib_symlink_path = os.path.join(LEAPP_HOME, 'leapp') os.symlink(_get_orig_leapp_path(), leapp_lib_symlink_path) with open(py3_leapp, 'w') as f: f_content = [ '#!/usr/bin/python3', 'import sys', 'sys.path.append(\'{}\')'.format(LEAPP_HOME), '', 'import leapp.cli', 'sys.exit(leapp.cli.main())', ] f.write('{}\n\n'.format('\n'.join(f_content))) os.chmod(py3_leapp, 0o770)
StarcoderdataPython
1669714
import numpy as np
import gym
import cv2

from baselines.common.atari_wrappers import FrameStack
from retro_contest.local import make as make_local

cv2.ocl.setUseOpenCL(False)  # No GPU use


class PreprocessFrame(gym.ObservationWrapper):
    """
    Grayscales and resizes Frame
    """
    def __init__(self, env, width=96, height=96):
        super().__init__(env)  # ObservationWrapper takes only the env
        self.width = width
        self.height = height
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(self.height, self.width, 1),
            dtype=np.uint8)

    def observation(self, frame):
        """
        Returns preprocessed frame
        """
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height),
                           interpolation=cv2.INTER_AREA)
        frame = frame[:, :, None]
        return frame


class ActionDiscretizer(gym.ActionWrapper):
    """
    Wraps a retro environment to make it use discrete actions for the game
    """
    def __init__(self, env):
        super(ActionDiscretizer, self).__init__(env)
        buttons = ['B', 'A', 'MODE', 'START', 'UP', 'DOWN',
                   'LEFT', 'RIGHT', 'C', 'Y', 'X', 'Z']
        actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'],
                   ['DOWN'], ['DOWN', 'B'], ['B']]

        self.actions_ = []

        """
        For each action:
            - create an array of 12[buttons] False
            For each button in action:
                - make button index True
        Creates arrays of actions, where each True element is the clicked button
        """
        for action in actions:
            arr = np.array([False] * 12)
            for button in action:
                arr[buttons.index(button)] = True
            self.actions_.append(arr)

        self.action_space = gym.spaces.Discrete(len(self.actions_))

    def action(self, a_id):
        """
        Retrieves an action
        """
        return self.actions_[a_id].copy()


class RewardScaler(gym.RewardWrapper):
    """
    Rescales the rewards for PPO. Affects performance.
    """
    def reward(self, reward):
        return reward * 0.01


class AllowBackTracking(gym.Wrapper):
    """
    Use deltas in max(X) rather than deltas in X.
    Agents are not discouraged heavily from exploring backwards
    if there is no way to advance forward directly.
    """
    def __init__(self, env):
        super(AllowBackTracking, self).__init__(env)
        self._cur_x = 0
        self._max_x = 0

    def reset(self, **kwargs):
        self._cur_x = 0
        self._max_x = 0
        return self.env.reset(**kwargs)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self._cur_x += reward
        reward = max(0, self._cur_x - self._max_x)
        self._max_x = max(self._cur_x, self._max_x)
        return obs, reward, done, info


def create_env(env_idx):
    """
    Creates an environment with standard wrappers
    """
    wrappers = [
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'SpringYardZone.Act3'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'SpringYardZone.Act2'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'GreenHillZone.Act3'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'GreenHillZone.Act1'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'StarLightZone.Act2'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'StarLightZone.Act1'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'MarbleZone.Act2'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'MarbleZone.Act1'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'MarbleZone.Act3'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'ScrapBrainZone.Act2'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'LabyrinthZone.Act2'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'LabyrinthZone.Act1'},
        {'game': 'SonicTheHedgehog-Genesis', 'state': 'LabyrinthZone.Act3'}
    ]

    print(wrappers[env_idx]['game'], wrappers[env_idx]['state'], flush=True)
    env = make_local(game=wrappers[env_idx]['game'],
                     state=wrappers[env_idx]['state'],
                     bk2dir='/records')

    # Build actions array
    env = ActionDiscretizer(env)

    # Scale rewards
    env = RewardScaler(env)

    # Preprocess frames and Stack
    env = PreprocessFrame(env)
    env = FrameStack(env, 4)

    env = AllowBackTracking(env)

    return env


def make_train(env_indices=[0], all_=False):
    """
    Returns a list of environments with given indices
    """
    env_indices = np.arange(0, 13) if all_ else env_indices
    return [create_env(idx) for idx in env_indices]
StarcoderdataPython
3387061
<filename>ktane/vanilla.py<gh_stars>1-10 "Solver scripts for all vanilla modules." from typing import Final, List, NamedTuple, Tuple, Dict, Counter from ktane.directors import ModuleSolver, EdgeFlag, Port from ktane.ask import talk from ktane import ask from ktane.solverutils import morse, maze, grid # MorseCode, Maze __all__ = [ "Wires", "TheButton", "Keypad", "SimonSays", "WhosOnFirst", "Memory", "MorseCode", "ComplicatedWires", "WireSequence", "Maze", "Password" ] class Wires(ModuleSolver): "Solver for vanilla Wires." name: Final = "Wires" id: Final = "Wires" required_edgework: Final = (EdgeFlag.SERIAL,) def stage(self) -> None: talk("What color wires are on the module, from top to bottom?") talk("Type R for red, Y for yellow, B for blue, W for white, and K for black.") wirelist = ask.str_from_regex(r"[rybwk]{3,6}").lower() wire: str if len(wirelist) == 3: if 'r' not in wirelist: wire = "second" elif wirelist[-1] == 'w': wire = "last" elif wirelist.count('b') > 1: wire = "last blue" else: wire = "last" elif len(wirelist) == 4: if wirelist.count('r') > 1 and self.bomb.serial_odd: wire = "last red" elif wirelist[-1] == 'y' and 'r' not in wirelist: wire = "first" elif wirelist.count('b') == 1: wire = "first" elif wirelist.count('y') > 1: wire = "last" else: wire = "second" elif len(wirelist) == 5: if wirelist[-1] == 'k' and self.bomb.serial_odd: wire = "fourth" elif wirelist.count('r') == 1 and wirelist.count('y') > 1: wire = "first" elif 'k' not in wirelist: wire = "second" else: wire = "first" elif len(wirelist) == 6: if 'y' not in wirelist and self.bomb.serial_odd: wire = "third" elif wirelist.count('y') == 1 and wirelist.count('w') > 1: wire = "fourth" elif 'r' not in wirelist: wire = "last" else: wire = "fourth" talk(f"Cut the {wire} wire.") class TheButton(ModuleSolver): "Solver for vanilla The Button." name: Final = "The Button" id: Final = "BigButton" required_edgework: Final = (EdgeFlag.BATTERIES, EdgeFlag.INDICATORS) valid_colors: Final = {"red", "yellow", "blue", "white"} valid_labels: Final = {"abort", "detonate", "hold", "press"} def stage(self) -> None: talk("What color is the button?") talk('Type one of "red", "yellow", "blue", or "white", without quotes.') color = ask.str_from_set(self.valid_colors) talk("What is the text on the label?") talk('Type one of "abort", "detonate", "hold", or "press", without quotes.') label = ask.str_from_set(self.valid_labels) if color == "blue" and label == "abort": self._hold() elif self.bomb.batteries > 1 and label == "detonate": talk("Press and immediately release the button.") elif color == "white" and ('car', True) in self.bomb.indicators: self._hold() elif self.bomb.batteries > 2 and ('frk', True) in self.bomb.indicators: talk("Press and immediately release the button.") elif color == "yellow": self._hold() elif color == "red" and label == "hold": talk("Press and immediately release the button.") else: self._hold() def _hold(self) -> None: talk("Hold down the button. What color is the strip on the right?") talk('Type one of "red", "yellow", "blue", or "white", without quotes.') strip = ask.str_from_set(self.valid_colors) digit: int if strip == "blue": digit = 4 elif strip == "yellow": digit = 5 else: digit = 1 talk(f"Release the button when the countdown timer has a {digit} " "in any position.") class Keypad(ModuleSolver): "Solver for vanilla Keypad." 
name: Final = "Keypad" id: Final = "Keypad" required_edgework: Final = () valid_symbols: Final = { "copyright", "filled star", "hollow star", "smiley face", "double k", "omega", "squidknife", "pumpkin", "hook n", "six", "squiggly n", "at", "ae", "melted three", "euro", "n with hat", "dragon", "question mark", "paragraph", "right c", "left c", "pitchfork", "cursive", "tracks", "balloon", "upside down y", "bt" } columns: Final = { ("balloon", "at", "upside down y", "squiggly n", "squidknife", "hook n", "left c"), ("euro", "balloon", "left c", "cursive", "hollow star", "hook n", "question mark"), ("copyright", "pumpkin", "cursive", "double k", "melted three", "upside down y", "hollow star"), ("six", "paragraph", "bt", "squidknife", "double k", "question mark", "smiley face"), ("pitchfork", "smiley face", "bt", "right c", "paragraph", "dragon", "filled star"), ("six", "euro", "tracks", "ae", "pitchfork", "n with hat", "omega") } def stage(self) -> None: talk("What symbols are on the keypad?") symbols = ask.list_from_set(self.valid_symbols, print_options=True, expected_len=4) while all(any(sym not in col for sym in symbols) for col in self.columns): talk("I couldn't find a solution for those symbols.") talk("Please ensure you typed them correctly.") talk("What symbols are on the keypad?") symbols = ask.list_from_set(self.valid_symbols, print_options=True, expected_len=4) for col in self.columns: if all(sym in col for sym in symbols): talk("Press the keys in the following order:") for symbol in [sym for sym in col if sym in symbols]: talk(symbol.upper()) class SimonSays(ModuleSolver): "Solver for vanilla Simon Says." name: Final = "<NAME>" id: Final = "Simon" required_edgework: Final = (EdgeFlag.SERIAL, EdgeFlag.STRIKES) total_stages = 5 # max number of stages valid_colors: Final = {"red", "blue", "green", "yellow"} color_sequence: List[str] def stage(self) -> None: if self.current_stage == 1: talk("What color is flashing?") else: talk("What color is now flashing at the end of the sequence?") talk('Type one of "red", "blue", "green", or "yellow", without quotes.') new_color = ask.str_from_set(self.valid_colors) self.color_sequence.append(new_color) if self.bomb.serial_vowel: if self.bomb.strikes == 0: simon_key = {"red": "Blue", "blue": "Red", "green": "Yellow", "yellow": "Green"} elif self.bomb.strikes == 1: simon_key = {"red": "Yellow", "blue": "Green", "green": "Blue", "yellow": "Red"} else: simon_key = {"red": "Green", "blue": "Red", "green": "Yellow", "yellow": "Blue"} else: if self.bomb.strikes == 0: simon_key = {"red": "Blue", "blue": "Yellow", "green": "Green", "yellow": "Red"} elif self.bomb.strikes == 1: simon_key = {"red": "Red", "blue": "Blue", "green": "Yellow", "yellow": "Green"} else: simon_key = {"red": "Yellow", "blue": "Green", "green": "Blue", "yellow": "Red"} talk("Press the following colors in order:") for color in self.color_sequence: talk(simon_key[color]) def custom_data_init(self) -> None: self.color_sequence = [] def custom_data_clear(self) -> None: self.color_sequence = [] def solve(self) -> None: self.announce() while self.do_stage(): self.stage() self.check_strike() if self.current_stage >= 3: if self.check_solve(): return self.reset_stages() def on_this_struck(self) -> None: super().on_this_struck() self.color_sequence.pop() class WhosOnFirst(ModuleSolver): "Solver for vanilla Who's On First." 
name: Final = "Who's on First" id: Final = "WhosOnFirst" required_edgework: Final = () total_stages = 3 valid_displays: Final = { "yes", "first", "display", "okay", "says", "nothing", "empty", "blank", "no", "led", "lead", "read", "red", "reed", "leed", "hold on", "you", "you are", "your", "you're", "ur", "there", "they're", "their", "they are", "see", "c", "cee" } valid_labels: Final = { "ready", "first", "no", "blank", "nothing", "yes", "what", "uhhh", "left", "right", "middle", "okay", "wait", "press", "you", "you are", "your", "you're", "ur", "u", "uh huh", "uh uh", "what?", "done", "next", "hold", "sure", "like" } display_to_index: Final = { "yes": 2, "first": 1, "display": 5, "okay": 1, "says": 5, "nothing": 2, "empty": 4, # no text on display "blank": 3, "no": 5, "led": 2, "lead": 5, "read": 3, "red": 3, "reed": 4, "leed": 4, "hold on": 5, "you": 3, "you are": 5, "your": 3, "you're": 3, "ur": 0, "there": 5, "they're": 4, "their": 3, "they are": 2, "see": 5, "c": 1, "cee": 5 } label_to_buttons: Final = { "ready": ("yes", "okay", "what", "middle", "left", "press", "right", "blank"), "first": ("left", "okay", "yes", "middle", "no", "right", "nothing", "uhhh", "wait", "ready", "blank", "what", "press"), "no": ("blank", "uhhh", "wait", "first", "what", "ready", "right", "yes", "nothing", "left", "press", "okay"), "blank": ("wait", "right", "okay", "middle"), "nothing": ("uhhh", "right", "okay", "middle", "yes", "blank", "no", "press", "left", "what", "wait", "first"), "yes": ("okay", "right", "uhhh", "middle", "first", "what", "press", "ready", "nothing"), "what": ("uhhh",), "uhhh": ("ready", "nothing", "left", "what", "okay", "yes", "right", "no", "press", "blank"), "left": ("right",), "right": ("yes", "nothing", "ready", "press", "no", "wait", "what"), "middle": ("blank", "ready", "okay", "what", "nothing", "press", "no", "wait", "left"), "okay": ("middle", "no", "first", "yes", "uhhh", "nothing", "wait"), "wait": ("uhhh", "no", "blank", "okay", "yes", "left", "first", "press", "what"), "press": ("right", "middle", "yes", "ready"), "you": ("sure", "you are", "your", "you're", "next", "uh huh", "ur", "hold", "what?"), "you are": ("your", "next", "like", "uh huh", "what?", "done", "uh uh", "hold", "you", "u", "you're", "sure", "ur"), "your": ("uh uh", "you are", "uh huh"), "you're": ("you",), "ur": ("done", "u"), "u": ("uh huh", "sure", "next", "what?", "you're", "ur", "uh uh", "done"), "uh huh": (), "uh uh": ("ur", "u", "you are", "you're", "next"), "what?": ("you", "hold", "you're", "your", "u", "done", "uh uh", "like", "you are", "uh huh", "ur", "next"), "done": ("sure", "uh huh", "next", "what?", "your", "ur", "you're", "hold", "like", "you", "u", "you are", "uh uh"), "next": ("what?", "uh huh", "uh uh", "your", "hold", "sure"), "hold": ("you are", "u", "done", "uh uh", "you", "ur", "sure", "what?", "you're", "next"), "sure": ("you are", "done", "like", "you're", "you", "hold", "uh huh", "ur"), "like": ("you're", "next", "u", "ur", "hold", "done", "uh uh", "what?", "uh huh", "you") } def stage(self) -> None: talk('What text is on the display? 
' '(If there is no text, type "Empty".)') display = ask.str_from_set(self.valid_displays) talk("What are the button labels, in reading order?") labels = ask.list_from_set(self.valid_labels, expected_len=6) label_index = self.display_to_index[display] key_label = labels[label_index] answer_label = key_label for button in self.label_to_buttons[key_label]: if button in labels: answer_label = button break talk(f"Press the button labeled {answer_label.upper()}.") class _MemoryItem(NamedTuple): label: str position: int class Memory(ModuleSolver): "Solver for vanilla Memory." name: Final = "Memory" id: Final = "Memory" required_edgework: Final = () total_stages = 5 reset_stages_on_strike = True presses: List[_MemoryItem] def custom_data_init(self) -> None: self.presses = [] def custom_data_clear(self) -> None: self.presses = [] def stage(self) -> None: talk("What number is on the display?") display = int(ask.str_from_regex(r'[1-4]')) talk("What numbers are on the buttons, in reading order?") buttons = ask.str_from_regex(r'[1-4]{4}') while any(c not in buttons for c in '1234'): talk("There should be one of each number on the buttons.") talk("What numbers are on the buttons, in reading order?") buttons = ask.str_from_regex(r'[1-4]{4}') item: _MemoryItem if self.current_stage == 1: item = self._stage_1(display, buttons) elif self.current_stage == 2: item = self._stage_2(display, buttons) elif self.current_stage == 3: item = self._stage_3(display, buttons) elif self.current_stage == 4: item = self._stage_4(display, buttons) else: # self.current_stage == 5 item = self._stage_5(display, buttons) talk(f"Press the button labeled {item.label}.") self.presses.append(item) # region stage helpers def _in_position(self, position: int, buttons: str) -> _MemoryItem: return _MemoryItem(buttons[position-1], position) def _with_label(self, label: str, buttons: str) -> _MemoryItem: return _MemoryItem(label, buttons.index(label)+1) def _stage_1(self, display: int, buttons: str) -> _MemoryItem: if display in {1, 2}: return self._in_position(2, buttons) if display == 3: return self._in_position(3, buttons) # display == 4 return self._in_position(4, buttons) def _stage_2(self, display: int, buttons: str) -> _MemoryItem: if display == 1: return self._with_label('4', buttons) if display in {2, 4}: return self._in_position(self.presses[0].position, buttons) # display == 3 return self._in_position(1, buttons) def _stage_3(self, display: int, buttons: str) -> _MemoryItem: if display == 1: return self._with_label(self.presses[1].label, buttons) if display == 2: return self._with_label(self.presses[0].label, buttons) if display == 3: return self._in_position(3, buttons) # display == 4 return self._with_label('4', buttons) def _stage_4(self, display: int, buttons: str) -> _MemoryItem: if display == 1: return self._in_position(self.presses[0].position, buttons) if display == 2: return self._in_position(1, buttons) # display in {3, 4} return self._in_position(self.presses[1].position, buttons) def _stage_5(self, display: int, buttons: str) -> _MemoryItem: if display == 1: return self._with_label(self.presses[0].label, buttons) if display == 2: return self._with_label(self.presses[1].label, buttons) if display == 3: return self._with_label(self.presses[3].label, buttons) # display == 4 return self._with_label(self.presses[2].label, buttons) # endregion class MorseCode(ModuleSolver): "Solver for vanilla Morse Code." 
name: Final = "Morse Code" id: Final = "Morse" required_edgework: Final = () word_to_freq: Final = { "shell": "505", "halls": "515", "slick": "522", "trick": "532", "boxes": "535", "leaks": "542", "strobe": "545", "bistro": "552", "flick": "555", "bombs": "565", "break": "572", "brick": "575", "steak": "582", "sting": "592", "vector": "595", "beats": "600" } def stage(self) -> None: talk("What Morse Code sequence is flashing?") word = morse.ask_word() while word not in self.word_to_freq: talk("That word isn't in my table.") talk("What Morse Code sequence is flashing?") word = morse.ask_word() talk(f"Respond at frequency 3.{self.word_to_freq[word]} MHz.") class ComplicatedWires(ModuleSolver): "Solver for vanilla Complicated Wires." name: Final = "Complicated Wires" id: Final = "Venn" required_edgework: Final = (EdgeFlag.SERIAL, EdgeFlag.PORTS, EdgeFlag.BATTERIES) venn_diagram: Final = ( # no red ( # no blue # none L S SL ((('C'), ('D')), (('C'), ('B'))), # blue # none L S SL ((('S'), ('P')), (('D'), ('P'))), ), # red ( # no blue # none L S SL ((('S'), ('B')), (('C'), ('B'))), # blue # none L S SL ((('S'), ('S')), (('P'), ('D'))), ) ) def stage(self) -> None: talk("What wires are on the module?") talk("For each wire, include its colors, any of (R)ed, (B)lue, or (W)hite,") talk("whether the LED above it is (L)it, and whether a (S)tar is present.") talk("Input each wire as a string of the parenthesized letters above.") wirelist = ask.list_from_regex(r"[rbwls]+") for wire in wirelist: if self.cut(wire): talk(f"Cut wire {wire.upper()}.") else: talk(f"Do not cut wire {wire.upper()}.") def cut(self, wire: str) -> bool: "Determine whether a given wire must be cut." action = self.venn_diagram['r' in wire]['b' in wire]['s' in wire]['l' in wire] if action == 'C': return True if action == 'D': return False if action == 'S': return not self.bomb.serial_odd if action == 'P': return self.bomb.has_port(Port.PARALLEL) if action == 'B': return self.bomb.batteries >= 2 raise RuntimeError("Invalid Venn Diagram action letter") class _SequenceWire(NamedTuple): color: str label: str class WireSequence(ModuleSolver): "Solver for vanilla Wire Sequence." name: Final = "Wire Sequence" id: Final = "WireSequence" required_edgework: Final = () total_stages = 4 cut_table: Final[Dict[str, Tuple[str, ...]]] = { 'red': ('c', 'b', 'a', 'ac', 'b', 'ac', 'abc', 'ab', 'b'), 'blue': ('b', 'ac', 'b', 'a', 'b', 'bc', 'c', 'ac', 'a'), 'black': ('abc', 'ac', 'b', 'ac', 'b', 'bc', 'ab', 'c', 'c') } wire_counts: Counter[str] last_stage_counts: Counter[str] def custom_data_init(self) -> None: self.wire_counts = Counter() self.last_stage_counts = Counter() def custom_data_clear(self) -> None: self.wire_counts.clear() self.last_stage_counts.clear() def on_this_struck(self) -> None: self.last_stage_counts.clear() @classmethod def _check_wire_text(cls, text: str) -> bool: if len(text.split()) != 2: return False if text.split()[0] not in cls.cut_table: return False if text.split()[1] not in 'abc': return False return True def ask_wires(self) -> Tuple[_SequenceWire, ...]: "Get a set of wires from the user." 
talk("What wires are on the panel, in order by their left plug?") talk("Input their color followed by the letter") talk('they\'re plugged into, like "red C".') wirelist = ask.list_from_func(self._check_wire_text) converted_wirelist = tuple(_SequenceWire._make(wire.split()) for wire in wirelist) return converted_wirelist def stage(self) -> None: # canonize last_stage_counts before starting new stage self.wire_counts.update(self.last_stage_counts) self.last_stage_counts.clear() wires = self.ask_wires() for wire in wires: self.last_stage_counts[wire.color] += 1 wire_count = self.wire_counts[wire.color] + self.last_stage_counts[wire.color] if wire.label in self.cut_table[wire.color][wire_count-1]: talk(f"Cut the {wire.color} wire " f"connected to label {wire.label.upper()}.") else: talk(f"Do not cut the {wire.color} wire " f"connected to label {wire.label.upper()}.") talk("Press the down arrow to finish this panel.") class Maze(ModuleSolver): "Solver for vanilla Maze." name: Final = "Maze" id: Final = "Maze" required_edgework: Final = () mazes: Final[Tuple[Tuple[maze.Wall, ...], ...]] = ( (maze.Wall(0, 5), maze.Wall(1, 2), maze.Wall(1, 8), maze.Wall(1, 10), maze.Wall(2, 1), maze.Wall(2, 5), maze.Wall(3, 4), maze.Wall(3, 6), maze.Wall(3, 8), maze.Wall(4, 1), maze.Wall(4, 5), maze.Wall(5, 2), maze.Wall(5, 8), maze.Wall(6, 1), maze.Wall(6, 7), maze.Wall(7, 2), maze.Wall(7, 4), maze.Wall(7, 6), maze.Wall(7, 8), maze.Wall(8, 5), maze.Wall(8, 9), maze.Wall(9, 2), maze.Wall(9, 8), maze.Wall(10, 3), maze.Wall(10, 7)), (maze.Wall(0, 5), maze.Wall(1, 0), maze.Wall(1, 4), maze.Wall(1, 10), maze.Wall(2, 3), maze.Wall(2, 7), maze.Wall(3, 2), maze.Wall(3, 6), maze.Wall(3, 8), maze.Wall(4, 1), maze.Wall(4, 5), maze.Wall(5, 4), maze.Wall(5, 8), maze.Wall(6, 3), maze.Wall(6, 7), maze.Wall(6, 9), maze.Wall(7, 2), maze.Wall(7, 6), maze.Wall(8, 1), maze.Wall(8, 3), maze.Wall(8, 5), maze.Wall(8, 9), maze.Wall(9, 8), maze.Wall(10, 1), maze.Wall(10, 5)), (maze.Wall(0, 5), maze.Wall(0, 7), maze.Wall(1, 2), maze.Wall(2, 1), maze.Wall(2, 3), maze.Wall(2, 5), maze.Wall(2, 9), maze.Wall(3, 0), maze.Wall(3, 6), maze.Wall(3, 8), maze.Wall(4, 3), maze.Wall(4, 5), maze.Wall(4, 9), maze.Wall(6, 1), maze.Wall(6, 3), maze.Wall(6, 5), maze.Wall(6, 7), maze.Wall(6, 9), maze.Wall(8, 1), maze.Wall(8, 5), maze.Wall(8, 7), maze.Wall(8, 9), maze.Wall(9, 2), maze.Wall(9, 4), maze.Wall(10, 7)), (maze.Wall(0, 3), maze.Wall(1, 4), maze.Wall(1, 6), maze.Wall(1, 8), maze.Wall(2, 1), maze.Wall(2, 3), maze.Wall(3, 6), maze.Wall(3, 8), maze.Wall(4, 1), maze.Wall(4, 5), maze.Wall(4, 9), maze.Wall(5, 2), maze.Wall(5, 4), maze.Wall(5, 8), maze.Wall(6, 1), maze.Wall(7, 2), maze.Wall(7, 4), maze.Wall(7, 6), maze.Wall(7, 8), maze.Wall(8, 9), maze.Wall(9, 2), maze.Wall(9, 4), maze.Wall(9, 6), maze.Wall(10, 5), maze.Wall(10, 9)), (maze.Wall(1, 0), maze.Wall(1, 2), maze.Wall(1, 4), maze.Wall(1, 6), maze.Wall(2, 9), maze.Wall(3, 2), maze.Wall(3, 4), maze.Wall(3, 8), maze.Wall(3, 10), maze.Wall(4, 3), maze.Wall(4, 7), maze.Wall(5, 4), maze.Wall(5, 6), maze.Wall(6, 1), maze.Wall(6, 7), maze.Wall(6, 9), maze.Wall(7, 2), maze.Wall(7, 4), maze.Wall(7, 8), maze.Wall(8, 1), maze.Wall(8, 9), maze.Wall(9, 4), maze.Wall(9, 6), maze.Wall(9, 8), maze.Wall(10, 1)), (maze.Wall(0, 1), maze.Wall(0, 5), maze.Wall(1, 6), maze.Wall(2, 1), maze.Wall(2, 3), maze.Wall(2, 5), maze.Wall(2, 9), maze.Wall(3, 8), maze.Wall(4, 3), maze.Wall(4, 5), maze.Wall(4, 7), maze.Wall(5, 2), maze.Wall(5, 4), maze.Wall(5, 10), maze.Wall(6, 3), maze.Wall(6, 7), maze.Wall(6, 9), maze.Wall(7, 
0), maze.Wall(8, 3), maze.Wall(8, 5), maze.Wall(8, 7), maze.Wall(9, 2), maze.Wall(9, 4), maze.Wall(9, 8), maze.Wall(10, 7)), (maze.Wall(0, 7), maze.Wall(1, 2), maze.Wall(1, 4), maze.Wall(2, 1), maze.Wall(2, 5), maze.Wall(2, 9), maze.Wall(3, 4), maze.Wall(3, 6), maze.Wall(3, 8), maze.Wall(4, 3), maze.Wall(4, 7), maze.Wall(5, 0), maze.Wall(5, 2), maze.Wall(5, 6), maze.Wall(5, 10), maze.Wall(6, 3), maze.Wall(6, 9), maze.Wall(7, 6), maze.Wall(7, 8), maze.Wall(8, 1), maze.Wall(8, 3), maze.Wall(8, 9), maze.Wall(9, 2), maze.Wall(9, 4), maze.Wall(9, 6)), (maze.Wall(0, 1), maze.Wall(0, 7), maze.Wall(1, 4), maze.Wall(2, 5), maze.Wall(2, 9), maze.Wall(3, 2), maze.Wall(3, 4), maze.Wall(3, 6), maze.Wall(3, 8), maze.Wall(4, 1), maze.Wall(4, 9), maze.Wall(5, 4), maze.Wall(5, 6), maze.Wall(6, 1), maze.Wall(6, 5), maze.Wall(7, 2), maze.Wall(7, 6), maze.Wall(7, 8), maze.Wall(7, 10), maze.Wall(8, 1), maze.Wall(8, 3), maze.Wall(9, 4), maze.Wall(9, 6), maze.Wall(9, 8), maze.Wall(9, 10)), (maze.Wall(0, 1), maze.Wall(1, 4), maze.Wall(1, 6), maze.Wall(2, 1), maze.Wall(2, 3), maze.Wall(2, 7), maze.Wall(2, 9), maze.Wall(3, 6), maze.Wall(4, 5), maze.Wall(4, 9), maze.Wall(5, 2), maze.Wall(5, 4), maze.Wall(5, 8), maze.Wall(6, 1), maze.Wall(6, 3), maze.Wall(6, 7), maze.Wall(7, 6), maze.Wall(7, 8), maze.Wall(8, 1), maze.Wall(8, 3), maze.Wall(8, 5), maze.Wall(8, 9), maze.Wall(9, 10), maze.Wall(10, 3), maze.Wall(10, 7)) ) mark_to_maze: Dict[grid.Coord, Tuple[maze.Wall, ...]] = { grid.Coord(1, 0): mazes[0], grid.Coord(2, 5): mazes[0], grid.Coord(3, 1): mazes[1], grid.Coord(1, 4): mazes[1], grid.Coord(3, 3): mazes[2], grid.Coord(3, 5): mazes[2], grid.Coord(0, 0): mazes[3], grid.Coord(3, 0): mazes[3], grid.Coord(2, 4): mazes[4], grid.Coord(5, 3): mazes[4], grid.Coord(0, 4): mazes[5], grid.Coord(4, 2): mazes[5], grid.Coord(0, 1): mazes[6], grid.Coord(5, 1): mazes[6], grid.Coord(0, 3): mazes[7], grid.Coord(3, 2): mazes[7], grid.Coord(1, 2): mazes[8], grid.Coord(4, 0): mazes[8], } def stage(self) -> None: while True: talk("What coordinate contains the white light?") start = maze.Node._make(grid.ask_coord()) talk("What coordinate contains the red triangle?") goal = maze.Node._make(grid.ask_coord()) talk("What coordinate contains a circular marking?") talk("(You may use either one.)") marking = grid.ask_coord() while marking not in self.mark_to_maze: talk("That doesn't fit any of the mazes.") talk("What coordinate contains a circular marking?") talk("(You may use either one.)") marking = grid.ask_coord() walls = self.mark_to_maze[marking] path = maze.solve_maze(grid.Dimensions(6, 6), start, goal, walls) if not path: talk("Something went wrong and I couldn't find a path.") continue talk("Press the following directions in order:") for direction in path: talk(direction) return class Password(ModuleSolver): "Solver for vanilla Password." 
name: Final = "Password" id: Final = "Password" required_edgework: Final = () valid_words: Final = { "about", "after", "again", "below", "could", "every", "first", "found", "great", "house", "large", "learn", "never", "other", "place", "plant", "point", "right", "small", "sound", "spell", "still", "study", "their", "there", "these", "thing", "think", "three", "water", "where", "which", "world", "would", "write" } def stage(self) -> None: while True: possible_words: List[str] = list(self.valid_words) for column_index in range(5): talk(f"What letters are in column {column_index + 1}?") letters = ask.str_from_regex(r"[a-z]{6}") while len(set(letters)) != 6: # all letters should be unique talk("There should be 6 unique letters in the column.") talk(f"What letters are in column {column_index + 1}?") letters = ask.str_from_regex(r"[a-z]{6}") possible_words = [word for word in possible_words if word[column_index] in letters] # filter if len(possible_words) == 1: answer = possible_words[0].upper() talk(f'Enter the password "{answer}".') return if len(possible_words) == 0: break # no valid word or multiple valid words talk("Something went wrong. Let's start over.")
StarcoderdataPython
100244
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import math

import numpy as np

import states.area
import states.face
import states.fail
import states.success
from challenge import Challenge


class NoseState:
    MAXIMUM_DURATION_IN_SECONDS = 10
    AREA_BOX_TOLERANCE = 0.05
    NOSE_BOX_TOLERANCE = 0.55
    TRAJECTORY_ERROR_THRESHOLD = 0.01
    HISTOGRAM_BINS = 3
    MIN_DIST = 0.10
    ROTATION_THRESHOLD = 5.0
    MIN_DIST_FACTOR_ROTATED = 0.75
    MIN_DIST_FACTOR_NOT_ROTATED = 1.5

    def __init__(self, challenge, original_frame):
        self.challenge = challenge
        self.image_width = challenge['imageWidth']
        self.image_height = challenge['imageHeight']
        # Applying tolerance
        area_width_tolerance = challenge['areaWidth'] * NoseState.AREA_BOX_TOLERANCE
        area_height_tolerance = challenge['areaHeight'] * NoseState.AREA_BOX_TOLERANCE
        self.area_box = (challenge['areaLeft'] - area_width_tolerance,
                         challenge['areaTop'] - area_height_tolerance,
                         challenge['areaWidth'] + 2 * area_width_tolerance,
                         challenge['areaHeight'] + 2 * area_height_tolerance)
        nose_width_tolerance = challenge['noseWidth'] * NoseState.NOSE_BOX_TOLERANCE
        nose_height_tolerance = challenge['noseHeight'] * NoseState.NOSE_BOX_TOLERANCE
        self.nose_box = (challenge['noseLeft'] - nose_width_tolerance,
                         challenge['noseTop'] - nose_height_tolerance,
                         challenge['noseWidth'] + 2 * nose_width_tolerance,
                         challenge['noseHeight'] + 2 * nose_height_tolerance)
        self.challenge_in_the_right = (challenge['noseLeft'] + Challenge.NOSE_BOX_SIZE / 2
                                       > self.image_width / 2)
        self.original_frame = original_frame
        self.original_landmarks = original_frame['rekMetadata'][0]['Landmarks']
        self.nose_trajectory = []

    def process(self, frame):
        rek_metadata = frame['rekMetadata'][0]
        rek_face_bbox = [
            self.image_width * rek_metadata['BoundingBox']['Left'],
            self.image_height * rek_metadata['BoundingBox']['Top'],
            self.image_width * rek_metadata['BoundingBox']['Width'],
            self.image_height * rek_metadata['BoundingBox']['Height']
        ]
        if not states.area.AreaState.is_inside_area_box(self.area_box, rek_face_bbox):
            return False
        rek_landmarks = rek_metadata['Landmarks']
        rek_pose = rek_metadata['Pose']
        if self.is_inside_nose_box(rek_landmarks):
            verified = self.verify_challenge(rek_landmarks, rek_pose,
                                             self.challenge_in_the_right)
            return verified
        return None

    def is_inside_nose_box(self, landmarks):
        for landmark in landmarks:
            if landmark['Type'] == 'nose':
                nose_left = self.image_width * landmark['X']
                nose_top = self.image_height * landmark['Y']
                self.nose_trajectory.append((landmark['X'], landmark['Y']))
                return (self.nose_box[0] <= nose_left <= self.nose_box[0] + self.nose_box[2]
                        and self.nose_box[1] <= nose_top <= self.nose_box[1] + self.nose_box[3])
        return False

    def get_next_state_failure(self):
        return states.fail.FailState()

    def get_next_state_success(self):
        return states.success.SuccessState()

    def verify_challenge(self, current_landmarks, pose, challenge_in_the_right):
        # Validating continuous and linear nose trajectory
        nose_trajectory_x = [nose[0] for nose in self.nose_trajectory]
        nose_trajectory_y = [nose[1] for nose in self.nose_trajectory]
        _, residuals, _, _, _ = np.polyfit(nose_trajectory_x, nose_trajectory_y, 2, full=True)
        trajectory_error = math.sqrt(residuals / len(self.nose_trajectory))
        if trajectory_error > NoseState.TRAJECTORY_ERROR_THRESHOLD:
            return False

        # Plotting landmarks from the first frame in a histogram
        original_landmarks_x = [self.image_width * landmark['X']
                                for landmark in self.original_landmarks]
        original_landmarks_y = [self.image_height * landmark['Y']
                                for landmark in self.original_landmarks]
        original_histogram, _, _ = np.histogram2d(original_landmarks_x, original_landmarks_y,
                                                  bins=NoseState.HISTOGRAM_BINS)
        original_histogram = np.reshape(
            original_histogram, NoseState.HISTOGRAM_BINS ** 2) / len(original_landmarks_x)

        # Plotting landmarks from the last frame in a histogram
        current_landmarks_x = [self.image_width * landmark['X']
                               for landmark in current_landmarks]
        current_landmarks_y = [self.image_height * landmark['Y']
                               for landmark in current_landmarks]
        current_histogram, _, _ = np.histogram2d(current_landmarks_x, current_landmarks_y,
                                                 bins=NoseState.HISTOGRAM_BINS)
        current_histogram = np.reshape(
            current_histogram, NoseState.HISTOGRAM_BINS ** 2) / len(current_landmarks_x)

        # Calculating the Euclidean distance between histograms
        dist = np.linalg.norm(original_histogram - current_histogram)

        # Estimating left and right rotation
        yaw = pose['Yaw']
        rotated_right = yaw > NoseState.ROTATION_THRESHOLD
        rotated_left = yaw < -NoseState.ROTATION_THRESHOLD
        rotated_face = rotated_left or rotated_right

        # Validating distance according to rotation
        if (rotated_right and challenge_in_the_right) or (rotated_left and not challenge_in_the_right):
            min_dist = NoseState.MIN_DIST * NoseState.MIN_DIST_FACTOR_ROTATED
        elif not rotated_face:
            min_dist = NoseState.MIN_DIST * NoseState.MIN_DIST_FACTOR_NOT_ROTATED
        else:
            return False
        if dist > min_dist:
            return True
        return False
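
# Usage sketch (illustrative, not from the original file): a self-contained
# demonstration of the landmark-histogram comparison that verify_challenge
# performs above -- landmarks from two frames are binned into 3x3 histograms
# and compared by Euclidean distance. The landmark coordinates are made up.
def _landmark_histogram(xs, ys, bins=3):
    hist, _, _ = np.histogram2d(xs, ys, bins=bins)
    return np.reshape(hist, bins ** 2) / len(xs)


if __name__ == '__main__':
    first = ([10, 12, 30, 55, 70], [20, 25, 40, 60, 80])
    last = ([40, 45, 60, 80, 95], [20, 25, 40, 60, 80])  # face moved right
    dist = np.linalg.norm(_landmark_histogram(*first) - _landmark_histogram(*last))
    print('histogram distance:', dist)  # compared against min_dist above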
import datetime
import decimal
import uuid

from dateutil.relativedelta import relativedelta
from django.db import transaction
from django.db.models import Sum
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import exceptions, status
from rest_framework.decorators import action
from rest_framework.response import Response

from waldur_core.core import validators as core_validators
from waldur_core.core import views as core_views
from waldur_core.structure import filters as structure_filters
from waldur_core.structure import models as structure_models
from waldur_core.structure import permissions as structure_permissions
from waldur_mastermind.common.utils import quantize_price

from . import filters, log, models, serializers, tasks, utils


class InvoiceViewSet(core_views.ReadOnlyActionsViewSet):
    queryset = models.Invoice.objects.order_by('-year', '-month')
    serializer_class = serializers.InvoiceSerializer
    lookup_field = 'uuid'
    filter_backends = (
        structure_filters.GenericRoleFilter,
        structure_filters.CustomerAccountingStartDateFilter,
        DjangoFilterBackend,
    )
    filterset_class = filters.InvoiceFilter

    def _is_invoice_created(invoice):
        if invoice.state != models.Invoice.States.CREATED:
            raise exceptions.ValidationError(
                _('Notification only for the created invoice can be sent.')
            )

    @action(detail=True, methods=['post'])
    def send_notification(self, request, uuid=None):
        invoice = self.get_object()
        tasks.send_invoice_notification.delay(invoice.uuid.hex)
        return Response(
            {
                'detail': _(
                    'Invoice notification sending has been successfully scheduled.'
                )
            },
            status=status.HTTP_200_OK,
        )

    send_notification_permissions = [structure_permissions.is_staff]
    send_notification_validators = [_is_invoice_created]

    @action(detail=True)
    def pdf(self, request, uuid=None):
        invoice = self.get_object()
        file = utils.create_invoice_pdf(invoice)
        file_response = HttpResponse(file, content_type='application/pdf')
        filename = invoice.get_filename()
        file_response[
            'Content-Disposition'
        ] = 'attachment; filename="{filename}"'.format(filename=filename)
        return file_response

    @transaction.atomic
    @action(detail=True, methods=['post'])
    def paid(self, request, uuid=None):
        invoice = self.get_object()

        if request.data:
            serializer = serializers.PaidSerializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            try:
                profile = models.PaymentProfile.objects.get(
                    is_active=True, organization=invoice.customer
                )
            except models.PaymentProfile.DoesNotExist:
                raise exceptions.ValidationError(
                    _('The active profile for this customer does not exist.')
                )
            payment = models.Payment.objects.create(
                date_of_payment=serializer.validated_data['date'],
                sum=invoice.total_current,
                profile=profile,
                invoice=invoice,
            )
            proof = serializer.validated_data.get('proof')
            if proof:
                payment.proof = proof
                payment.save()
            log.event_logger.invoice.info(
                'Payment for invoice ({month}/{year}) has been added.',
                event_type='payment_created',
                event_context={
                    'month': invoice.month,
                    'year': invoice.year,
                    'customer': invoice.customer,
                },
            )

        invoice.state = models.Invoice.States.PAID
        invoice.save(update_fields=['state'])
        return Response(status=status.HTTP_200_OK)

    paid_permissions = [structure_permissions.is_staff]
    paid_validators = [core_validators.StateValidator(models.Invoice.States.CREATED)]

    @action(detail=True)
    def stats(self, request, uuid=None):
        invoice = self.get_object()
        offerings = {}

        for item in invoice.items.all():
            if not item.resource:
                continue
            resource = item.resource
            offering = resource.offering
            customer = offering.customer
            service_category_title = offering.category.title
            service_provider_name = customer.name
            service_provider_uuid = customer.serviceprovider.uuid.hex
            if offering.uuid.hex not in offerings.keys():
                offerings[offering.uuid.hex] = {
                    'offering_name': offering.name,
                    'aggregated_cost': item.total,
                    'service_category_title': service_category_title,
                    'service_provider_name': service_provider_name,
                    'service_provider_uuid': service_provider_uuid,
                }
            else:
                offerings[offering.uuid.hex]['aggregated_cost'] += item.total

        queryset = [dict(uuid=key, **details) for (key, details) in offerings.items()]
        for item in queryset:
            item['aggregated_cost'] = quantize_price(
                decimal.Decimal(item['aggregated_cost'])
            )

        page = self.paginate_queryset(queryset)
        return self.get_paginated_response(page)

    @action(detail=False)
    def growth(self, request):
        if not self.request.user.is_staff and not request.user.is_support:
            raise exceptions.PermissionDenied()

        customers = structure_models.Customer.objects.all()
        customers = structure_filters.AccountingStartDateFilter().filter_queryset(
            request, customers, self
        )

        customers_count = 4
        if 'customers_count' in request.query_params:
            try:
                customers_count = int(request.query_params['customers_count'])
            except ValueError:
                raise exceptions.ValidationError('customers_count is not a number')
        if customers_count > 20:
            raise exceptions.ValidationError(
                'customers_count should not be greater than 20'
            )

        is_accounting_mode = request.query_params.get('accounting_mode') == 'accounting'

        today = datetime.date.today()
        current_month = today - relativedelta(months=12)

        majors = list(
            models.Invoice.objects.filter(
                customer__in=customers, created__gte=current_month
            )
            .values('customer_id')
            .annotate(total=Sum('current_cost'))
            .order_by('-total')
            .values_list('customer_id', flat=True)[:customers_count]
        )
        minors = customers.exclude(id__in=majors)

        customer_periods = {}
        total_periods = {}
        other_periods = {}

        for i in range(13):
            invoices = models.Invoice.objects.filter(
                year=current_month.year,
                month=current_month.month,
            )
            key = f'{current_month.year}-{current_month.month}'
            row = customer_periods[key] = {}
            subtotal = 0
            for invoice in invoices.filter(customer_id__in=majors):
                value = is_accounting_mode and invoice.price or invoice.total
                subtotal += value
                row[invoice.customer.uuid.hex] = value
            other_periods[key] = sum(
                is_accounting_mode and invoice.price or invoice.total
                for invoice in invoices.filter(customer_id__in=minors)
            )
            total_periods[key] = subtotal + other_periods[key]
            current_month += relativedelta(months=1)

        result = {
            'periods': total_periods.keys(),
            'total_periods': total_periods.values(),
            'other_periods': other_periods.values(),
            'customer_periods': [
                {
                    'name': customer.name,
                    'periods': [
                        customer_periods[period].get(customer.uuid.hex, 0)
                        for period in total_periods.keys()
                    ],
                }
                for customer in structure_models.Customer.objects.filter(id__in=majors)
            ],
        }
        return Response(result, status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'])
    def set_backend_id(self, request, uuid=None):
        invoice = self.get_object()
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        backend_id = serializer.validated_data['backend_id']
        invoice.backend_id = backend_id
        invoice.save()
        return Response(status=status.HTTP_200_OK)

    set_backend_id_permissions = [structure_permissions.is_staff]
    set_backend_id_serializer_class = serializers.BackendIdSerializer


class InvoiceItemViewSet(core_views.ActionsViewSet):
    disabled_actions = ['create']
    queryset = models.InvoiceItem.objects.all()
    serializer_class = serializers.InvoiceItemDetailSerializer
    lookup_field = 'uuid'

    def get_queryset(self):
        qs = super().get_queryset()
        if self.request.user.is_staff:
            return qs
        else:
            return qs.none()

    @transaction.atomic
    @action(detail=True, methods=['post'])
    def create_compensation(self, request, **kwargs):
        invoice_item = self.get_object()
        serializer = self.get_serializer_class()(data=request.data)
        serializer.is_valid(raise_exception=True)
        offering_component_name = serializer.validated_data['offering_component_name']

        if invoice_item.unit_price < 0:
            return Response(
                'Can not create compensation for invoice item with negative unit price.',
                status=status.HTTP_400_BAD_REQUEST,
            )

        year, month = utils.get_current_year(), utils.get_current_month()
        invoice, _ = models.Invoice.objects.get_or_create(
            customer=invoice_item.invoice.customer,
            month=month,
            year=year,
        )

        # Fill new invoice item details
        if not invoice_item.details:
            invoice_item.details = {}
        invoice_item.details['original_invoice_item_uuid'] = invoice_item.uuid.hex
        invoice_item.details['offering_component_name'] = offering_component_name

        # Save new invoice item to database
        invoice_item.invoice = invoice
        invoice_item.pk = None
        invoice_item.uuid = uuid.uuid4()
        invoice_item.unit_price *= -1
        invoice_item.save()

        return Response(
            {'invoice_item_uuid': invoice_item.uuid.hex},
            status=status.HTTP_201_CREATED,
        )

    create_compensation_serializer_class = serializers.InvoiceItemCompensationSerializer
    update_serializer_class = serializers.InvoiceItemUpdateSerializer
    partial_update_serializer_class = serializers.InvoiceItemUpdateSerializer
    create_compensation_permissions = (
        update_permissions
    ) = partial_update_permissions = destroy_permissions = [
        structure_permissions.is_staff
    ]


class PaymentProfileViewSet(core_views.ActionsViewSet):
    lookup_field = 'uuid'
    filter_backends = (
        structure_filters.GenericRoleFilter,
        DjangoFilterBackend,
        filters.PaymentProfileFilterBackend,
    )
    filterset_class = filters.PaymentProfileFilter
    create_permissions = (
        update_permissions
    ) = partial_update_permissions = destroy_permissions = enable_permissions = [
        structure_permissions.is_staff
    ]
    queryset = models.PaymentProfile.objects.all().order_by('name')
    serializer_class = serializers.PaymentProfileSerializer

    @action(detail=True, methods=['post'])
    def enable(self, request, uuid=None):
        profile = self.get_object()
        profile.is_active = True
        profile.save(update_fields=['is_active'])

        return Response(
            {'detail': _('Payment profile has been enabled.')},
            status=status.HTTP_200_OK,
        )


class PaymentViewSet(core_views.ActionsViewSet):
    lookup_field = 'uuid'
    filter_backends = (
        structure_filters.GenericRoleFilter,
        DjangoFilterBackend,
    )
    filterset_class = filters.PaymentFilter
    create_permissions = (
        update_permissions
    ) = (
        partial_update_permissions
    ) = (
        destroy_permissions
    ) = link_to_invoice_permissions = unlink_from_invoice_permissions = [
        structure_permissions.is_staff
    ]
    queryset = models.Payment.objects.all().order_by('created')
    serializer_class = serializers.PaymentSerializer

    @action(detail=True, methods=['post'])
    def link_to_invoice(self, request, uuid=None):
        payment = self.get_object()
        serializer = self.get_serializer_class()(data=request.data)
        serializer.is_valid(raise_exception=True)
        invoice = serializer.validated_data['invoice']

        if invoice.customer != payment.profile.organization:
            raise exceptions.ValidationError(
                _('The passed invoice does not belong to the selected customer.')
            )

        payment.invoice = invoice
        payment.save(update_fields=['invoice'])

        log.event_logger.invoice.info(
            'Payment for invoice ({month}/{year}) has been added.',
            event_type='payment_created',
            event_context={
                'month': invoice.month,
                'year': invoice.year,
                'customer': invoice.customer,
            },
        )

        return Response(
            {'detail': _('An invoice has been linked to payment.')},
            status=status.HTTP_200_OK,
        )

    def _link_to_invoice_exists(payment):
        if payment.invoice:
            raise exceptions.ValidationError(_('Link to an invoice exists.'))

    link_to_invoice_validators = [_link_to_invoice_exists]
    link_to_invoice_serializer_class = serializers.LinkToInvoiceSerializer

    def _link_to_invoice_does_not_exist(payment):
        if not payment.invoice:
            raise exceptions.ValidationError(_('Link to an invoice does not exist.'))

    @action(detail=True, methods=['post'])
    def unlink_from_invoice(self, request, uuid=None):
        payment = self.get_object()
        invoice = payment.invoice
        payment.invoice = None
        payment.save(update_fields=['invoice'])

        log.event_logger.invoice.info(
            'Payment for invoice ({month}/{year}) has been removed.',
            event_type='payment_removed',
            event_context={
                'month': invoice.month,
                'year': invoice.year,
                'customer': invoice.customer,
            },
        )

        return Response(
            {'detail': _('An invoice has been unlinked from payment.')},
            status=status.HTTP_200_OK,
        )

    unlink_from_invoice_validators = [_link_to_invoice_does_not_exist]

    def perform_create(self, serializer):
        super(PaymentViewSet, self).perform_create(serializer)
        payment = serializer.instance
        log.event_logger.payment.info(
            'Payment for {customer_name} in the amount of {amount} has been added.',
            event_type='payment_added',
            event_context={
                'amount': payment.sum,
                'customer': payment.profile.organization,
            },
        )

    def perform_destroy(self, instance):
        customer = instance.profile.organization
        amount = instance.sum
        super(PaymentViewSet, self).perform_destroy(instance)
        log.event_logger.payment.info(
            'Payment for {customer_name} in the amount of {amount} has been removed.',
            event_type='payment_removed',
            event_context={'amount': amount, 'customer': customer},
        )
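
# Usage sketch (illustrative, not from the original file): how the custom
# `paid` action above could be exercised from a DRF test. The URL prefix and
# the `staff_user`/`invoice` fixtures are assumptions about the surrounding
# project, so this is left as a commented sketch rather than runnable code.
#
# from rest_framework.test import APIClient
#
# client = APIClient()
# client.force_authenticate(user=staff_user)  # staff required by paid_permissions
# response = client.post(
#     '/api/invoices/%s/paid/' % invoice.uuid.hex,
#     {'date': '2021-10-01'},  # validated by PaidSerializer
#     format='multipart',      # allows attaching a 'proof' file
# )
# assert response.status_code == 200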
# setup.py
# -*- coding: utf-8 -*-

from setuptools import setup, find_packages

with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='vindinium-client',
    version='0.1.0',
    description='Client for Vindinium.org',
    long_description=readme,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/halfstrik/vendinium-client',
    license=license,
    packages=find_packages(),
    install_requires=['requests==2.18.4'],
)
import swap
#import compare_SWAP_GZ2 as utils
from simulation import Simulation

import os, sys, subprocess, getopt
from argparse import ArgumentParser
from astropy.table import Table
import pdb
import datetime as dt
import numpy as np
import cPickle


def MachineShop(args):
    # Buh. I never built in the ability to change directories on the fly
    #machine_sim_directory = 'sims_Machine/redo_with_circular_morphs'

    """
    Sometimes you just need to the run the Machine on a bunch of already made
    SWAP-runs / simulations. If so, this script is for you!
    """

    # Get parameters from the SWAP run of interest
    the = swap.Configuration(args.config)
    params = the.parameters

    # This pulls up the FIDUCIAL SWAP simulation
    sim = Simulation(config=args.config,
                     directory='sims_SWAP/S_PLPD5_p5_ff_norand',
                     variety='feat_or_not')

    # this was originally set to 2/17/09 which is WRONG
    # 11/2/17: WHY?? Fuck you, <NAME>. What am I supposed to do here??
    first_day = dt.datetime(2009, 2, 12)
    today = dt.datetime.strptime(params['start'], '%Y-%m-%d_%H:%M:%S')
    start_day = dt.datetime(2009, 2, 17)
    last_day = dt.datetime.strptime(params['end'], '%Y-%m-%d_%H:%M:%S')
    yesterday = None

    run_machine = False
    SWAP_retired = 0
    notfound = 0
    last_night = None

    for idx, filename in enumerate(sim.retiredFileList[(today - first_day).days:]):
        print ""
        print "----------------------- The Machine Shop ----------------------------"
        print "Today is {}".format(today)

        if today >= last_day:
            print "Get outta the machine shop!"
            exit()

        # ---------------------------------------------------------------------
        #  OPEN METADATA PICKLE (updated each time MachineClassifier is run)
        # ---------------------------------------------------------------------
        backup_meta_file = params['metadatafile'].replace('.pickle', '_orig.pickle')

        if today == first_day:
            try:
                storage = swap.read_pickle(backup_meta_file, 'metadata')
            except:
                print "MachineShop: Backup metadata pickle not yet created."
                print "MachineShop: Opening original metadata pickle file instead"
                storage = swap.read_pickle(params['metadatafile'], 'metadata')

            if 'retired_date' not in storage.subjects.colnames:
                storage.subjects['retired_date'] = '2016-09-10'

            if 'valid' not in np.unique(storage.subjects['MLsample']):
                expert = (storage.subjects['Expert_label'] != -1)
                storage.subjects['MLsample'][expert] = 'valid'

            # save an untouched copy for reference later
            print "MachineShop: Creating a backup metadata pickle"
            swap.write_pickle(storage, backup_meta_file)
        else:
            storage = swap.read_pickle(params['metadatafile'], 'metadata')

        # Regardless of which metadata you open, make sure it has these columns
        # (old metadata files WON'T have them!)
        if 'retired_date' not in storage.subjects.colnames:
            storage.subjects['retired_date'] = '2016-09-10'

        if 'valid' not in np.unique(storage.subjects['MLsample']):
            expert = (storage.subjects['Expert_label'] != -1)
            storage.subjects['MLsample'][expert] = 'valid'

        subjects = storage.subjects

        # I just need to know what was retired TONIGHT --
        # compare what's retired UP TILL tonight with what was
        # retired up till LAST NIGHT
        SWAP_retired_by_tonight = sim.fetchCatalog(filename)

        # If we're picking up where we left off, grab previous training sample
        #if today>start_day and last_night is None:
        #    print 'MachineShop: getting previous training sample'
        #    last_night = subjects[subjects['MLsample']=='train']
        #    last_night['zooid'] = last_night['SDSS_id']

        try:
            ids_retired_tonight = set(SWAP_retired_by_tonight['zooid']) - \
                                  set(last_night['zooid'])
        except:
            ids_retired_tonight = set(SWAP_retired_by_tonight['zooid'])

        print "Newly retired subjects: {}".format(len(ids_retired_tonight))

        # Now that I have the ids from the previous night, adjust the
        # metadata file to reflect what was retired / add SWAP info
        for ID in list(ids_retired_tonight):
            # Locate this subject in the metadata file
            mask = subjects['SDSS_id'] == int(ID)

            # Update them in metadata file as training sample for MC
            # DOUBLE CHECK THAT IT HAS NOT BEEN RETIRED BY MACHINE!!!
            #if subjects['MLsample'][mask] == 'test ':
            if subjects['MLsample'][mask] == 'test':
                SWAP_retired += 1
                subjects['MLsample'][mask] = 'train'
                subjects['retired_date'][mask] = dt.datetime.strftime(today, '%Y-%m-%d')
                subjects['SWAP_prob'][mask] = \
                    SWAP_retired_by_tonight['P'][SWAP_retired_by_tonight['zooid'] == ID]
                run_machine = True
            else:
                notfound += 1

        if len(subjects[subjects['MLsample'] == 'train']) >= 10000:
            run_machine = True

        last_night = SWAP_retired_by_tonight
        print "Retired by this day:", len(last_night)

        print ""
        print "MachineShop: Found {0} subjects retired by SWAP on {1}"\
            .format(SWAP_retired, today)
        print "MachineShop: {0} total subjects retired so far"\
            .format(np.sum(subjects['MLsample'] == 'train'))
        print "MachineShop: Found {0} subjects retired by Machine."\
            .format(np.sum(subjects['MLsample'] == 'mclas'))
        print "MachineShop: Saving updated StorageLocker."

        params['dir'] = os.getcwd()

        # Save our new metadata file -- MC needs this -- save to NOT the original
        params['metadatafile'] = params['dir'] + '/' + params['survey'] + '_metadata.pickle'
        swap.write_pickle(storage, params['metadatafile'])

        if run_machine:
            # Need to doctor the config to refect the "correct date"
            params['start'] = today.strftime('%Y-%m-%d_%H:%M:%S')
            swap.write_config(args.config, params)

            # Run MachineClassifier.py using this subject file
            os.system("python MachineClassifier.py -c %s" % args.config)
            """os.system("python test_Machine.py -c {0}".format(args.config))"""

        # MachineClassifier updates the configfile so now we need to open the NEW one
        the = swap.Configuration(args.config)
        params = the.parameters

        # Update date (since we're not running SWAP)
        today += dt.timedelta(days=1)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("config")
    parser.add_argument("-o", "--old", dest='old_run', action='store_true',
                        default=False)
    args = parser.parse_args()

    MachineShop(args)


"""
This whole approach is stupid.

    # Determine amount of SWAP output
    # If there was a successful run, there should be
    #   -- directories beginning with params['survey']
    #   -- pickle files beginning with params['trunk']
    detectfilelist = utils.fetch_filelist(params, kind='detected')
    if args.old_run:
        rejectfilelist = utils.fetch_filelist(params, kind='retired')
    else:
        rejectfilelist = utils.fetch_filelist(params, kind='rejected')

    train_detected_ids = set()
    train_rejected_ids = set()

    for dfile, rfile in zip(detectfilelist, rejectfilelist):
        detected = utils.fetch_classifications(dfile)
        rejected = utils.fetch_classifications(rfile)

        detect_ids = set(detected['zooid'])
        reject_ids = set(rejected['zooid'])

        # Grab only the new ones
        new_detected = detect_ids.difference(train_detected_ids)
        print "%i new detected subjects" % len(new_detected)

        new_rejected = reject_ids.difference(train_rejected_ids)
        print "%i new rejected subjects" % len(new_rejected)

        # Loop through the new ids and switch from 'test' to 'train'
        for new in new_detected:
            if subjects['MLsample'][subjects['SDSS_id'] == new] != 'valid':
                subjects['MLsample'][subjects['SDSS_id'] == new] = 'train'
                subjects['SWAP_prob'][subjects['SDSS_id'] == new] = 1.

        for new in new_rejected:
            if subjects['MLsample'][subjects['SDSS_id'] == new] != 'valid':
                subjects['MLsample'][subjects['SDSS_id'] == new] = 'train'
                subjects['SWAP_prob'][subjects['SDSS_id'] == new] = 0.

    # ---------------------------------------------------------------------
    #  OPEN COLLECTION PICKLE (updated each time MachineClassifier is run)
    # ---------------------------------------------------------------------
    backup_col_file = params['samplefile'].replace('.pickle', '_orig.pickle')

    if today == start_day:
        try:
            # If we're starting fresh, we want to open the original file,
            # if it exists
            collection = swap.read_pickle(backup_col_file, 'collection')
        except:
            # If it doesn't exist, open up the regular file and ...
            print "MachineShop: backup collection pickle has not been made yet"
            print "MachineShop: opening original collection file"
            collection = swap.read_pickle(params['samplefile'], 'collection')

        # Save the original collection file for comparison later
        if not os.path.isfile(backup_col_file):
            print "MachineShop: creating a backup collection pickle"
            swap.write_pickle(collection, backup_col_file)
    else:
        # If we're in the middle of the run, we want to open the file that's
        # constantly being updated by MachineClassifier
        collection = swap.read_pickle(params['samplefile'], 'collection')

    # ---------------------------------------------------------------------
    #  ISOLATE TRAINING SAMPLE -- SWAP-RETIRED BY "TODAY"
    # ---------------------------------------------------------------------
    for subjectID in collection.list():
        subject = collection.member[subjectID]

        if subject.retiredby == 'machine':
            machine_retired += 1

        if subject.retirement_time != 'not yet':
            date = dt.datetime.strptime(subject.retirement_time, '%Y-%m-%d_%H:%M:%S')
            yesterday = today - dt.timedelta(days=1)

            if (date < today) and (date >= yesterday) and (subject.retiredby == 'swap'):
                mask = subjects['SDSS_id'] == int(subject.ZooID)

                # Update them in metadata file as training sample for MC
                # DOUBLE CHECK THAT IT HAS NOT BEEN RETIRED BY MACHINE!!!
                if subjects['MLsample'][mask] == 'test ':
                    SWAP_retired += 1
                    subjects['MLsample'][mask] = 'train'
                    subjects['retired_date'][mask] = dt.datetime.strftime(yesterday, '%Y-%m-%d')
                    subjects['SWAP_prob'][mask] = subject.mean_probability
                    run_machine = True
"""
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

from parlai.mturk.core.agents import MTurkAgent, TIMEOUT_MESSAGE
from parlai.mturk.core.agents import AssignState
import parlai.mturk.core.shared_utils as shared_utils


class MockTurkAgent(MTurkAgent):
    """Mock turk agent that can act in a parlai mturk world"""

    def __init__(self, opt, mturk_manager, hit_id, assignment_id, worker_id):
        super().__init__(opt, mturk_manager, hit_id, assignment_id, worker_id)
        self.db_logger = None
        self.mock_status = AssignState.STATUS_NONE
        self.wants_message = False
        self.unread_messages = []
        self.timed_out = False

    def get_update_packet(self):
        """Produce an update packet that represents the state change
        of this agent"""
        send_messages = []
        while len(self.unread_messages) > 0:
            pkt = self.unread_messages.pop(0)
            send_messages.append(pkt.data)
        done_text = None
        if self.state.is_final() and \
                self.get_status() != AssignState.STATUS_DONE:
            done_text = self.state.get_inactive_command_text()[0]
        return {
            'new_messages': send_messages,
            'all_messages': self.state.get_messages(),
            'wants_message': self.wants_message,
            'disconnected': self.disconnected,
            'agent_id': self.id,
            'worker_id': self.worker_id,
            'conversation_id': self.conversation_id,
            'task_done': self.state.is_final(),
            'done_text': done_text,
            'status': self.state.get_status(),
        }

    def log_reconnect(self):
        """We aren't logging behavior in the mock"""
        pass

    def is_in_task(self):
        return self.status == AssignState.STATUS_IN_TASK

    def put_data(self, id, data):
        """Put data into the message queue"""
        self.msg_queue.put(data)

    def flush_msg_queue(self):
        """Clear all messages in the message queue"""
        if self.msg_queue is None:
            return
        while not self.msg_queue.empty():
            self.msg_queue.get()

    def prepare_timeout(self):
        """Log a timeout event, tell mturk manager it occurred, return message
        to return for the act call
        """
        shared_utils.print_and_log(
            logging.INFO,
            '{} timed out before sending.'.format(self.id)
        )
        self.timed_out = True
        return self._get_episode_done_msg(TIMEOUT_MESSAGE)

    def request_message(self):
        if not (self.disconnected or self.some_agent_disconnected
                or self.hit_is_expired):
            self.wants_message = True

    def act(self, timeout=None, blocking=True):
        """Retrieve an act in the normal expected way (out of the queue), but
        clear the act request if we do end up getting an act."""
        gotten_act = super().act(timeout, blocking)
        if gotten_act is not None:
            self.wants_message = False
            gotten_act['episode_done'] = gotten_act.get('episode_done', False)
        return gotten_act

    def episode_done(self):
        """Return whether or not this agent believes the conversation to
        be done"""
        if self.get_status() == AssignState.STATUS_DONE:
            return False
        else:
            return True

    def approve_work(self):
        print('[mock] Worker {} approved'.format(self.worker_id))

    def reject_work(self, reason='unspecified'):
        print('[mock] Worker {} rejected for reason {}'.format(
            self.worker_id, reason))

    def block_worker(self, reason='unspecified'):
        print('[mock] Worker {} blocked for reason {}'.format(
            self.worker_id, reason))

    def pay_bonus(self, bonus_amount, reason='unspecified'):
        print('[mock] Worker {} bonused {} for reason {}'.format(
            self.worker_id, bonus_amount, reason))

    def email_worker(self, subject, message_text):
        return True

    def set_hit_is_abandoned(self):
        self.hit_is_abandoned = True

    def wait_for_hit_completion(self, timeout=None):
        pass

    def shutdown(self, timeout=None, direct_submit=False):
        pass

    def update_agent_id(self, agent_id):
        """State is sent directly from the agent, so no need to send like
        MTurkAgent does in the full version
        """
        self.id = agent_id
#!/usr/bin/env python

'''Provision Static Routes'''

#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#

__version__ = '1.0'

import re
import sys
import time
import os.path
import logging
import platform
import argparse
import socket
import struct
import subprocess
from netaddr import IPNetwork
from tempfile import NamedTemporaryFile
from distutils.version import LooseVersion

logging.basicConfig(format='%(asctime)-15s:: %(funcName)s:%(levelname)s:: %(message)s',
                    level=logging.INFO)
log = logging.getLogger(__name__)

(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()


class StaticRoute(object):
    '''Base class containing common methods for configuring static routes
    '''

    def __init__(self, **kwargs):
        self.device = kwargs.get('device', [])
        self.netw = kwargs.get('network', [])
        self.gw = kwargs.get('gw', [])
        self.mask = kwargs.get('netmask', [])
        self.vlan = kwargs.get('vlan', None)
        self.no_restart_network = kwargs.get('no_restart_network', False)
        self.cmd = []
        self.nwfile = []
        self.tempfile = NamedTemporaryFile(delete=False)
        self.config_route_list = []

    def write_network_script(self):
        '''Create an interface config file in network-scripts with given
        config
        '''
        i = 0
        for destination in self.netw:
            # Open the file in append mode to handle the case of a single
            # interface having multiple static routes. Remember, in CentOS
            # each interface will have a separate route file
            with open(self.nwfile[i], 'a') as fd:
                fd.write(self.cmd[i])
                fd.write('\n')
            i += 1

    def restart_service(self):
        '''Restart network service'''
        log.info('Restarting Network Services...')
        os.system('sudo service network restart')
        time.sleep(3)

    def pre_config(self):
        '''Setup env before static route configuration'''
        if self.vlan:
            self.device += "." + self.vlan
        i = 0
        # Loop through the number of routes specified
        for destination in self.netw:
            filename = 'route-%s' % self.device[i]
            # Check if the filename exists. If not, create a new filename
            if not os.path.isfile(filename):
                self.nwfile.append(os.path.join(os.path.sep, 'etc', 'sysconfig',
                                                'network-scripts', '%s' % (filename)))
            prefix = IPNetwork('%s/%s' % (destination, self.mask[i])).prefixlen
            self.cmd += ['%s/%s via %s dev %s' % (
                destination, prefix, self.gw[i], self.device[i])]
            self.config_route_list.append('%s %s %s' % (destination,
                                                        self.mask[i], self.gw[i]))
            i += 1

    def verify_route(self):
        '''verify configured static routes'''
        actual_list = []
        for route in open('/proc/net/route', 'r').readlines():
            route_fields = route.split()
            if route_fields[0].strip() in self.device:
                flags = int(route_fields[3], 16)
                destination = socket.inet_ntoa(
                    struct.pack('I', int(route_fields[1], 16)))
                if flags & 0x2:
                    gateway = socket.inet_ntoa(
                        struct.pack('I', int(route_fields[2], 16)))
                mask = socket.inet_ntoa(
                    struct.pack('I', int(route_fields[7], 16)))
                actual_list.append('%s %s %s' % (destination, mask, gateway))
        if cmp(sorted(actual_list), sorted(self.config_route_list)):
            raise RuntimeError('Seems Routes are not properly configured')

    def post_config(self):
        '''Execute commands after static route configuration'''
        if not self.no_restart_network:
            self.restart_service()
        self.verify_route()

    def setup(self):
        '''High level method to call individual methods to configure
        static routes
        '''
        self.pre_config()
        self.write_network_script()
        self.post_config()
        os.unlink(self.tempfile.name)


class UbuntuStaticRoute(StaticRoute):
    '''Configure Static Route in Ubuntu'''

    def restart_service(self):
        '''Restart network service for Ubuntu'''
        log.info('Restarting Network Services...')
        if LooseVersion(VERSION) < LooseVersion("14.04"):
            subprocess.call('sudo /etc/init.d/networking restart', shell=True)
        else:
            subprocess.call('sudo ifdown -a && sudo ifup -a', shell=True)
        time.sleep(5)

    def write_network_script(self):
        '''Add route to ifup-parts dir and set the correct permission'''
        if os.path.isfile(self.nwfile):
            tmpfile = os.path.join(os.path.join(os.path.sep, 'tmp'),
                                   'moved-%s' % os.path.basename(self.nwfile))
            log.info('Backup existing file %s to %s' % (self.nwfile, tmpfile))
            os.system('sudo cp %s %s' % (self.nwfile, tmpfile))
        # read existing file
        with open(self.tempfile.name, 'w') as fd:
            i = 0
            fd.write('#!/bin/bash\n')
            for interface in self.device:
                fd.write('[ "$IFACE" == "%s" ] &&\n' % interface)
                fd.write(self.cmd[i])
                fd.write('\n')
                i += 1
        os.system('sudo cp -f %s %s' % (self.tempfile.name, self.nwfile))
        os.system('sudo chmod 755 %s' % (self.nwfile))
        with open(self.tempfile.name, 'w') as fd:
            i = 0
            fd.write('#!/bin/bash\n')
            for interface in self.device:
                fd.write('[ "$IFACE" == "%s" ] &&\n' % interface)
                fd.write(self.downcmd[i])
                fd.write('\n')
                i += 1
        os.system('sudo cp -f %s %s' % (self.tempfile.name, self.downfile))
        os.system('sudo chmod 755 %s' % (self.downfile))

    def pre_config(self):
        '''Setup env before static route configuration in Ubuntu'''
        # Any changes to the file/logic with static routes has to be
        # reflected in setup.py too
        if self.vlan:
            self.device = 'vlan' + self.vlan
        i = 0
        for destination in self.netw:
            prefix = IPNetwork('%s/%s' % (destination, self.mask[i])).prefixlen
            self.cmd += ['%s/%s via %s dev %s' % (
                destination, prefix, self.gw[i], self.device[i])]
            self.config_route_list.append('%s %s %s' % (destination,
                                                        self.mask[i], self.gw[i]))
            i += 1
        self.downfile = os.path.join(os.path.sep, 'etc', 'network',
                                     'if-down.d', 'routes')
        self.downcmd = ['ip route del ' + x for x in self.cmd]
        self.nwfile = os.path.join(os.path.sep, 'etc', 'network',
                                   'if-up.d', 'routes')
        self.cmd = ['ip route add ' + x for x in self.cmd]


def parse_cli(args):
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--version', '-v',
                        action='version',
                        version=__version__,
                        help='Display version and exit')
    parser.add_argument('--device',
                        action='store',
                        default=[],
                        nargs='+',
                        metavar='INTERFACE',
                        help='Interface Name')
    parser.add_argument('--network',
                        action='store',
                        default=[],
                        nargs='+',
                        metavar='DESTINATION',
                        help='Network address of the Static route')
    parser.add_argument('--netmask',
                        action='store',
                        default=[],
                        nargs='+',
                        metavar='NETMASK',
                        help='Netmask of the Static route')
    parser.add_argument('--gw',
                        action='store',
                        default=[],
                        nargs='+',
                        metavar='GATEWAY',
                        help='Gateway Address of the Static route')
    parser.add_argument('--vlan',
                        action='store',
                        help='vLAN ID')
    parser.add_argument('--no-restart-network',
                        action='store_true',
                        default=False,
                        help='Disable network restart after configuring interfaces')
    pargs = parser.parse_args(args)
    if len(args) == 0:
        parser.print_help()
        sys.exit(2)
    return dict(pargs._get_kwargs())


def main():
    pargs = parse_cli(sys.argv[1:])
    if PLATFORM.lower() != 'ubuntu':
        route = StaticRoute(**pargs)
    else:
        route = UbuntuStaticRoute(**pargs)
    route.setup()


if __name__ == '__main__':
    main()
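
# Usage sketch (illustrative, not from the original file): parse_cli()
# returns a plain kwargs dict that is then splatted into StaticRoute(**pargs).
# The script name and addresses below are made-up examples.
#
#   $ python provision_static_routes.py --device eth0 \
#         --network 10.1.1.0 --netmask 255.255.255.0 --gw 10.1.1.254
#
# parse_cli(['--device', 'eth0', '--network', '10.1.1.0',
#            '--netmask', '255.255.255.0', '--gw', '10.1.1.254'])
# -> {'device': ['eth0'], 'network': ['10.1.1.0'],
#     'netmask': ['255.255.255.0'], 'gw': ['10.1.1.254'],
#     'vlan': None, 'no_restart_network': False}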
#!/usr/bin/env python

#from distutils.core import setup
from setuptools import setup, find_packages

setup(name='Fraunhofer',
      version='1.0.0',
      description='Generic Stellar Abundance Determination Software',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/dnidever/fraunhofer',
      packages=find_packages(exclude=["tests"]),
      scripts=['bin/hofer'],
      install_requires=['numpy', 'astropy(>=4.0)', 'scipy', 'dlnpyutils(>=1.0.3)',
                        'doppler @ git+https://github.com/dnidever/[email protected]#egg=doppler',
                        'the-cannon @ git+https://github.com/dnidever/[email protected]#egg=the-cannon',
                        'synple @ git+https://github.com/dnidever/[email protected]#egg=synple'],
      dependency_links=['http://github.com/dnidever/doppler/tarball/v1.1.0#egg=doppler',
                        'https://github.com/dnidever/AnniesLasso/tarball/v1.0.0#egg=the-cannon',
                        'http://github.com/dnidever/synple/tarball/v1.0.0#egg=synple'],
      include_package_data=True,
      )
pg_schema_sql = '''
CREATE TABLE male(
    id serial PRIMARY KEY,
    name text
);

CREATE TABLE female(
    id serial PRIMARY KEY,
    name text,
    husband integer UNIQUE DEFERRABLE REFERENCES male(id) DEFERRABLE
);

CREATE TABLE child(
    id serial PRIMARY KEY,
    name text,
    father integer REFERENCES male(id) DEFERRABLE,
    mother integer REFERENCES female(id) DEFERRABLE
);
'''
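
# Usage sketch (illustrative, not from the original file): applying the schema
# above with psycopg2. The DSN is a made-up example; psycopg2 accepts several
# semicolon-separated statements in a single execute() call.
import psycopg2


def apply_schema(dsn='dbname=family user=postgres'):
    conn = psycopg2.connect(dsn)
    try:
        # connection context commits on success, rolls back on error
        with conn, conn.cursor() as cur:
            cur.execute(pg_schema_sql)  # creates male, female and child
    finally:
        conn.close()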
# eeshashetty/VisualCryptography
import cv2
import numpy as np
from watermark_generator import wm_gen
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--image')
parser.add_argument('--watermark')
parser.add_argument('--owner')
args = parser.parse_args()

template = cv2.imread(args.watermark, 0)
PATH = args.image
owner = args.owner

final = wm_gen(PATH, owner)

res = cv2.matchTemplate(final, template, cv2.TM_CCOEFF_NORMED)
if res[0] > 0.8:
    print("The Picture is Authentic! [%0.2f Percent Similar]" % (res[0] * 100))
else:
    print("The Picture is Fake! [%0.2f Percent Similar]" % (res[0] * 100))
from flash.text.seq2seq.translation.data import TranslationData  # noqa: F401
from flash.text.seq2seq.translation.model import TranslationTask  # noqa: F401
import numpy as np
import pandas as pd


def growth_event_annotate(df, threshold=0.5):
    '''This function adds a "division event" column to the passed DataFrame
    and fills it based on the change in the areas between the previous and
    the next index in the data frame. If that change is above a certain
    threshold, the subsequent times are delineated as the next division
    event.

    -----
    Parameters:

    df - DataFrame containing columns "time (min)", "area (µm²)", and
    "bacterium".

    -----
    kwargs:

    threshold - the value that determines if the area change is a fluctuation
    in data analysis/acquisition or a true division event. Default is 0.5.

    -----
    Returns:

    df - the altered DataFrame, now with a filled "division event" column.'''
    event_id = 1
    start_of_event = 0
    df['division event'] = np.nan
    indices = np.arange(df.shape[0] - 1)
    for index in indices:
        area_change = np.abs(df.loc[index, 'area (µm²)']
                             - df.loc[index + 1, 'area (µm²)'])
        if area_change > threshold:
            end_of_event = index
            for ind in range(start_of_event, end_of_event + 1):
                df.loc[ind, 'division event'] = event_id
            start_of_event = index + 1
            event_id += 1
        # NOTE: the hard-coded indices below (and the assignment after the
        # loop) are specific to the original dataset, where rows from 10283
        # through 10320 belong to division event 107.
        if index >= 10283:
            df.loc[index, 'division event'] = 107.0
    df.loc[10320, 'division event'] = 107.0
    return df
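
# Usage sketch (illustrative, not from the original file) with synthetic
# areas. Because of the dataset-specific assignment noted above,
# df.loc[10320, ...] creates a spurious row labelled 10320 on short frames
# like this one, so the function is not fully general.
if __name__ == '__main__':
    demo = pd.DataFrame({'area (µm²)': [1.0, 1.2, 1.4, 0.7, 0.9, 1.1, 0.6]})
    annotated = growth_event_annotate(demo.copy(), threshold=0.5)
    print(annotated.head(7))  # rows 0-2 -> event 1; later rows stay NaN here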
"""Module with implementation of the Data class.""" import os import string import random import shutil import numpy import dask.array as dsarray import pyarrow import zarr import cbox.lib.boost as cbox class Data(cbox.create.Data): """Default class to store data""" type_ = "default" def __init__(self, **attributes): """Initialize the class object Parameters ---------- attributes : dictionary { 'nblocks' : total number of blocks, 'nxb' : number of grid points per block in x dir, 'nyb' : number of grid points per block in y dir, 'nzb' : number of grid points per block in z dir, 'xguard' : number of guard cells in x dir, 'yguard' : number of guard cells in y dir, 'zguard' : number of guard cells in z dir, 'inputfile' : hdf5 inputfile default (None), 'variables' : dictionary of variables default ({}) 'storage' : 'numpy', 'zarr', 'dask', 'pyarrow' } """ super().__init__() self._set_attributes(attributes) self._set_data() def __repr__(self): """ Return a representation of the object """ return ("Data:\n" + " - type : {}\n".format(type(self)) + " - keys : {}\n".format(self.varlist)) def __getitem__(self,varkey): """ Get variable data """ return self.variables[varkey] def __setitem__(self,varkey,value): """ Set variable data """ self.variables[varkey] = value def _set_attributes(self,attributes): """ Private method for intialization """ default_attributes = {'nblocks' : 1, 'inputfile' : None, 'boxmem' : None, 'variables' : {}, 'nxb' : 1, 'nyb' : 1, 'nzb' : 1, 'xguard' : 0, 'yguard' : 0, 'zguard' : 0, 'storage' : 'numpy-memmap'} for key in attributes: if key in default_attributes: default_attributes[key] = attributes[key] else: raise ValueError('[bubblebox.library.create.Data] '+ 'Attribute "{}" not present in class Data'.format(key)) for key, value in default_attributes.items(): setattr(self, key, value) def _set_data(self): """ Private method for setting new data """ self.varlist = list(self.variables.keys()) if self.storage == 'numpy': self._create_numpy_arrays() elif self.storage == 'numpy-memmap': self._create_numpy_memmap() elif self.storage == 'zarr': self._create_zarr_objects() elif self.storage == 'dask': self._create_numpy_memmap() self._create_dask_objects() else: raise NotImplementedError('[bubblebox.library.create.Data] '+ 'Storage format "{}" not implemented'.format(self.storage)) def _create_numpy_memmap(self): """ Create numpy memory maps for empty keys in variables dictionary """ emptykeys = [key for key,value in self.variables.items() if type(value) is type(None)] if not emptykeys: return if not self.boxmem: namerandom = ''.join(random.choice(string.ascii_lowercase) for i in range(5)) self.boxmem = "".join(['./boxmem_',namerandom]) try: os.mkdir(self.boxmem) except FileExistsError: pass for varkey in emptykeys: outputfile = os.path.join(self.boxmem,varkey) outputshape = (self.nblocks, self.nzb+2*self.zguard, self.nyb+2*self.yguard, self.nxb+2*self.xguard) self.variables[varkey] = numpy.memmap(outputfile, dtype=float, shape=outputshape, mode='w+') def _create_zarr_objects(self): """ Create zarr objects """ emptykeys = [key for key,value in self.variables.items() if type(value) is type(None)] if not emptykeys: return if not self.boxmem: namerandom = ''.join(random.choice(string.ascii_lowercase) for i in range(5)) self.boxmem = "".join(['./boxmem_',namerandom]) try: os.mkdir(self.boxmem) except FileExistsError: pass for varkey in emptykeys: outputfile = os.path.join(self.boxmem,varkey) outputshape = (self.nblocks, self.nzb+2*self.zguard, self.nyb+2*self.yguard, 
self.nxb+2*self.xguard) self.variables[varkey] = zarr.open(outputfile, mode='w', shape=outputshape, chunks=(1, self.nzb+2*self.zguard, self.nyb+2*self.yguard, self.nxb+2*self.xguard),dtype=float) def _create_numpy_arrays(self): """ Create numpy arrays for empty keys in variables dictionary """ emptykeys = [key for key,value in self.variables.items() if type(value) is type(None)] if not emptykeys: return for varkey in emptykeys: outputshape = (self.nblocks, self.nzb+2*self.zguard, self.nyb+2*self.yguard, self.nxb+2*self.xguard) self.variables[varkey] = numpy.ndarray(dtype=float, shape=outputshape) def _create_dask_objects(self): """ Create dask array representation of data """ emptykeys = [key for key,value in self.variables.items() if type(value) is type(None)] if not emptykeys: return for varkey in emptykeys: if type(self.variables[varkey]) is not dsarray.core.Array: self.variables[varkey] = dsarray.from_array(self.variables[varkey], chunks=(1, self.nzb+2*self.zguard, self.nyb+2*self.yguard, self.nxb+2*self.xguard)) def _create_pyarrow_objects(self): """ Create a pyarrow tensor objects """ emptykeys = [key for key,value in self.variables.items() if type(value) is type(None)] if not emptykeys: return for varkey in emptykeys: if type(self.variables[varkey]) is not pyarrow.lib.Tensor: templist = [] for lblock in range(self.nblocks): templist.append(pyarrow.Tensor.from_numpy(self.variables[varkey][lblock])) self.variables[varkey] = templist def purge(self,purgeflag='all'): """ Clean up data and close it """ if self.boxmem and (purgeflag == 'all' or purgeflag == 'boxmem'): try: shutil.rmtree(self.boxmem) except: pass if self.inputfile and (purgeflag == 'all' or purgeflag == 'inputfile'): self.inputfile.close() def addvar(self,varkey): """ Add a variables to data """ self.variables[varkey] = None self._set_data() def delvar(self,varkey): """ Delete a variable """ del self.variables[varkey] if self.boxmem: outputfile = os.path.join(self.boxmem,varkey) try: shutil.rmtree(outputfile) except: pass self.varlist = list(self.variables.keys())
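
# Usage sketch (illustrative, not from the original file); it assumes the
# cbox backend is importable and 'pressure' is a made-up variable name.
# Empty (None) variables are allocated with shape
# (nblocks, nzb + 2*zguard, nyb + 2*yguard, nxb + 2*xguard).
#
# data = Data(nblocks=4, nxb=8, nyb=8, nzb=1,
#             variables={'pressure': None}, storage='numpy')
# print(data['pressure'].shape)   # -> (4, 1, 8, 8)
# data.addvar('velx')             # allocates another block array
# data.purge()                    # removes scratch space, closes input file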
# sessions/migrations/0003_alter_session_public_key.py
# Generated by Django 3.2.8 on 2021-10-13 01:07

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sessions', '0002_auto_20211011_1511'),
    ]

    operations = [
        migrations.AlterField(
            model_name='session',
            name='public_key',
            field=models.BinaryField(max_length=255),
        ),
    ]
# scripts/gatherResults_kASA.py
import os, sys

path = sys.argv[1]
numberOfTools = 1
numberOfMuts = int(sys.argv[2])
outfilePath = sys.argv[3]

resultMatrixSens = [[""] * (numberOfMuts + 1) for i in range(numberOfTools + 1)]
resultMatrixPrec = [[""] * (numberOfMuts + 1) for i in range(numberOfTools + 1)]
resultMatrixSpecificity = [[""] * (numberOfMuts + 1) for i in range(numberOfTools + 1)]
resultMatrixF1 = [[""] * (numberOfMuts + 1) for i in range(numberOfTools + 1)]
resultMatrixMCC = [[""] * (numberOfMuts + 1) for i in range(numberOfTools + 1)]

toolNames = set()
toolNames.add("kASA")
mutationrates = set()

for file in os.listdir(path):
    if "result" in file:
        filenameArr = file.split("_")
        if "kASA" in filenameArr:
            #tool = filenameArr[0] + "_" + filenameArr[1]
            #mutationrate = filenameArr[3]
            tool = filenameArr[0]
            mutationrate = filenameArr[2]
            mutationrates.add(int(mutationrate))

tools = {}
mutationrates = sorted(list(mutationrates))
mutations = {}

counter = 1
for entry in toolNames:
    tools[entry] = counter
    resultMatrixSens[counter][0] = entry
    resultMatrixPrec[counter][0] = entry
    resultMatrixSpecificity[counter][0] = entry
    resultMatrixF1[counter][0] = entry
    resultMatrixMCC[counter][0] = entry
    counter += 1

counter = 1
for entry in mutationrates:
    entry = str(entry)
    mutations[entry] = counter
    resultMatrixSens[0][counter] = entry
    resultMatrixPrec[0][counter] = entry
    resultMatrixSpecificity[0][counter] = entry
    resultMatrixF1[0][counter] = entry
    resultMatrixMCC[0][counter] = entry
    counter += 1

for file in os.listdir(path):
    if "result" in file:
        filenameArr = file.split("_")
        if "kASA" in filenameArr:
            #tool = filenameArr[0] + "_" + filenameArr[1]
            #mutationrate = filenameArr[3]
            tool = filenameArr[0]
            mutationrate = filenameArr[2]
            row = tools[tool]
            col = mutations[mutationrate]
            resultFile = open(path + file)
            next(resultFile)
            resultMatrixSens[row][col] = (((next(resultFile)).split(" "))[1]).rstrip("\n\r")
            resultMatrixPrec[row][col] = (((next(resultFile)).split(" "))[1]).rstrip("\n\r")
            resultMatrixSpecificity[row][col] = (((next(resultFile)).split(" "))[1]).rstrip("\n\r")
            resultMatrixF1[row][col] = (((next(resultFile)).split(" "))[1]).rstrip("\n\r")
            resultMatrixMCC[row][col] = (((next(resultFile)).split(" "))[1]).rstrip("\n\r")

outfile = open(outfilePath + "kASA_GatheredResults.txt", 'w')

outfile.write("Sensitivity\n")
outfile.write(resultMatrixSens[0][0])
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixSens[0][j])
outfile.write("\n")
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixSens[1][j])
outfile.write("\n")

outfile.write("Precision\n")
outfile.write(resultMatrixPrec[0][0])
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixPrec[0][j])
outfile.write("\n")
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixPrec[1][j])
outfile.write("\n")

outfile.write("F1\n")
outfile.write(resultMatrixF1[0][0])
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixF1[0][j])
outfile.write("\n")
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixF1[1][j])
outfile.write("\n")

outfile.write("Specificity\n")
outfile.write(resultMatrixSpecificity[0][0])
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixSpecificity[0][j])
outfile.write("\n")
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixSpecificity[1][j])
outfile.write("\n")

outfile.write("MCC\n")
outfile.write(resultMatrixMCC[0][0])
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixMCC[0][j])
outfile.write("\n")
for j in range(1, numberOfMuts + 1):
    outfile.write("," + resultMatrixMCC[1][j])
# 0-encapsulation/python/main.py

# loose control of object attributes
class Without:
    def __init__(self):
        self.age = 0
        print("Without object is created")


# get control of object attributes
class With:
    def __init__(self):
        self.__age = 0
        print("With object is created")

    def set_age(self, v):
        if v < 0 or v > 150:
            print("Input value {} is impossible for human".format(v))
            return
        self.__age = v

    def get_age(self) -> int:
        return self.__age


def main():
    wo = Without()
    print(wo.age)
    wo.age = -100
    print(wo.age)

    ww = With()
    """
    print(ww.__age)

    Traceback (most recent call last):
      File "D:\workspace\oop\encapsulation\python\main.py", line 16, in <module>
        main()
      File "D:\workspace\oop\encapsulation\python\main.py", line 13, in main
        print(ww.__age)
    AttributeError: 'With' object has no attribute '__age'
    """
    ww.set_age(160)
    print(ww.get_age())
    ww.set_age(40)
    print(ww.get_age())


if __name__ == '__main__':
    main()
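
# Note (illustrative, not from the original file): the AttributeError shown
# in the docstring above comes from Python's name mangling -- a
# double-underscore attribute is stored as _ClassName__attr, so it is hidden
# rather than truly private:
#
# ww = With()
# print(ww._With__age)   # 0 -- the mangled name is still reachable
# print(ww.__age)        # AttributeError: no attribute '__age'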
# simmsbra/quickcell
from random import randrange, Random

from card import Card
from cell import Cell
from cascade import Cascade, get_dependent_cards, is_dependent_card_of
from foundation import Foundation
from game_exception import EmptyOriginException, FullDestinationException, CompatibilityException, TooFewSlotsException


class Board:
    def __init__(self, seed=None):
        if seed is None:
            self.seed = randrange(1000000)
        else:
            self.seed = seed

        suits = ['clubs', 'spades', 'hearts', 'diamonds']
        deck = []
        for suit in suits:
            for rank in range(1, 14):
                deck.append(Card(suit, rank))
        Random(self.seed).shuffle(deck)

        # Fill the cascades with the shuffled deck of cards
        self.cascades = []
        place = 0
        for c in range(8):
            if c <= 3:
                end = place + 7
            else:
                end = place + 6
            self.cascades.append(Cascade(deck[place:end]))
            place = end

        self.foundations = {}
        for suit in suits:
            self.foundations[suit] = (Foundation(suit))

        self.cells = []
        for i in range(4):
            self.cells.append(Cell())

    def move(self, origin, destination):
        tmp = origin.view()
        destination.accept(tmp)
        origin.remove()

    def cell_to_foundations(self, index):
        card = self.cells[index].view()
        self.move(self.cells[index], self.foundations[card.suit])

    def cell_to_row(self, cell_index, row_index):
        self.move(self.cells[cell_index], self.cascades[row_index])

    def row_to_foundations(self, index):
        card = self.cascades[index].view()
        self.move(self.cascades[index], self.foundations[card.suit])

    def row_to_cell(self, index):
        for i, cell in enumerate(self.cells):
            try:
                self.move(self.cascades[index], self.cells[i])
                break
            except EmptyOriginException:
                raise
            except FullDestinationException:
                continue
        else:
            raise FullDestinationException('The cells are full.')

    def row_to_row(self, orig, dest):
        from_row = self.cascades[orig]
        to_row = self.cascades[dest]
        from_row.view()  # makes sure it's not empty
        movable_stack_index = from_row.movable_stack_index()
        if to_row.is_empty():
            for i in range(movable_stack_index, len(from_row.cards)):
                if (len(from_row.cards) - i) <= self.calc_move_capacity(to_row):
                    movable_stack_index = i
                    break
        else:
            for i in range(movable_stack_index, len(from_row.cards)):
                if is_dependent_card_of(from_row.cards[i], to_row.cards[-1]):
                    movable_stack_index = i
                    break
            else:
                raise CompatibilityException('The card(s) in the origin row cannot sit on the destination row.')
        if self.calc_move_capacity(to_row) < (len(from_row.cards) - movable_stack_index):
            raise TooFewSlotsException('There are not enough open slots to move that stack.')
        tmp = from_row.view(movable_stack_index)
        to_row.accept(tmp)
        from_row.remove(movable_stack_index)

    # return how big of a stack of cards can be moved to given cascade
    def calc_move_capacity(self, to_row):
        empty_cells = 0
        for cell in self.cells:
            if cell.card is None:
                empty_cells += 1
        empty_cascades = 0
        for cascade in self.cascades:
            if cascade.is_empty():
                empty_cascades += 1
        if to_row.is_empty():
            empty_cascades -= 1
        return (empty_cells + 1) * 2**empty_cascades

    def should_foundations_accept_card(self, card):
        # should not accept if cannot accept
        if not self.foundations[card.suit].can_accept(card):
            return False
        # accept only if none of the dependent cards may need to sit
        # in the cascades on the given card
        for dependent_card in get_dependent_cards(card):
            if (not self.foundations[dependent_card.suit].contains(dependent_card)
                    and not self.should_foundations_accept_card(dependent_card)):
                return False
        return True

    # automatically move cards to the foundations that should move there
    def auto_move(self):
        while True:
            has_moved = False
            for i in range(len(self.cells)):
                try:
                    if self.should_foundations_accept_card(self.cells[i].view()):
                        self.cell_to_foundations(i)
                        has_moved = True
                except EmptyOriginException:
                    pass
            for i in range(len(self.cascades)):
                try:
                    if self.should_foundations_accept_card(self.cascades[i].view()):
                        self.row_to_foundations(i)
                        has_moved = True
                except EmptyOriginException:
                    pass
            if not has_moved:
                break
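
# Worked example (illustrative, not from the original file):
# calc_move_capacity implements the usual FreeCell supermove rule,
# (free cells + 1) * 2 ** (empty cascades). With 2 free cells and 1 empty
# cascade, (2 + 1) * 2**1 = 6 cards can be moved at once; moving *onto* that
# empty cascade discounts it, leaving (2 + 1) * 2**0 = 3.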
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright Ostap developers
# =============================================================================
#  1.5.99.0 (Sep 13, 2020, 13:46 [UTC])
#
#     .oooooo.          .
#    d8P'  `Y8b       .o8
#   888      888  .oooo.o .o888oo  .oooo.   oo.ooooo.
#   888      888 d88(  "8   888   `P  )88b   888' `88b
#   888      888 `"Y88b.    888    .oP"888   888   888
#   `88b    d88' o.  )88b   888 . d8(  888   888   888
#    `Y8bood8P'  8""888P'   "888" `Y888""8o  888bod8P'
#                                            888
#                                           o888o
#
#  Simple interactive PyRoot-based analysis environment to provide access
#  to zillions useful decorators for ROOT (and not only ROOT!) objects&classes
#
#  - https://github.com/OstapHEP/ostap
#  - https://ostaphep.github.io/ostap-tutorials
#  - https://github.com/OstapHEP/ostap-tutorials
#
# =============================================================================
__all__ = (
    'banner',        ## Ostap banner
    'version',       ## Ostap version
    'version_int',   ## Ostap version as integer
    'version_info',  ## version info as named tuple
    'build_date',    ## Ostap build/release date (as string)
    'build_time',    ## Ostap build/release time (as datetime)
)
# =============================================================================
## the actual version of Ostap
__version__ = "1.5.99.0"
__date__ = "Sep 13, 2020, 13:46 [UTC]"
# =============================================================================
import datetime
from collections import namedtuple
# =============================================================================
version = __version__
VersionInfo = namedtuple("VersionInfo", ('major', 'minor', 'patch', 'tweak'))
version_info = VersionInfo(1, 5, 99, 0)
build_date = __date__
build_time = datetime.datetime.utcfromtimestamp(1600004784)
version_int = version_info.tweak + \
              version_info.patch * 100 + \
              version_info.minor * 100 * 100 + \
              version_info.major * 100 * 100 * 100
# =============================================================================
## import ostap.fixes.fixes
# =============================================================================
## Banner
banner = r"""
1.5.99.0 (Sep 13, 2020, 13:46 [UTC])

   .oooooo.          .
  d8P'  `Y8b       .o8
 888      888  .oooo.o .o888oo  .oooo.   oo.ooooo.
 888      888 d88(  "8   888   `P  )88b   888' `88b
 888      888 `"Y88b.    888    .oP"888   888   888
 `88b    d88' o.  )88b   888 . d8(  888   888   888
  `Y8bood8P'  8""888P'   "888" `Y888""8o  888bod8P'
                                          888
                                         o888o

 - https://github.com/OstapHEP/ostap
 - https://ostaphep.github.io/ostap-tutorials
 - https://github.com/OstapHEP/ostap-tutorials
"""
# =============================================================================
## The END
# =============================================================================
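# Worked example (illustrative, not from the original file): version_int
# packs the version tuple into base-100 digits, so 1.5.99.0 becomes
# 0 + 99*100 + 5*100*100 + 1*100*100*100 = 1059900.
# assert version_int == 1059900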
#! /usr/bin/env python
from __future__ import print_function

import pandas as pd
import numpy as np
import argparse


def generate_csv(start_index, fname):
    cols = [str('A' + str(i)) for i in range(start_index, NUM_COLS + start_index)]
    data = []
    for i in range(NUM_ROWS):
        vals = (np.random.choice(NUM_DISTINCT_VALS) for j in range(NUM_COLS))
        data.append(vals)
    df = pd.DataFrame(data=data, columns=cols)
    df.to_csv(fname, index=False, header=True)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Generate sample tables to test joins.')
    parser.add_argument('--num-rows', '-r', type=int, default=100)
    parser.add_argument('--num-cols', '-c', type=int, required=True)
    parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
    parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
    args = parser.parse_args()

    NUM_ROWS = args.num_rows
    NUM_COLS = args.num_cols
    NUM_DISTINCT_VALS = args.num_distinct_vals
    num_overlap = args.num_cols_overlap

    if num_overlap > NUM_COLS:
        print('--num-cols-overlap cannot be greater than --num-cols')
        import sys
        sys.exit(1)

    generate_csv(0, 'table_a.csv')
    generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
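
# Usage sketch (the script name here is illustrative): generates table_a.csv
# with columns A0..A4 and table_b.csv with columns A2..A6, so the two tables
# share 3 column names (A2..A4) for testing natural joins:
#
#   $ python generate_join_tables.py -r 50 -c 5 -d 10 -o 3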
# wagtail/wagtailimages/api/admin/serializers.py
from __future__ import absolute_import, unicode_literals

from ..fields import ImageRenditionField
from ..v2.serializers import ImageSerializer


class AdminImageSerializer(ImageSerializer):
    thumbnail = ImageRenditionField('max-165x165', source='*', read_only=True)
# 68-text-justification/text-justification.py
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 23:52:34 2021

@author: nacer
"""
"""
Problem link : https://leetcode.com/problems/text-justification/
"""
from typing import List


class Solution:
    def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
        output = []
        line = []
        j = 0
        for i in range(len(words)):
            if line == []:
                line += [words[i]]
            elif lenLine(line) + len(words[i]) + 1 <= maxWidth:
                line += [' ', words[i]]
            else:
                line = addSpace(line, maxWidth)
                output += [fuseLine(line)]
                line = [words[i]]
        line += [' ' * (maxWidth - lenLine(line))]
        output += [fuseLine(line)]
        return output


def lenLine(L):
    s = 0
    for i in range(len(L)):
        s += len(L[i])
    return s


def addSpace(line, width):
    if len(line) == 1:
        return line + [' ' * (width - lenLine(line))]
    while lenLine(line) != width:
        spacesToAdd = width - lenLine(line)
        for i in range(1, len(line), 2):
            if spacesToAdd > 0:
                line[i] = line[i] + ' '
                spacesToAdd -= 1
    return line


def fuseLine(L):
    s = ""
    for word in L:
        s += word
    return s
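
# Quick check (illustrative, not from the original file) against the LeetCode
# example; the expected output is taken from the problem statement linked above:
#
# Solution().fullJustify(
#     ["This", "is", "an", "example", "of", "text", "justification."], 16)
# -> ["This    is    an",
#     "example  of text",
#     "justification.  "]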
import pandas as pd
import numpy as np
import nltk
import collections
import pickle
import pyhanlp
from collections import Iterable
from collections import Counter
from pandas import DataFrame
from sklearn.decomposition import PCA
from pyhanlp import *

"""
1. Load the data:
   1. load the question-answer pairs
   2. load the pretrained word-vector model
2. Vectorise the questions:
   1. segment the questions to obtain the corpus
   2. convert each word into a vector
   3. convert each question from a word-vector matrix into a question vector
   4. combine the question vectors into a question matrix
3. Save the result
"""


# Data input
"""
Input : path of the csv file
Output: DataFrame with column names Question, Answer
"""
def load_data(data_path):
    """
    data = pd.read_excel(data_path)
    new_data = data.iloc[:, 0:2]
    new_data.columns = [["Question", "Answers"]]
    new_data = new_data[:-1]"""
    data = pd.read_csv(data_path)
    return data


# Clean the data (segmentation, stop-word removal)
"""
Input : str raw question, stop words, segmenter
Output: str[], list of segmented words
"""
def clean(sentence, stop_words, segment):
    # words_list = jieba.lcut(sentence, cut_all=True)
    clean_words = []
    words_list = segment.segment(sentence)
    for word in words_list:
        if str(word) not in stop_words:
            clean_words.append(str(word))
    """
    words_list = segment.segment(sentence)
    clean_words = [str(word) for word in words_list if str(word) not in stop_words]
    """
    return clean_words


# Build the input corpus
"""
Input : the imported DataFrame, the relevant column name, stop_words
Output: list, the corpus
"""
def get_corpus(source, column, stop_words):
    HanLP.Config.ShowTermNature = False
    newSegment = JClass("com.hankcs.hanlp.tokenizer.NLPTokenizer")
    corpus = []
    for i in source[column]:
        tmp_Q = clean(i, stop_words, newSegment)
        corpus.append(tmp_Q)
    return corpus


# Raw sentence vector
"""
Input : word-vector model, a segmented sentence
Output: the vectors corresponding to the sentence
Note: some words have a very low frequency; on KeyError a zero vector is
substituted.
"""
def s2v(model, sentence):
    vec = []
    for word in sentence:
        try:
            tmp = model[word]
        except KeyError:
            tmp = [0 for _ in range(300)]
            # print("missing word:%s \n" % (word))
        vec.append(tmp)
    return vec


# Build the question matrix
def get_Question_matrix(model, corpus, pattern='SIF', SIF_weight=0.0001):
    if pattern == 'AVG':
        Q = []
        for query in corpus:
            tmp_vec = np.array(s2v(model, query))
            Q_vec = tmp_vec.mean(axis=0)
            Q.append(Q_vec)
        Q_matrix = np.array(Q)
        return (Q_matrix, 0, 0)
    elif pattern == 'SIF':
        Q = []
        raw = []
        merge = []
        weight = []
        for i in range(len(corpus)):
            merge.extend(corpus[i])
        fdist = nltk.probability.FreqDist(merge)
        count = 0
        for query in corpus:
            tmp_vec = np.array(s2v(model, query))
            weight_matrix = np.array([[SIF_weight / (SIF_weight + fdist[word] / fdist.N())
                                       for word in query]])
            tmp = tmp_vec * weight_matrix.T
            Q_vec = tmp.mean(axis=0)
            Q.append(Q_vec)
            weight.append(weight_matrix)
            raw.append(tmp_vec)
        # print(weight[3455])
        # print(raw[3455])
        Q_matrix = np.array(Q)
        # print(Q_matrix[3455])
        pca = PCA(n_components=1)
        u = pca.fit_transform(Q_matrix.T)
        res = Q_matrix - np.dot(Q_matrix, np.dot(u, u.T))
        # print(res[3455])
        return (res, fdist, u)


class question_database():
    def __init__(self, Q_matrix, fdist, singular_v):
        self.Q_matrix = Q_matrix
        self.fdist = fdist
        self.singular_v = singular_v


def main():
    stop_words = {'ใ€‚', ',', '๏ผŸ', 'ๅนด', '็š„', ''}
    path = "C:/Users/leo/Desktop/knowledge_quiz"
    # model = Word2Vec.load("trained_on_wiki.model")
    model = np.load('simplified_model.npy').item()
    print("load model successfully")
    data = load_data(path + "/DATA/clean.csv")
    print("load data successfully")
    corpus = get_corpus(data, "Question", stop_words)
    print("generate corpus successfully")
    Q_matrix, fdist, singular_v = get_Question_matrix(model, corpus, 'SIF')
    print("generate question matrix successfully")
    # print(Q_matrix)
    QD = question_database(Q_matrix, fdist, singular_v)
    with open(path + "/DATA/QD.txt", 'wb') as f:
        pickle.dump(QD, f, 0)
    print("question database saved successfully")
    return 0


if __name__ == '__main__':
    main()
# Stars: 10-100
import random
import queue
import csv
import os

from sklearn.model_selection import ParameterGrid


def average(lst):
    return sum(lst) / float(len(lst))


# --- Hyperparameters based off of assumptions --- #
# Assume reviewers have an independent 75% probability of choosing the better
# application
HUMAN_COMPARISON_SUCCESS_RATE = 0.75

# --- Toggles --- #
NUM_RUNS = 100
LOG_DIR = "../../log"


def simulate_and_compute_params(bonus=2, penalty=2, threshold=-7,
                                apps_total=250, apps_cutoff=50, runs=10):

    class AppMeta:
        def __init__(self, rank, wins, losses):
            self.rank = rank
            self.wins = wins
            self.losses = losses

        @property
        def score(self):
            return self.wins * bonus - self.losses * penalty

        @property
        def alive(self):
            return self.score > threshold

        def __lt__(self, other):
            # Add nondeterministic factor to __lt__ for random retrieval of
            # equal W/L items from heap
            return (((self.wins + self.losses), random.random()) <
                    ((other.wins + other.losses), random.random()))

    class Statistics:
        def __init__(self):
            self.num_comparisons = []
            self.false_negatives_top_10 = []
            self.false_negatives_top_20 = []
            self.false_negatives_top_40 = []

        def update(self, nc, fn10, fn20, fn40):
            self.num_comparisons.append(nc)
            self.false_negatives_top_10.append(fn10)
            self.false_negatives_top_20.append(fn20)
            self.false_negatives_top_40.append(fn40)

        @property
        def aggregate(self):
            return {
                "NUM COMPARISONS": average(self.num_comparisons),
                "FALSE NEGATIVES TOP 10": average(self.false_negatives_top_10),
                "FALSE NEGATIVES TOP 20": average(self.false_negatives_top_20),
                "FALSE NEGATIVES TOP 40": average(self.false_negatives_top_40),
            }

    statistics = Statistics()
    for i in range(runs):
        # Create scoreboard objects
        app_metadata = {j: AppMeta(j, 0, 0) for j in range(apps_total)}
        num_comparisons = 0
        num_remaining = apps_total

        # Construct heap that returns apps with min total comparisons
        comparison_heap = queue.PriorityQueue(apps_total)
        for _, app_meta in app_metadata.items():
            comparison_heap.put(app_meta)

        # Continue running simulation until the number of remaining apps is
        # apps_cutoff
        while num_remaining > apps_cutoff:
            # By preserving rank order, left is always stronger than right
            left, right = comparison_heap.get(), comparison_heap.get()
            left, right = sorted([left, right], key=lambda a: a.rank)

            choose_correct_ranking = random.random() <= HUMAN_COMPARISON_SUCCESS_RATE
            if choose_correct_ranking:
                left.wins += 1
                right.losses += 1
            else:
                left.losses += 1
                right.wins += 1

            # Only add back to heap if score is above threshold
            if left.alive:
                comparison_heap.put(left)
            else:
                num_remaining -= 1
            if right.alive:
                comparison_heap.put(right)
            else:
                num_remaining -= 1

            num_comparisons += 1

        # Evaluate results
        final_ranking = []
        while not comparison_heap.empty():
            final_ranking.append(comparison_heap.get())
        final_ranking.sort(key=lambda a: (a.score, a.wins), reverse=True)

        final_set = {a.rank for a in final_ranking}
        fn10 = len([1 for j in range(10) if j not in final_set])
        fn20 = len([1 for j in range(20) if j not in final_set])
        fn40 = len([1 for j in range(40) if j not in final_set])
        statistics.update(num_comparisons, fn10, fn20, fn40)

    return statistics


def grid_search():
    param_grid = {
        "bonus": list(range(1, 3, 1)),
        "penalty": list(range(2, 3, 1)),
        "threshold": list(range(-5, -13, -2)),
        "apps_total": list(range(200, 350, 50)),
        "apps_cutoff": list(range(50, 150, 50)),
        "runs": [NUM_RUNS],
    }
    param_grid = ParameterGrid(param_grid)

    aggregate_list = []
    # Grid search to collect all statistics
    for params in param_grid:
        print("Simulating %d runs with params %s" % (NUM_RUNS, params))
        statistics = simulate_and_compute_params(**params)
        aggregate = statistics.aggregate
        combined = {**params, **aggregate}
        aggregate_list.append(combined)

    # Write all data to csv log
    outfile_name = "compare_sim.csv"
    outpath = os.path.join(LOG_DIR, outfile_name)
    with open(outpath, "w") as f:
        csvwriter = csv.DictWriter(f, aggregate_list[0].keys())
        csvwriter.writeheader()
        csvwriter.writerows(aggregate_list)
    print("Done! Find the output summary at %s" % os.path.abspath(outpath))


if __name__ == "__main__":
    grid_search()
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple
import itertools as it

import numpy as onp
import six
from six.moves import reduce

from .. import core
from .. import ad_util
from .. import tree_util
from .. import linear_util as lu
from ..abstract_arrays import ShapedArray
from ..util import partial, unzip2, concatenate, safe_map, prod
from ..lib import xla_bridge as xb
from .xla import (xla_shape, xla_destructure, translation_rule,
                  xla_shape_to_result_shape, jaxpr_computation)
from .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal
from .batching import dimsize, broadcast
from . import partial_eval as pe
from . import parallel
from . import xla
from . import ad

map = safe_map


### util

def shard_arg(arg):
  sz = arg.shape[0]
  shards = [arg[i] for i in range(sz)]
  return [xb.device_put(shards[i], n) for n, i in enumerate(assign_shards(sz))]

def unshard_output(axis_size, out_shards):
  _, ids = onp.unique(assign_shards(axis_size), return_index=True)
  return onp.stack([out_shards[i] for i in ids])

def assign_shards(size):
  groupsize, ragged = divmod(xb.get_replica_count(), size)
  assert not ragged
  indices = onp.tile(onp.arange(size)[:, None], (1, groupsize))
  return tuple(indices.ravel())

def replica_groups(mesh_spec, mesh_axis):
  mesh_spec = mesh_spec + [xb.get_replica_count() // prod(mesh_spec)]
  groups = onp.split(onp.arange(prod(mesh_spec)).reshape(mesh_spec),
                     mesh_spec[mesh_axis], axis=mesh_axis)
  groups = map(onp.ravel, groups)
  return tuple(tuple(group) for group in zip(*groups))


### xla_pcall

AxisEnv = namedtuple("AxisEnv", ["names", "sizes"])

def axis_read(axis_env, axis_name):
  return max(i for i, name in enumerate(axis_env.names) if name == axis_name)

def compile_replicated(jaxpr, axis_name, axis_size, consts, *abstract_args):
  axis_env = AxisEnv([axis_name], [axis_size])
  arg_shapes = list(map(xla_shape, abstract_args))
  built_c = replicated_comp(jaxpr, axis_env, consts, (), *arg_shapes)
  result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())
  return built_c.Compile(arg_shapes, xb.get_compile_options()), result_shape

def replicated_comp(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes):
  c = xb.make_computation_builder("replicated_computation")

  def read(v):
    return env[v]

  def write(v, node):
    assert node is not None
    env[v] = node

  def axis_env_extend(name, size):
    return AxisEnv(axis_env.names + [name], axis_env.sizes + [size])

  env = {}
  write(core.unitvar, c.Tuple())
  if const_vals:
    map(write, jaxpr.constvars, map(c.Constant, const_vals))
    map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))
  else:
    all_freevars = it.chain(jaxpr.constvars, jaxpr.freevars)
    map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))
  map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))
  for eqn in jaxpr.eqns:
    in_nodes = map(read, eqn.invars)
    if eqn.primitive in parallel_translation_rules:
      name = eqn.params['axis_name']
      device_groups = replica_groups(axis_env.sizes, axis_read(axis_env, name))
      params = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}
      rule = parallel_translation_rules[eqn.primitive]
      ans = rule(c, *in_nodes, device_groups=device_groups, **params)
    elif eqn.bound_subjaxprs:
      if eqn.primitive is xla_pcall_p:
        name = eqn.params['axis_name']
        new_env = axis_env_extend(name, eqn.params['axis_size'])
        in_nodes = map(partial(xla_split, c, new_env.sizes), in_nodes)
        in_shapes = map(c.GetShape, in_nodes)
        (subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs
        subc = replicated_comp(
            subjaxpr, new_env, (),
            map(c.GetShape, map(read, const_bindings + freevar_bindings)),
            *in_shapes)
        subfun = (subc, tuple(map(read, const_bindings + freevar_bindings)))
        sharded_result = xla.xla_call_translation_rule(c, subfun, *in_nodes)
        device_groups = replica_groups(new_env.sizes, axis_read(new_env, name))
        ans = xla_join(c, device_groups, sharded_result)
      else:
        in_shapes = map(c.GetShape, in_nodes)
        subcs = [
            jaxpr_computation(
                subjaxpr, (),
                map(c.GetShape, map(read, const_bindings + freevar_bindings)),
                *in_shapes)
            for subjaxpr, const_bindings, freevar_bindings
            in eqn.bound_subjaxprs]
        subfuns = [(subc, tuple(map(read, const_bindings + freevar_bindings)))
                   for subc, (_, const_bindings, freevar_bindings)
                   in zip(subcs, eqn.bound_subjaxprs)]
        ans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes),
                                              **eqn.params)
    else:
      ans = translation_rule(eqn.primitive)(c, *in_nodes, **eqn.params)
    out_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]
    map(write, eqn.outvars, out_nodes)
  return c.Build(read(jaxpr.outvar))

def xla_split(c, axis_sizes, x):
  def _xla_split(shape, x):
    if shape.is_tuple():
      elts = map(_xla_split, shape.tuple_shapes(), xla_destructure(c, x))
      return c.Tuple(*elts)
    else:
      size = onp.array(prod(axis_sizes), onp.uint32)
      idx = c.Rem(c.ReplicaId(), c.Constant(size))
      dims = list(shape.dimensions())
      zero = onp.zeros(len(dims) - 1, onp.uint32)
      start_indices = c.Concatenate([c.Reshape(idx, None, [1]),
                                     c.Constant(zero)], 0)
      return c.Reshape(c.DynamicSlice(x, start_indices, [1] + dims[1:]),
                       None, dims[1:])
  return _xla_split(c.GetShape(x), x)

# TODO(b/110096942): more efficient gather
def xla_join(c, device_groups, x):
  def _xla_join(shape, x):
    if shape.is_tuple():
      elts = map(_xla_join, shape.tuple_shapes(), xla_destructure(c, x))
      return c.Tuple(*elts)
    else:
      group_size = len(device_groups[0])
      broadcasted = c.Broadcast(x, (group_size,))
      return c.AllToAll(broadcasted, 0, 0, device_groups)
  return _xla_join(c.GetShape(x), x)

def xla_pcall_impl(fun, *args, **params):
  axis_name = params.pop('axis_name')
  axis_size = params.pop('axis_size')
  assert not params

  flat_args, in_trees = unzip2(map(xla.tree_flatten, args))
  flat_args = concatenate(flat_args)
  fun, out_tree = xla.flatten_fun(fun, in_trees)

  abstract_args = map(partial(abstractify, axis_size), flat_args)
  compiled_fun = parallel_callable(fun, axis_name, axis_size, *abstract_args)
  flat_ans = compiled_fun(out_tree(), *flat_args)

  if out_tree() is xla.leaf:
    return flat_ans
  else:
    return xla.build_tree(iter(flat_ans), out_tree())

def abstractify(axis_size, x):
  assert onp.shape(x)[0] == axis_size
  aval = xla.abstractify(x)
  return ShapedArray(aval.shape[1:], aval.dtype)

@lu.memoize
def parallel_callable(fun, axis_name, axis_size, *avals):
  pvals = [PartialVal((aval, core.unit)) for aval in avals]
  with core.new_master(JaxprTrace, True) as master:
    jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals)
    assert not env
    compiled, _ = compile_replicated(jaxpr, axis_name, axis_size, consts, *avals)
    del master, consts, jaxpr, env
  return partial(execute_replicated, compiled, pval, axis_size)

def execute_replicated(compiled, pval, axis_size, out_tree, *args):
  input_bufs = zip(*map(shard_arg, args)) if args else [[]] * xb.get_replica_count()
  out_bufs = compiled.ExecutePerReplica(input_bufs)
  out_shards = [merge_pvals(buf.to_py(), pval) for buf in out_bufs]
  if out_tree is xla.leaf:
    return unshard_output(axis_size, out_shards)
  else:
    return map(partial(unshard_output, axis_size), zip(*out_shards))

xla_pcall_p = core.Primitive('xla_pcall')
xla_pcall = partial(core.call_bind, xla_pcall_p)
xla_pcall_p.def_custom_bind(xla_pcall)
xla_pcall_p.def_impl(xla_pcall_impl)
ad.primitive_transposes[xla_pcall_p] = partial(ad.call_transpose, xla_pcall_p)
# xla.translations[xla_pcall_p] = xla.xla_call_translation_rule  # TODO(mattjj)
pe.map_primitives.add(xla_pcall_p)

parallel_translation_rules = {}
# Stars: 100-1000
import unittest

from main import *


class ConditionalsTests(unittest.TestCase):
    def test_main(self):
        self.assertIsInstance(value, str)
        self.assertIs(value, 'y', "program must print 'yes'")
#!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from pytesseract import image_to_string
from urllib.request import urlretrieve
from PIL import Image
from io import BytesIO
from getpass import getpass
import sys
import time

user = input('''Enter your Roll No in the format BE/10xxx/1x : ''')
passwd = getpass('Enter your password : ')

browser = webdriver.Firefox()
browser.get('http://erp.bitmesra.ac.in')

user_name = browser.find_element_by_id('txt_username')
user_name.send_keys(user)

pwd = browser.find_element_by_id('txt_password')
# The credential was redacted in the source; from context it is the password
# read via getpass above.
pwd.send_keys(passwd)

time.sleep(2)

captcha = browser.find_element_by_xpath(
    '//*[@id="frmDefault"]/div[3]/div/div[4]/div[1]/div[2]/img')
src = captcha.get_attribute('src')
location = captcha.location
size = captcha.size
png = browser.get_screenshot_as_png()

# Crop the captcha image out of the full-page screenshot.
im = Image.open(BytesIO(png))
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
im = im.crop((left, top, right, bottom))
im.save('screenshot.png')

cp_txt = image_to_string(Image.open('screenshot.png'))
captcha_box = browser.find_element_by_id('txtcaptcha')
captcha_box.send_keys(cp_txt)
time.sleep(1)

login = browser.find_element_by_id('btnSubmit')
login.click()
#!/usr/bin/env python
# bcast2.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    data = {'key1': [7, 2.72, 2 + 3j],
            'key2': ('abc', 'xyz')}
else:
    data = None

data = comm.bcast(data, root=0)
print("process %s" % (rank))
print(rank, data)
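Note that `bcast` only populates `data` on the root; every other rank passes `None` in and receives the root's dictionary back. A hedged usage note (assuming an MPI runtime such as MPICH or Open MPI is installed):

# Hedged usage sketch for the script above; the launcher name and rank count
# depend on the local MPI installation:
#
#   mpiexec -n 4 python bcast2.py
#
# Each of the 4 ranks then prints the same broadcast dictionary, e.g.:
#   process 2
#   2 {'key1': [7, 2.72, (2+3j)], 'key2': ('abc', 'xyz')}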
#!/usr/bin/env python """Parsers for Linux PAM configuration files.""" import os import re from builtins import zip # pylint: disable=redefined-builtin from grr_response_core.lib import parser from grr_response_core.lib.parsers import config_file from grr_response_core.lib.rdfvalues import config_file as rdf_config_file class PAMParser(parser.FileParser, config_file.FieldParser): """Parser for PAM configurations.""" output_types = ["PamConfig"] supported_artifacts = ["LinuxPamConfigs"] process_together = True # The syntax is based on: # http://linux.die.net/man/5/pam.d PAMDIR = "/etc/pam.d" OLD_PAMCONF_FILENAME = "/etc/pam.conf" PAMCONF_RE = re.compile( r""" (\S+) # The "type". \s+ # separator ( # Now match the "control" argument. \[[^\]]*\] # Complex form. e.g. [success=ok default=die] etc. | \w+ # Or a single word form. ) # End of the "control" argument. \s+ # separator (\S+) # The "module-path". (?:\s+(.*))? # And the optional "module-arguments" is anything else. """, re.VERBOSE) def _FixPath(self, path): # Anchor any relative paths in the PAMDIR if not os.path.isabs(path): return os.path.join(self.PAMDIR, path) else: return path def EnumerateAllConfigs(self, stats, file_objects): """Generate RDFs for the fully expanded configs. Args: stats: A list of RDF StatEntries corresponding to the file_objects. file_objects: A list of file handles. Returns: A tuple of a list of RDFValue PamConfigEntries found & a list of strings which are the external config references found. """ # Convert the stats & file_objects into a cache of a # simple path keyed dict of file contents. cache = {} for stat_obj, file_obj in zip(stats, file_objects): cache[stat_obj.pathspec.path] = file_obj.read() result = [] external = [] # Check to see if we have the old pam config file laying around. if self.OLD_PAMCONF_FILENAME in cache: # The PAM documentation says if it contains config data, then # it takes precedence over the rest of the config. # If it doesn't, the rest of the PAMDIR config counts. result, external = self.EnumerateConfig(None, self.OLD_PAMCONF_FILENAME, cache) if result: return result, external # If we made it here, there isn't a old-style pam.conf file worth # speaking of, so process everything! for path in cache: # PAM uses the basename as the 'service' id. service = os.path.basename(path) r, e = self.EnumerateConfig(service, path, cache) result.extend(r) external.extend(e) return result, external def EnumerateConfig(self, service, path, cache, filter_type=None): """Return PamConfigEntries it finds as it recursively follows PAM configs. Args: service: A string containing the service name we are processing. path: A string containing the file path name we want. cache: A dictionary keyed on path, with the file contents (list of str). filter_type: A string containing type name of the results we want. Returns: A tuple of a list of RDFValue PamConfigEntries found & a list of strings which are the external config references found. """ result = [] external = [] path = self._FixPath(path) # Make sure we only look at files under PAMDIR. # Check we have the file in our artifact/cache. If not, our artifact # didn't give it to us, and that's a problem. # Note: This should only ever happen if it was referenced # from /etc/pam.conf so we can assume that was the file. if path not in cache: external.append("%s -> %s", self.OLD_PAMCONF_FILENAME, path) return result, external for tokens in self.ParseEntries(cache[path]): if path == self.OLD_PAMCONF_FILENAME: # We are processing the old style PAM conf file. 
It's a special case. # It's format is "service type control module-path module-arguments" # i.e. the 'service' is the first arg, the rest is line # is like everything else except for that addition. try: service = tokens[0] # Grab the service from the start line. tokens = tokens[1:] # Make the rest of the line look like "normal". except IndexError: continue # It's a blank line, skip it. # Process any inclusions in the line. new_path = None filter_request = None try: # If a line starts with @include, then include the entire referenced # file. # e.g. "@include common-auth" if tokens[0] == "@include": new_path = tokens[1] # If a line's second arg is an include/substack, then filter the # referenced file only including entries that match the 'type' # requested. # e.g. "auth include common-auth-screensaver" elif tokens[1] in ["include", "substack"]: new_path = tokens[2] filter_request = tokens[0] except IndexError: # It's not a valid include line, so keep processing as normal. pass # If we found an include file, enumerate that file now, and # included it where we are in this config file. if new_path: # Preemptively check to see if we have a problem where the config # is referencing a file outside of the expected/defined artifact. # Doing it here allows us to produce a better context for the # problem. Hence the slight duplication of code. new_path = self._FixPath(new_path) if new_path not in cache: external.append("%s -> %s" % (path, new_path)) continue # Skip to the next line of the file. r, e = self.EnumerateConfig(service, new_path, cache, filter_request) result.extend(r) external.extend(e) else: # If we have been asked to filter on types, skip over any types # we are not interested in. if filter_type and tokens[0] != filter_type: continue # We can skip this line. # If we got here, then we want to include this line in this service's # config. # Reform the line and break into the correct fields as best we can. # Note: ParseEntries doesn't cope with what we need to do. match = self.PAMCONF_RE.match(" ".join(tokens)) if match: p_type, control, module_path, module_args = match.group(1, 2, 3, 4) # Trim a leading "-" from the type field if present. if p_type.startswith("-"): p_type = p_type[1:] result.append( rdf_config_file.PamConfigEntry( service=service, type=p_type, control=control, module_path=module_path, module_args=module_args)) return result, external def ParseMultiple(self, stats, file_objects, knowledge_base): _ = knowledge_base results, externals = self.EnumerateAllConfigs(stats, file_objects) yield rdf_config_file.PamConfig(entries=results, external_config=externals)
# apps/menuplans/dbaccess.py
import datetime
from uuid import uuid4
import xml.etree.ElementTree as et

from basex.basex import recipe_db
from recipes.dbaccess import get_random_recipes

GET_MENUPLANS_QUERY = '''
import module namespace paging="custom/pagination";

declare variable $query as xs:string external;
declare variable $offset as xs:integer external;
declare variable $limit as xs:integer external;

let $upperQuery := upper-case($query)
let $selection :=
    for $menuplan in collection("recipe")//menuplan
    where $menuplan//recipe[matches(upper-case(name), $upperQuery)]
       or $menuplan//recipe//ingredient[matches(upper-case(name), $upperQuery)]
       or $menuplan[matches(upper-case(name), $upperQuery)]
    order by xs:dateTime($menuplan/creationDate) descending
    return $menuplan
let $result := paging:paged($selection, 'menuplans', $offset, $limit, true())
return $result
'''

GET_MENUPLAN_DISPLAY_QUERY = '''
declare variable $pk as xs:string external;

<menuplan>{
    let $doc := //menuplan[pk=$pk]
    return (
        <days>{
            for $recipe at $recipeIndex in $doc//recipe
            return <day>
                <number>{$recipeIndex}</number>
                <recipe>{$recipe/name/text()}</recipe>
            </day>
        }</days>,
        <recipes>{
            $doc//recipe
        }</recipes>,
        <shoppingList>{
            for $ingredient in distinct-values($doc//ingredient/name/text())
            order by $ingredient ascending
            return
                for $unit in distinct-values($doc//ingredient[name=$ingredient]/amount/unit)
                let $numericAmounts := $doc//ingredient[name=$ingredient]/amount[string(number(value)) != 'NaN' and unit=$unit]/value
                let $alphaAmounts := $doc//ingredient[name=$ingredient]/amount[string(number(value)) = 'NaN' and unit=$unit]/value/text()
                return <shoppingListItem>
                    <name>{$ingredient}</name>
                    <unit>{$unit}</unit>
                    <amount>{sum($numericAmounts)}</amount>
                    <alphaAmounts>{
                        for $a in $alphaAmounts
                        return <value>{$a}</value>
                    }</alphaAmounts>
                </shoppingListItem>
        }</shoppingList>)
}</menuplan>
'''


def get_menuplan_display(pk):
    with recipe_db() as db:
        q = db.query(GET_MENUPLAN_DISPLAY_QUERY)
        q.bind('$pk', pk)
        menuplans = q.execute()
        return menuplans.encode('utf8')


def get_menuplans(query=None, offset=0, limit=500):
    with recipe_db() as db:
        q = db.query(GET_MENUPLANS_QUERY)
        q.bind('$offset', str(offset))
        q.bind('$limit', str(limit))
        q.bind('$query', str(query or ''))
        menuplans = q.execute()
        return menuplans.encode('utf8')


def create_menuplan(people, num_recipes):
    pk = str(uuid4())
    people = int(people)
    current_date = datetime.datetime.now().isoformat('T')

    erecipes = et.fromstring(get_random_recipes(number_of_recipes=num_recipes,
                                                rating='good'))
    for erecipe in erecipes:
        rpeople = erecipe.findtext('people')
        if rpeople:
            rpeople = float(rpeople)
            # Scale each numeric ingredient amount to the requested headcount.
            for evalue in erecipe.findall('.//amount/value'):
                if evalue.text and evalue.text.isdigit():
                    new_value = '%.2f' % (float(evalue.text) * float(people) / rpeople)
                    if new_value.endswith('.00'):
                        new_value = new_value.replace('.00', '')
                    evalue.text = new_value

    eroot = et.Element('menuplans')
    emenuplan = et.SubElement(eroot, 'menuplan')
    emenuplan.append(erecipes)
    ename = et.SubElement(emenuplan, 'name')
    ecreationDate = et.SubElement(emenuplan, 'creationDate')
    epeople = et.SubElement(emenuplan, 'people')
    epk = et.SubElement(emenuplan, 'pk')
    epk.text = pk
    ename.text = current_date
    ecreationDate.text = current_date
    epeople.text = str(people)
    return pk, eroot


def menuplan_path(pk):
    return 'menuplans/%s.xml' % pk


def add_menuplan(pk, menuplan):
    with recipe_db() as db:
        db.add(menuplan_path(pk), menuplan)
# citReyJoshua/supplie
from django.contrib import admin

from backend.product.models import Product, ProductImage

admin.site.register(Product)
admin.site.register(ProductImage)
# -*- coding: utf-8 -*- """ @date: 2021/5/4 ไธ‹ๅˆ7:11 @file: torchvision_resnet_to_zcls_resnet.py @author: zj @description: Transform torchvision pretrained model into zcls format """ import os from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152, resnext50_32x4d, \ resnext101_32x8d from zcls.model.recognizers.resnet.resnet import ResNet from zcls.config import cfg from zcls.util.checkpoint import CheckPointer def convert(torchvision_resnet, zcls_resnet): torchvision_resnet_dict = torchvision_resnet.state_dict() zcls_resnet_dict = zcls_resnet.state_dict() for k, v in torchvision_resnet_dict.items(): if 'downsample' in k: zcls_resnet_dict[f"backbone.{k.replace('downsample', 'down_sample')}"] = v elif 'layer' in k: zcls_resnet_dict[f'backbone.{k}'] = v elif 'fc' in k: zcls_resnet_dict[f'head.{k}'] = v elif 'conv1.weight' == k: zcls_resnet_dict['backbone.stem.0.weight'] = v elif 'bn1' in k: zcls_resnet_dict[k.replace('bn1', 'backbone.stem.1')] = v else: raise ValueError("{k} doesn't exist") return zcls_resnet_dict def process(item, cfg_file): if item == 'resnet18': torchvision_resnet = resnet18(pretrained=True) elif item == 'resnet34': torchvision_resnet = resnet34(pretrained=True) elif item == 'resnet50': torchvision_resnet = resnet50(pretrained=True) elif item == 'resnet101': torchvision_resnet = resnet101(pretrained=True) elif item == 'resnet152': torchvision_resnet = resnet152(pretrained=True) elif item == 'resnext50_32x4d': torchvision_resnet = resnext50_32x4d(pretrained=True) elif item == 'resnext101_32x8d': torchvision_resnet = resnext101_32x8d(pretrained=True) else: raise ValueError(f"{item} doesn't exists") cfg.merge_from_file(cfg_file) zcls_resnet = ResNet(cfg) zcls_resnet_dict = convert(torchvision_resnet, zcls_resnet) zcls_resnet.load_state_dict(zcls_resnet_dict) res_dir = 'outputs/converters/' if not os.path.exists(res_dir): os.makedirs(res_dir) checkpoint = CheckPointer(model=zcls_resnet, save_dir=res_dir, save_to_disk=True) checkpoint.save(f'{item}_imagenet') if __name__ == '__main__': item_list = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d'] cfg_file_list = [ 'r18_zcls_imagenet_224.yaml', 'r34_zcls_imagenet_224.yaml', 'r50_zcls_imagenet_224.yaml', 'r101_zcls_imagenet_224.yaml', 'r152_zcls_imagenet_224.yaml', 'rxt50_32x4d_zcls_imagenet_224.yaml', 'rxt101_32x8d_zcls_imagenet_224.yaml' ] prefix_path = 'configs/benchmarks/resnet' for item, cfg_file in zip(item_list, cfg_file_list): config_path = os.path.join(prefix_path, cfg_file) print(config_path) process(item, config_path)
# foo.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 15 09:15:09 2016

@author: ericgrimson
"""

x = 6
if x != 5:
    print('i am here')
else:
    print('no I am not')
# tests/v1/test_organizations_api.py
# coding: utf-8

# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.

from __future__ import absolute_import

import unittest

import datadog_api_client.v1
from datadog_api_client.v1.api.organizations_api import OrganizationsApi  # noqa: E501


class TestOrganizationsApi(unittest.TestCase):
    """OrganizationsApi unit test stubs"""

    def setUp(self):
        self.api = OrganizationsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_create_child_org(self):
        """Test case for create_child_org

        Create a child organization  # noqa: E501
        """
        pass

    def test_get_org(self):
        """Test case for get_org

        Get organization information  # noqa: E501
        """
        pass

    def test_list_orgs(self):
        """Test case for list_orgs

        List your managed organizations  # noqa: E501
        """
        pass

    def test_update_org(self):
        """Test case for update_org

        Update your organization  # noqa: E501
        """
        pass

    def test_upload_id_p_for_org(self):
        """Test case for upload_id_p_for_org

        Upload IdP metadata  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
# ATrain951/01.python-com_Qproject
"""
# Sample code to perform I/O:

name = input()                  # Reading input from STDIN
print('Hi, %s.' % name)         # Writing output to STDOUT

# Warning: Printing unwanted or ill-formatted data to output will cause
# the test cases to fail
"""

# Write your code here
import sys

t = int(sys.stdin.readline())
for _ in range(t):
    n, m, x, y = map(int, sys.stdin.readline().strip().split())
    ans = min((y * n + m) // (x + y), n)
    print(ans)
# MyTools/lightup.py
import sys
import Image
import colorsys
import ImageDraw
import math


def convert(img):
    rgb_img = img.convert('RGBA')
    img = Image.new('RGBA', rgb_img.size, (0x00, 0x00, 0x00, 0xff))
    draw = ImageDraw.Draw(img)
    x, y = rgb_img.size
    for i in range(0, x):
        for j in range(0, y):
            r, g, b, a = rgb_img.getpixel((i, j))
            h, s, v = colorsys.rgb_to_hsv(r / 255., g / 255., b / 255.)
            # Brighten each channel 4x and clamp at 0xff. This must be min,
            # not max; max(0xff, ...) would saturate every pixel to white.
            r0 = min(0xff, r * 4)
            g0 = min(0xff, g * 4)
            b0 = min(0xff, b * 4)
            draw.point((i, j), (r0, g0, b0, a))
    return img


def make_image(file):
    img = Image.open(file, 'r')
    img = convert(img)
    img.save('new/' + file)


if __name__ == '__main__':
    for arg in sys.argv:
        print arg
        if arg.endswith('.png'):
            make_image(arg)

# EOF
# -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import CrawlSpider
from scrapy.http import Request
from scrapy.selector import Selector

from douban.items import DoubanItem


class Douban(CrawlSpider):
    name = "douban"
    redis_key = 'douban:start_urls'
    start_urls = ['http://movie.douban.com/top250']
    url = 'http://movie.douban.com/top250'

    def parse(self, response):
        # print response.body
        item = DoubanItem()
        selector = Selector(response)
        Movies = selector.xpath('//div[@class="info"]')
        for eachMovie in Movies:
            title = eachMovie.xpath('div[@class="hd"]/a/span/text()').extract()
            fullTitle = ''
            for each in title:
                fullTitle += each
            movieInfo = eachMovie.xpath('div[@class="bd"]/p[@class=""]/text()').extract()
            star = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span[@class="rating_num"]/text()').extract()[0]
            quote = eachMovie.xpath('div[@class="bd"]/p[@class="quote"]/span/text()').extract()
            # quote may be empty, so check it before indexing
            if quote:
                quote = quote[0]
            else:
                quote = ''
            item['title'] = fullTitle
            item['movieInfo'] = ';'.join(movieInfo)
            item['star'] = star
            item['quote'] = quote
            yield item
        nextLink = selector.xpath('//span[@class="next"]/link/@href').extract()
        # Page 10 is the last page and has no next-page link
        if nextLink:
            nextLink = nextLink[0]
            # print nextLink
            yield Request(self.url + nextLink, callback=self.parse)
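A spider like this is normally launched through Scrapy's command-line runner from inside the project directory rather than executed directly; a hedged sketch of the invocation (the output file name is my own choice):

# Hedged usage sketch; run from the Scrapy project root. The spider is looked
# up by its `name` attribute ("douban"):
#
#   scrapy crawl douban -o top250.json
#
# Each yielded DoubanItem (title, movieInfo, star, quote) becomes one record
# in top250.json.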
# Generated by Django 2.0.6 on 2018-08-24 13:48

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('products', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductPriceOffer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.PositiveIntegerField(choices=[(0, 'Not reviewed'), (1, 'Processing'), (2, 'Canceled'), (3, 'Completed')], default=0, verbose_name='Status')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('mobile', models.CharField(max_length=255, verbose_name='Mobile phone')),
                ('email', models.EmailField(max_length=255, verbose_name='Email')),
                ('text', models.TextField(max_length=1000, verbose_name='Offer')),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='price_offers', to='products.Product', verbose_name='Product')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='offers', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'verbose_name': 'Product price offer',
                'verbose_name_plural': 'Product price offers',
                'ordering': ['-date_created'],
            },
        ),
    ]
from pyderman.util import downloader
import re
import json


def get_url(version='latest', _os=None, _os_bit=None):
    beta = True
    pattern = version
    bit = ''
    if not version or version == 'latest':
        beta = False
        pattern = ''
    if _os == 'linux':
        bit = '64' if _os_bit == '64' else 'i686'
    for release in _releases():
        name = release['name'].lower()
        if not beta and 'beta' in name:
            continue
        if _os in name and pattern in name and bit in name:
            ver = re.search(r'(\d{1,2}\.\d{1,2}\.\d{1,2})', name).group(1)
            return 'phantomjs.*/bin/phantomjs', release['links']['self']['href'], ver


def _releases():
    page = 'https://api.bitbucket.org/2.0/repositories/ariya/phantomjs/downloads/'
    while page:
        data = json.loads(downloader.raw(page))
        for release in data['values']:
            yield release
        page = data['next'] if 'next' in data else None


if __name__ == "__main__":
    print(get_url('latest', 'win', '64'))
""" test_QuerySet """ from AzureODM.QuerySet import ( QuerySet, obj_to_query_value, Q, QCombination, AndOperator, OrOperator) from AzureODM.Entity import Entity from AzureODM.Fields import GenericField, FloatField, KeyField import pytest import re from datetime import datetime, timezone regex = re.compile(QuerySet.cmp_exp) class Test_QCombination: """test QCombination""" @pytest.fixture() def fake_entity(self): class FakeEntity(Entity): metas = { 'table_name': 'lolol' } PartitionKey = KeyField() RowKey = KeyField() return FakeEntity def test_q1_q_q2_q(self): q1 = Q(PartitionKey='p1') q2 = Q(RowKey='r1') a = QCombination(AndOperator, q1, q2) assert a.subquires[0].k == 'PartitionKey' assert a.subquires[0].v == 'p1' assert a.subquires[1].__name__ == 'AndOperator' assert a.subquires[2].k == 'RowKey' assert a.subquires[2].v == 'r1' def test_q1_qcom_q2_q(self): q1 = Q(PartitionKey='p1') q2 = Q(RowKey='r1') qc = QCombination(AndOperator, q1, q2) assert len(qc.subquires) == 3 a = QCombination(AndOperator, qc, Q(Temp='lol')) print('---------{}'.format(a.subquires)) assert len(a.subquires) == 5 def test_q1_qcom_q2_qcom(self): q1 = Q(PartitionKey='p1') q2 = Q(RowKey='r1') q3 = Q(PartitionKey='p1') q4 = Q(RowKey='r1') qc = QCombination(AndOperator, q1, q2) qc2 = QCombination(AndOperator, q3, q4) a = QCombination(OrOperator, qc, qc2) assert len(a.subquires) == 7 assert a.subquires[1] is AndOperator assert a.subquires[3] is OrOperator assert a.subquires[5] is AndOperator def test_compile(self, fake_entity): a = Q(PartitionKey='p1') | Q(RowKey='r1') & Q(RowKey='r3') qs = a.compile(entity=fake_entity) expected = "(PartitionKey eq 'p1' or RowKey eq 'r1' and RowKey eq 'r3')" assert expected == qs class Test_Q: """Test q""" def test_two_combine_and(self): a = (Q(PartitionKey='p1') & Q(RowKey='r1')) assert isinstance(a, QCombination) assert len(a.subquires) == 3 assert a.subquires[0].k == 'PartitionKey' assert a.subquires[0].v == 'p1' assert a.subquires[1].__name__ == 'AndOperator' assert a.subquires[2].k == 'RowKey' assert a.subquires[2].v == 'r1' def test_two_combine_or(self): a = (Q(PartitionKey='p1') | Q(RowKey='r1')) assert isinstance(a, QCombination) assert len(a.subquires) == 3 assert a.subquires[0].k == 'PartitionKey' assert a.subquires[0].v == 'p1' assert a.subquires[1].__name__ == 'OrOperator' assert a.subquires[2].k == 'RowKey' assert a.subquires[2].v == 'r1' def test_three_combine_and_or(self): a = (Q(PartitionKey='p1') | Q(RowKey='r1') & Q(RowKey='r3')) assert isinstance(a, QCombination) assert len(a.subquires) == 5 assert a.subquires[0].k == 'PartitionKey' assert a.subquires[0].v == 'p1' assert a.subquires[1].__name__ == 'OrOperator' assert a.subquires[2].k == 'RowKey' assert a.subquires[2].v == 'r1' @pytest.fixture() def fake_entity(self): class FakeEntity(Entity): metas = { 'table_name': 'lolol' } PartitionKey = KeyField() RowKey = KeyField() return FakeEntity def test_in_query(self, fake_entity): a = Q(RowKey__in=[123, 124, '412']) query_string = a.compile(entity=fake_entity) expected = "(RowKey eq 123 or RowKey eq 124 or RowKey eq '412')" assert query_string == expected class Test_obj_to_query_value: """test obj_to_query_value""" @pytest.mark.parametrize('obj,expected', [ ('thisisgood', "'thisisgood'"), (123456, '123456'), (123.456, '123.456'), (True, 'true'), (False, 'false'), (datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc), "datetime'2014-01-01T00:00:00Z'") ]) def test_obj_conversion(self, obj, expected): assert obj_to_query_value(obj) == expected def 
test_raises_ValueError_if_datetime_is_naive(self): with pytest.raises(ValueError) as e: obj_to_query_value(datetime(2014, 1, 1, 1, 1, 1, 1)) assert 'only timezone awared datetime is accpeted' in str(e) def test_type_not_match(self): with pytest.raises(Exception) as e: obj_to_query_value(None) assert 'obj type is unknown, ' in str(e) class Test_q_regexp: """test q_regexp""" @pytest.mark.parametrize('query_string,expected_field,expected_operator', [ ('thisisgood', 'thisisgood', None), ('thisisgood__gt', 'thisisgood', 'gt'), ('thisisgood__ge', 'thisisgood', 'ge'), ('thisisgood__lt', 'thisisgood', 'lt'), ('thisisgood__le', 'thisisgood', 'le'), ('thisisgood__ne', 'thisisgood', 'ne') ]) def test_regexp(self, query_string, expected_field, expected_operator): m = regex.match(query_string) assert m is not None assert m.group('field') == expected_field if expected_operator is not None: assert m.group('operator') == expected_operator else: assert m.group('operator') is None @pytest.mark.parametrize('query_string', [ '', '__gt', '__', '_asdf__gt', '.' ]) def test_should_not_match(self, query_string): m = regex.match(query_string) assert m is None class Test_query_parser: """test query_parser""" @pytest.fixture() def fake_entity(self): class FakeEntity(Entity): metas = { 'table_name': 'lolol' } PartitionKey = KeyField() RowKey = KeyField() return FakeEntity @pytest.fixture() def fake_entity_selected(self, fake_entity): a = fake_entity.select(fields=None) return a def test_query_parser_wrap_where(self, fake_entity_selected): a = fake_entity_selected.where(PartitionKey='lol') expected = "PartitionKey eq 'lol'" assert a.filter == expected a.andWhere(RowKey__gt='r1') expected += " and RowKey gt 'r1'" assert a.filter == expected a.orWhere(RowKey__ge='o1') expected += " or RowKey ge 'o1'" assert a.filter == expected a.notWhere(RowKey__ne=True) expected += " not RowKey ne true" assert a.filter == expected def test_query_parser_wrap_raises_when_args(self, fake_entity_selected): a = fake_entity_selected with pytest.raises(Exception) as e: a.where('lol') assert 'you cannot put args into query function' in str(e) def test_raises_when_more_than_one_kwargs(self, fake_entity_selected): a = fake_entity_selected with pytest.raises(Exception) as e: a.where(RowKey='lol', PartitionKey='lll') assert 'you cannot put more than one args into query function' in str( e) def test_raises_when_no_target_entity(self, fake_entity_selected): a = fake_entity_selected a._targeted_entity = None with pytest.raises(Exception) as e: a.where(RowKey='lol') assert 'please call select before using query' in str(e) def test_raises_when_value_is_None(self, fake_entity_selected): with pytest.raises(Exception) as e: fake_entity_selected.where(RowKey=None) assert 'comparison value cannot be None' in str(e) def test_raises_when_query_is_not_valid(self, fake_entity_selected): with pytest.raises(ValueError) as e: fake_entity_selected.where(RowKey__xx='lol') assert 'is not a valid query' in str(e) def test_raises_when_field_is_not_defined(self, fake_entity_selected): with pytest.raises(KeyError) as e: fake_entity_selected.where(XXXX__lt='lol') assert 'field is not defined, ' in str(e) def test_limit_raises_if_not_None_or_int(self, fake_entity_selected): with pytest.raises(TypeError) as e: fake_entity_selected.limit(limit='lol') assert 'limit is not an int, ' in str(e) def test_limit_will_set__limit_attr(self, fake_entity_selected): fake_entity_selected.limit(limit=123) assert fake_entity_selected._limit == 123 fake_entity_selected.limit(limit=None) 
assert fake_entity_selected._limit is None class Test_select: """test select""" def test_raise_if_entity_not_Entity(self): q = QuerySet() with pytest.raises(TypeError): q.select(entity={}, fields=None) def test_raise_if_fields_not_None_not_list_not_star(self): q = QuerySet() with pytest.raises(TypeError) as e: q.select(entity=Entity, fields=123) assert 'fields can only be list or None' in str(e) def test_stringify_fields_for_select(self): q = QuerySet() q.select(entity=Entity, fields=['PartitionKey', 'RowKey', 'f1']) assert q._select == 'PartitionKey,RowKey,f1' def test_stringify_fields_None_for_select(self): q = QuerySet() q.select(entity=Entity, fields=None) assert q._select == '*' def test_fields_accept_star(self): q = QuerySet() q.select(entity=Entity, fields='*') assert q._select == '*' def test_added_PartitionKey_RowKey_to_Fields_if_not_existed(self): q = QuerySet() q.select(entity=Entity, fields=['f1']) assert q._select == 'f1,PartitionKey,RowKey' def test_save_values_to_attributes(self): q = QuerySet() q.select(entity=Entity, fields=['PartitionKey', 'RowKey', 'f1']) assert q._targeted_entity == Entity class Test_go: """test select""" def test_raise_if__target_entity_is_None(self): q = QuerySet() with pytest.raises(Exception) as e: q.go() assert 'you must call select before call go' in str(e) def test_call_and_return__targeted_entity_find(self, monkeypatch): class FakeEntity(Entity): metas = { 'table_name': 'lolol' } PartitionKey = KeyField() RowKey = KeyField() def fake_find(*args, **kwargs): assert kwargs['filter'] == "PartitionKey eq 'lol'" assert kwargs['select'] == 'PartitionKey,RowKey,f1' assert kwargs['limit'] == 10 raise MemoryError('called fake_find') monkeypatch.setattr(FakeEntity, 'find', fake_find) q = QuerySet() q.select(entity=FakeEntity, fields=['PartitionKey', 'RowKey', 'f1']) q.where(PartitionKey='lol').limit(10) with pytest.raises(MemoryError) as e: q.go() assert 'called fake_find' in str(e)
#!/usr/bin/env python """ Python implementation of vic2nc This module facilitates the conversion of ascii VIC output files into 3 or 4 dimenstional netcdf files. References: - VIC: http://www.hydro.washington.edu/Lettenmaier/Models/VIC/index.shtml - netCDF: http://www.unidata.ucar.edu/software/netcdf/ - Python netCDF4: https://code.google.com/p/netcdf4-python/ - NetCDF Climate and Forecast (CF) Metadata Convention: http://cf-pcmdi.llnl.gov/ - Pandas: http://pandas.pydata.org/ """ # Imports from __future__ import print_function from os import path from glob import glob from re import findall from collections import deque from bisect import bisect_left from getpass import getuser from datetime import datetime, timedelta from pandas import read_table, DataFrame from netCDF4 import Dataset, date2num, num2date, default_fillvals import socket import subprocess import dateutil.relativedelta as relativedelta import os import sys import numpy as np import time as tm from tonic.io import read_config, SafeConfigParser from tonic.tonic import calc_grid, get_grid_inds, NcVar from tonic.pycompat import pyzip, pyrange description = 'Convert a set of VIC ascii outputs to gridded netCDF' help = 'Convert a set of VIC ascii outputs to gridded netCDF' # -------------------------------------------------------------------- # SECSPERDAY = 86400.0 REFERENCE_STRING = '0001-01-01 00:00:00' TIMEUNITS = 'days since {0}'.format(REFERENCE_STRING) # (MUST BE DAYS)! TIMESTAMPFORM = '%Y-%m-%d-%H' # Precision NC_DOUBLE = 'f8' NC_FLOAT = 'f4' NC_INT = 'i4' # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # # Default configuration default_config = {'OPTIONS': {'out_file_format': 'NETCDF3_64BIT', 'precision': 'single', 'calendar': 'standard', 'time_segment': 'month', 'snow_bands': False, 'veg_tiles': False, 'soil_layers': False}, 'DOMAIN': {'longitude_var': 'longitude', 'latitude_var': 'latitude', 'y_x_dims': ['y', 'x']}} # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class Point(object): '''Creates a point class for intellegently storing coordinate information''' def __init__(self, lat='', lon='', x='', y='', filename=''): '''Defines x and y variables''' self.lat = lat self.lon = lon self.x = x self.y = y self.filename = filename def _open_binary(self): print('opening binary file: {0}'.format(self.filename)) self.f = open(self.filename, 'rb') def _open_ascii(self): print('opening ascii file: {0}'.format(self.filename)) # return an iterator self._reader = read_table(self.filename, sep=self.delimeter, header=None, iterator=True, usecols=self.usecols, names=self.names) def _open_netcdf(self): print('opening netcdf file: {0}'.format(self.filename)) self.f = Dataset(self.filename, 'r') def _read_ascii(self, count=None): self.df = self._reader.get_chunk(count) return def _read_binary(self, count=-1): d = np.fromfile(self.f, dtype=self.dt, count=count) data = {} for i, name in enumerate(self.names): data[name] = np.array(d[name], dtype=self.dtypes[i], copy=True) / float(self.bin_mults[i]) self.df = DataFrame(data) return def _read_netcdf(self): data = {} for key in self.names: data[key] = np.squeeze(self.f.variables[key][:]) self.df = DataFrame(data) def close(self): print('closing file: {0}'.format(self.filename)) try: self.f.close() except: pass def __str__(self): return "Point({0},{1},{2},{3})".format(self.lat, self.lon, self.y, self.x) 
def __repr__(self): return "Point(lat={0}, lon={1}, \ y={2}, x={3}, \ filename={4})".format(self.lat, self.lon, self.y, self.x, self.filename) # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class Plist(deque): '''List subclass that has a few helper methods for adding and obtaining coordinates''' def get_lons(self): return np.array([p.lon for p in self]) def get_lats(self): return np.array([p.lat for p in self]) def add_xs(self, xinds): for i in pyrange(len(self)): self[i].x = xinds[i] return def add_ys(self, yinds): for i in pyrange(len(self)): self[i].y = yinds[i] return def get_ys(self): return np.array([p.y for p in self]) def get_xs(self): return np.array([p.x for p in self]) def get_data(self, name, data_slice): return np.array([p.df[name].values[data_slice] for p in self]) def set_fileformat(self, fileformat): """sets and assigns fileformat specific attributes and methods""" if fileformat == 'ascii': delimeter = r'\t' # VIC ascii files are tab seperated else: delimeter = r',' # true csv for p in self: p.fileformat = fileformat if fileformat in ['ascii', 'csv']: p.open = p._open_ascii p.delimeter = delimeter p.read = p._read_ascii elif fileformat == 'binary': p.open = p._open_binary p.read = p._read_binary p.dt = np.dtype(list(pyzip(p.names, p.bin_dtypes))) elif fileformat == 'netcdf': p.open = p._open_netcdf p.read = p._read_netcdf else: raise ValueError('Unknown file format: {0}'.format(fileformat)) return def set_names(self, names): for p in self: p.names = names return def set_usecols(self, usecols): for p in self: p.usecols = usecols return def set_dtypes(self, dtypes): for p in self: p.dtypes = dtypes return def set_bin_dtypes(self, bin_dtypes): for p in self: p.bin_dtypes = bin_dtypes return def set_bin_mults(self, bin_mults): for p in self: p.bin_mults = bin_mults return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class Segment(object): def __init__(self, num, i0, i1, nc_format, filename, memory_mode='original'): '''Class used for holding segment information ''' self.num = num self.i0 = i0 self.i1 = i1 self.filename = filename self.fields = {} self.memory_mode = memory_mode self.nc_write(nc_format) # Set slice if memory_mode == 'original': self.slice = slice(None) else: self.slice = slice(i0, i1) def nc_globals(self, title='VIC netCDF file', history='Created: {0} by {1}'.format(tm.ctime(tm.time()), getuser()), institution='University of Washington', source=sys.argv[0], references=( 'Primary Historical Reference for VIC: Liang,' 'X., <NAME>, <NAME>, and <NAME>,' '1994: A Simple hydrologically Based Model of Land' 'Surface Water and Energy Fluxes for GSMs, J. Geophys.' 
                               'Res., 99(D7), 14,415-14,428.'),
                   comment=(
                       'Output from the Variable Infiltration Capacity'
                       '(VIC) Macroscale Hydrologic Model'),
                   conventions='CF-1.6',
                   target_grid_file='unknown',
                   username=None,
                   hostname=None,
                   version=None,
                   **kwargs):
        self.f.title = title.encode()
        self.f.history = history.encode()
        self.f.institution = institution.encode()
        self.f.source = source.encode()
        self.f.references = references.encode()
        self.f.comment = comment.encode()
        self.f.conventions = conventions.encode()

        if hostname:
            self.f.hostname = hostname
        else:
            self.f.hostname = socket.gethostname()
        self.f.hostname = self.f.hostname.encode()

        if username:
            self.f.username = username
        else:
            self.f.username = getuser()
        self.f.username = self.f.username.encode()

        if version:
            self.f.version = version
        else:
            try:
                self.f.version = subprocess.check_output(
                    ["git", "describe"]).rstrip()
            except Exception:
                self.f.version = 'unknown'
        self.f.version = self.f.version.encode()

        for attribute, value in kwargs.items():
            if hasattr(self.f, attribute):
                print('WARNING: Attribute {0} already exists'.format(attribute))
                print('Renaming to g_{0} to avoid '
                      'overwriting.'.format(attribute))
                attribute = 'g_{0}'.format(attribute)
            if isinstance(value, str):
                value = value.encode()
            setattr(self.f, attribute, value)
        return

    def __str__(self):
        return "Segment Object({0})".format(self.filename)

    def __repr__(self):
        return """
-------------------------- Segment {0} --------------------------
Filename: {1}
Start Index: {2}
End Index: {3}
Start Date: {4}
End Date: {5}
------------------------------------------------------------------
""".format(self.num, self.filename, self.i0, self.i1, self.startdate,
           self.enddate)

    def nc_time(self, t0, t1, times, calendar):
        """ define time dimension (and write data) """
        self.f.createDimension('time', len(times[self.i0:self.i1]))
        time = self.f.createVariable('time', 'f8', ('time', ))
        time[:] = times[self.i0:self.i1]
        time.long_name = 'time'.encode()
        time.units = TIMEUNITS.encode()
        time.calendar = calendar.encode()
        self.count = len(time)
        self.startdate = t0
        self.enddate = t1

    def nc_domain(self, domain):
        """ define the coordinate dimension (and write data) """
        # Setup dimensions
        dimensions = []
        for name, ncvar in domain.items():
            # Setup dimensions
            for dim in ncvar.dimensions:
                if dim not in dimensions:
                    dimensions.append(dim)
                    self.f.createDimension(dim, getattr(ncvar, dim))
            # Create variable
            if "_FillValue" in ncvar.attributes:
                fill_val = ncvar.attributes['_FillValue']
                del ncvar.attributes['_FillValue']
            else:
                fill_val = None
            self.fields[name] = self.f.createVariable(name, NC_DOUBLE,
                                                      ncvar.dimensions,
                                                      fill_value=fill_val)
            # Apply the data
            self.fields[name][:] = ncvar
            # Add the attributes
            for key, val in ncvar.attributes.items():
                if isinstance(val, str):
                    val = val.encode()
                setattr(self.fields[name], key, val)
        return

    def nc_dimensions(self, snow_bands=False, veg_tiles=False,
                      soil_layers=False):
        """ Define 4th dimensions """
        if snow_bands:
            self.f.createDimension('snow_bands', snow_bands)
        if veg_tiles:
            self.f.createDimension('veg_tiles', veg_tiles)
        if soil_layers:
            self.f.createDimension('soil_layers', soil_layers)
        return

    def nc_fields(self, fields, y_x_dims, precision):
        """ define each field """
        coords = ('time',) + tuple(y_x_dims)

        if precision == 'single':
            prec_global = NC_FLOAT
        elif precision == 'double':
            prec_global = NC_DOUBLE
        else:
            raise ValueError('Unknown value for OPTIONS[precision] '
                             'field: {0}'.format(precision))

        self.three_dim_vars = []
        self.four_dim_vars = []

        for name, field in fields.items():
            write_out_var = True
            if 'write_out_var' in field:
                if not field['write_out_var']:
                    write_out_var = False

            if write_out_var:
                if 'dim4' in field:
                    ncols = len(self.f.dimensions[field['dim4']])
                    if len(field['column']) == ncols:
                        # 4d var
                        coords = ('time',) + tuple([field['dim4']]) \
                            + tuple(y_x_dims)
                        self.four_dim_vars.append(name)
                    else:
                        raise ValueError('Number of columns for variable {0} '
                                         'does not match the length ({1}) of '
                                         'the {2} dimension'.format(
                                             name, ncols, field['dim4']))
                else:
                    # standard 3d var
                    coords = ('time',) + tuple(y_x_dims)
                    self.three_dim_vars.append(name)

                if 'type' in field:
                    prec = field['type']
                else:
                    prec = prec_global
                fill_val = default_fillvals[prec]

                self.fields[name] = self.f.createVariable(
                    name, prec, coords, fill_value=fill_val, zlib=False)

                if 'units' in field:
                    self.fields[name].long_name = name.encode()
                    self.fields[name].coordinates = 'lon lat'.encode()
                    for key, val in field.items():
                        if isinstance(val, str):
                            val = val.encode()
                        setattr(self.fields[name], key, val)
                else:
                    raise ValueError('Field {0} missing units '
                                     'attribute'.format(name))
        return

    def allocate(self):
        self.data = {}
        for name, field in self.fields.items():
            self.data[name] = np.atleast_3d(np.zeros_like(field))
            if hasattr(field, '_FillValue'):
                self.data[name][:] = field._FillValue

    def nc_add_data_to_array(self, point):
        for name in self.three_dim_vars:
            self.data[name][:, point.y, point.x] = \
                point.df[name].values[self.slice]
        for name in self.four_dim_vars:
            varshape = self.f.variables[name].shape[1]
            for i in pyrange(varshape):
                subname = name + str(i)
                self.data[name][:, i, point.y, point.x] = \
                    point.df[subname].values[self.slice]

    def nc_add_data_standard(self, points):
        ys = points.get_ys()
        xs = points.get_xs()
        for p in points:
            for name in self.three_dim_vars:
                data = points.get_data(name, self.slice)
                self.f.variables[name][:, ys, xs] = data
            for name in self.four_dim_vars:
                varshape = self.f.variables[name].shape[1]
                for i in pyrange(varshape):
                    sn = name + str(i)
                    self.f.variables[name][:, i, ys, xs] = \
                        p.df[sn].values[self.slice]

    def nc_write_data_from_array(self):
        """ write completed data arrays to disk """
        for name in self.three_dim_vars:
            self.f.variables[name][:, :, :] = self.data[name]
        for name in self.four_dim_vars:
            self.f.variables[name][:, :, :, :] = self.data[name]

    def nc_write(self, nc_format):
        self.f = Dataset(self.filename, mode="w", clobber=True,
                         format=nc_format)
        self.f.set_fill_on()

    def nc_close(self):
        self.f.close()
        print('Closed: {0}'.format(self.filename))
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def _run(args):
    """Top level driver"""
    print('running now...')

    if args.create_batch:
        # ------------------------------------------------------------ #
        # Create batch files and exit
        batch(args.config_file, args.create_batch, args.batch_dir)
        # ------------------------------------------------------------ #
    else:
        # ------------------------------------------------------------ #
        # Read Configuration files
        config_dict = read_config(args.config_file,
                                  default_config=default_config)
        options = config_dict.pop('OPTIONS')
        global_atts = config_dict.pop('GLOBAL_ATTRIBUTES')
        if not options['regular_grid']:
            domain_dict = config_dict.pop('DOMAIN')
        else:
            domain_dict = None

        # set aside fields dict
        fields = config_dict

        vic2nc(options, global_atts, domain_dict, fields)
        # ------------------------------------------------------------ #
    return
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def vic2nc(options, global_atts, domain_dict, fields):
    """ Convert ascii VIC files to netCDF format"""

    # determine run mode
    if (options['memory_mode'] == 'standard') \
            and (options['chunksize'] in ['all', 'All', 'ALL', 0]):
        memory_mode = 'big_memory'
    else:
        memory_mode = options['memory_mode']

    print("\n-------------------------------")
    print("Configuration File Options")
    print("-------------OPTIONS-------------")
    for pair in options.items():
        print("{0}: {1}".format(*pair))
    print('Fields: {0}'.format(", ".join(fields.keys())))
    if domain_dict:
        print("-------------DOMAIN--------------")
        for pair in domain_dict.items():
            print("{0}: {1}".format(*pair))
    print("--------GLOBAL_ATTRIBUTES--------")
    for pair in global_atts.items():
        print("{0}: {1}".format(*pair))
    print("--------RUN MODE--------")
    print('Memory Mode: {0}'.format(memory_mode))
    if memory_mode == 'standard':
        print('Chunksize={0}'.format(options['chunksize']))
    print("---------------------------------\n")
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Make output directory
    if not os.path.exists(options['out_directory']):
        os.makedirs(options['out_directory'])
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Make pairs (i.e. find inds)
    files = glob(options['input_files'])
    points = get_file_coords(files)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get target grid information
    if domain_dict:
        domain = read_domain(domain_dict)
        target_grid_file = path.split(domain_dict['filename'])[1]
        global_atts['target_grid_file'] = target_grid_file
    else:
        # must be a regular grid, build from file names
        domain = calc_grid(points.get_lats(), points.get_lons())
        target_grid_file = None
        domain_dict = {'y_x_dims': ['lat', 'lon']}
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get grid index locations
    points = get_grid_inds(domain, points)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get timestamps
    if options['input_file_format'].lower() == 'ascii':
        if ('bin_start_date' in options
                and 'bin_end_date' in options
                and 'bin_dt_sec' in options):
            vic_datelist, vic_ordtime = make_dates(
                options['bin_start_date'],
                options['bin_end_date'],
                options['bin_dt_sec'],
                calendar=options['calendar'])
        else:
            vic_datelist = get_dates(files[0])
            vic_ordtime = date2num(vic_datelist, TIMEUNITS,
                                   calendar=options['calendar'])
    elif options['input_file_format'].lower() in ['binary', 'netcdf']:
        vic_datelist, vic_ordtime = make_dates(options['bin_start_date'],
                                               options['bin_end_date'],
                                               options['bin_dt_sec'],
                                               calendar=options['calendar'])
    else:
        raise ValueError('Unknown input file format: {}. Valid options are '
                         'ascii or binary'.format(
                             options['input_file_format']))
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Determine time segmentation
    if options['start_date']:
        start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM)
        if start_date < vic_datelist[0]:
            print("WARNING: Start date in configuration file is before "
                  "first date in file.")
            start_date = vic_datelist[0]
            print('WARNING: New start date is {0}'.format(start_date))
    else:
        start_date = vic_datelist[0]

    if options['end_date']:
        end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM)
        if end_date > vic_datelist[-1]:
            print("WARNING: End date in configuration file is after "
                  "last date in file.")
            end_date = vic_datelist[-1]
            print('WARNING: New end date is {0}'.format(end_date))
    else:
        end_date = vic_datelist[-1]

    # Ordinal Time
    start_ord = date2num(start_date, TIMEUNITS, calendar=options['calendar'])
    end_ord = date2num(end_date, TIMEUNITS, calendar=options['calendar'])

    print("netCDF Start Date: {0}".format(start_date))
    print("netCDF End Date: {0}".format(end_date))

    segment_dates = []
    if options['time_segment'] == 'day':
        # calendar insensitive
        num_segments = int(np.ceil(end_ord - start_ord))
        if start_date.hour == 0:
            segment_dates = num2date(np.arange(start_ord, end_ord + 1, 1),
                                     TIMEUNITS, calendar=options['calendar'])
        else:
            # allow start at time other than 0
            # (list.append returns None, so build the list explicitly)
            temp = [start_ord] + list(np.arange(np.ceil(start_ord),
                                                end_ord + 1, 1))
            segment_dates = num2date(temp, TIMEUNITS,
                                     calendar=options['calendar'])
    elif options['time_segment'] == 'month':
        num_segments = (end_date.year - start_date.year) * 12 \
            + end_date.month - start_date.month + 1
        month = start_date.month
        year = start_date.year
        for i in pyrange(num_segments + 1):
            segment_dates.append(datetime(year, month, 1))
            month += 1
            if month == 13:
                month = 1
                year += 1
    elif options['time_segment'] == 'year':
        num_segments = end_date.year - start_date.year + 1
        year = start_date.year
        for i in pyrange(num_segments + 1):
            segment_dates.append(datetime(year, 1, 1))
            year += 1
    elif options['time_segment'] == 'decade':
        num_segments = (end_date.year - start_date.year) // 10 + 1
        year = start_date.year
        for i in pyrange(num_segments + 1):
            segment_dates.append(datetime(year, 1, 1))
            year += 10
    elif options['time_segment'] == 'all':
        num_segments = 1
        segment_dates = [start_date, end_date]
    else:
        raise ValueError('Unknown time_segment option: '
                         '{0}'.format(options['time_segment']))
    print("Number of files: {0}".format(len(segment_dates) - 1))
    assert len(segment_dates) == num_segments + 1

    # Make sure the first and last dates are start/end_date
    segment_dates[0] = start_date
    segment_dates[-1] = end_date + timedelta(minutes=1)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Setup Segments
    segments = deque()

    for num in pyrange(num_segments):
        # Segment time bounds
        t0 = segment_dates[num]
        t1 = segment_dates[num + 1]

        # Get segment inds
        i0 = bisect_left(vic_datelist, t0)
        i1 = bisect_left(vic_datelist, t1)

        # Make segment filename (with path)
        if options['time_segment'] == 'day':
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y-%m-%d'))
        elif options['time_segment'] == 'month':
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y-%m'))
        elif options['time_segment'] == 'year':
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y'))
        elif options['time_segment'] == 'all':
            filename = "{0}.{1}-{2}.nc".format(options['out_file_prefix'],
                                               t0.strftime('%Y%m%d'),
                                               t1.strftime('%Y%m%d'))

        filename = path.join(options['out_directory'], filename)

        # Setup segment and initialize netcdf
        segment = Segment(num, i0, i1, options['out_file_format'],
                          filename, memory_mode=memory_mode)
        segment.nc_globals(**global_atts)
        segment.nc_time(t0, t1, vic_ordtime, options['calendar'])
        segment.nc_dimensions(snow_bands=options['snow_bands'],
                              veg_tiles=options['veg_tiles'],
                              soil_layers=options['soil_layers'])

        segment.nc_domain(domain)
        segment.nc_fields(fields, domain_dict['y_x_dims'],
                          options['precision'])

        print(repr(segment))
        segments.append(segment)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get column numbers and names (will help speed up reading)
    names = []
    usecols = []
    dtypes = []
    bin_dtypes = []
    bin_mults = []

    if options['precision'] == 'double':
        prec = NC_DOUBLE
    else:
        prec = NC_FLOAT

    for name, field in fields.items():
        if not np.isscalar(field['column']):
            # multiple levels
            for i, col in enumerate(field['column']):
                names.append(name + str(i))
                usecols.append(col)
            if 'type' in field:
                if type(field['type']) == list:
                    dtypes.extend(field['type'])
                else:
                    dtypes.extend([field['type']] * len(field['column']))
            else:
                dtypes.extend([prec] * len(field['column']))

            if options['input_file_format'].lower() == 'binary':
                if 'bin_dtype' in field:
                    if type(field['bin_dtype']) == list:
                        bin_dtypes.extend(field['bin_dtype'])
                    else:
                        bin_dtypes.extend([field['bin_dtype']] *
                                          len(field['column']))
                else:
                    raise ValueError('bin_dtype not in field: {}'.format(name))

                if 'bin_mult' in field:
                    if type(field['bin_mult']) == list:
                        bin_mults.extend(field['bin_mult'])
                    else:
                        bin_mults.extend([field['bin_mult']] *
                                         len(field['column']))
                else:
                    bin_mults.extend([1.0] * len(field['column']))
        else:
            # no levels
            names.append(name)
            usecols.append(field['column'])

            if 'type' in field:
                dtypes.append(field['type'])
            else:
                dtypes.append(prec)

            if options['input_file_format'].lower() == 'binary':
                if 'bin_dtype' in field:
                    bin_dtypes.append(field['bin_dtype'])
                else:
                    raise ValueError('bin_dtype not in field: {}'.format(name))

                if 'bin_mult' in field:
                    bin_mults.append(field['bin_mult'])
                else:
                    bin_mults.append(1.0)

    print('setting point attributes (fileformat, names, usecols, and dtypes)')

    # pandas.read_table does not 'honor' the order of the columns in usecols
    # it simply uses them in ascending order. So the names need to be sorted
    # the same way. For example, if the columns in the VIC file are:
    # 3: prcp; 4: evap; 5: runoff; 6: baseflow; 7: sm1; 8: sm2; 9: sm3; 10: swe
    # and this is parsed from the configuration file as
    # usecols = [3, 4, 5, 6, 10, 7, 8, 9]
    # names=['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3']
    # then without sorting, the netcdf file will have the wrong variables:
    # nc_swe will contain sm1, nc_sm1 will contain sm2, nc_sm2: sm3 and
    # nc_sm3: swe
    # the following will ensure that the names are sorted in increasing column
    # order. Note that sorted(usecols) is not strictly necessary, since
    # apparently that is done in read_table, but it keeps the names and columns
    # in the same order
    names = [x for (y, x) in sorted(pyzip(usecols, names))]
    usecols = sorted(usecols)
    points.set_names(names)
    points.set_usecols(usecols)
    points.set_dtypes(dtypes)
    # set binary attributes
    if options['input_file_format'].lower() == 'binary':
        points.set_bin_dtypes(bin_dtypes)
        points.set_bin_mults(bin_mults)
    points.set_fileformat(options['input_file_format'])
    print('done')
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    if memory_mode == 'big_memory':
        # ------------------------------------------------------------ #
        # run in big memory mode
        for i, segment in enumerate(segments):
            segments[i].allocate()

        while points:
            point = points.popleft()
            point.open()
            point.read()
            point.close()

            for segment in segments:
                segment.nc_add_data_to_array(point)

        for segment in segments:
            segment.nc_write_data_from_array()
            segment.nc_close()
        # ------------------------------------------------------------ #
    elif memory_mode == 'standard':
        # ------------------------------------------------------------ #
        # Open VIC files and put data into netcdfs
        chunk = Plist()
        while points:
            point = points.popleft()
            point.open()
            point.read()
            point.close()
            chunk.append(point)
            if len(chunk) > int(options['chunksize']) or len(points) == 0:
                for segment in segments:
                    segment.nc_add_data_standard(chunk)
                chunk = Plist()
            del point
        # ------------------------------------------------------------ #

        # ------------------------------------------------------------ #
        # Close the netcdf files
        for segment in segments:
            segment.nc_close()
        # ------------------------------------------------------------ #
    elif memory_mode == 'original':
        # ------------------------------------------------------------ #
        # Run in original memory mode (a.k.a. vic2nc.c mode)
        # Open all files
        for point in points:
            point.open()

        while segments:
            segment = segments.popleft()
            segment.allocate()
            count = segment.count

            for point in points:
                point.read(count)
                segment.nc_add_data_to_array(point)

            segment.nc_write_data_from_array()
            segment.nc_close()

        for point in points:
            point.close()
        # ------------------------------------------------------------ #
    return
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def get_file_coords(files):
    """
    Get list of Point objects
    """
    points = Plist()

    for i, filename in enumerate(files):
        # fname = path.split(f)[1][-16:]  # just look at last 16 characters
        f = filename[-22:]  # just look at last 22 characters
        lat, lon = list(map(float, findall(r"[-+]?\d*\.\d+|\d+", f)))[-2:]
        points.append(Point(lat=lat, lon=lon, filename=filename))

    return points
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def get_dates(file):
    """
    Read the first file in the input directory and create an ordinal based
    timeseries.
    Also find the indices to split the time series into months and years.
    """
    hours = (0, 1, 2, 3)
    days = (0, 1, 2)
    try:
        data = np.loadtxt(file, usecols=hours, dtype=int)
        datelist = [datetime(*d) for d in data]
    except (ValueError, TypeError):
        data = np.loadtxt(file, usecols=days, dtype=int)
        datelist = [datetime(*d) for d in data]

    # check to make sure we haven't used daily by mistake
    # (creating a bunch of duplicate times)
    newlist = []
    for i in datelist:
        if i not in newlist:
            newlist.append(i)
        else:
            raise ValueError('Found duplicate datetimes in datelist')

    print('VIC startdate: {0}'.format(datelist[0]))
    print('VIC enddate: {0}'.format(datelist[-1]))

    return datelist
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def make_dates(start, end, dt, calendar='standard'):
    """
    Return a list of datetime object from inputs of
    start - python date string (i.e. 1989-01-01-00)
    end - python date string (i.e. 1989-01-01-23)
    dt - int or float timestep in seconds
    """
    start = map(int, start.split('-'))
    end = map(int, end.split('-'))
    start_ord = date2num(datetime(*start), TIMEUNITS, calendar=calendar)
    end_ord = date2num(datetime(*end), TIMEUNITS, calendar=calendar)
    step = float(dt) / SECSPERDAY
    ordlist = np.arange(start_ord, end_ord + step, step)
    datelist = num2date(ordlist, TIMEUNITS, calendar=calendar)
    return datelist, ordlist
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def read_domain(domain_dict):
    print('reading domain file: {0}'.format(domain_dict['filename']))
    f = Dataset(domain_dict['filename'])
    domain = {'lon': NcVar(f, domain_dict['longitude_var']),
              'lat': NcVar(f, domain_dict['latitude_var'])}

    if domain_dict['copy_vars']:
        for varname in domain_dict['copy_vars']:
            domain[varname] = NcVar(f, varname)

    f.close()

    return domain
# -------------------------------------------------------------------- #


# -------------------------------------------------------------------- #
def batch(config_file, create_batch, batch_dir):
    """Create a set of batch configuration files"""
    # Read Configuration files
    config_dict = read_config(config_file)
    options = config_dict.pop('OPTIONS')
    global_atts = config_dict.pop('GLOBAL_ATTRIBUTES')
    domain_dict = config_dict.pop('DOMAIN', None)
    fields = config_dict

    config = SafeConfigParser()
    config.optionxform = str

    # Figure out what to call the new files
    nameprefix = os.path.splitext(os.path.split(config_file)[1])[0]

    if create_batch == 'variables':
        # batch by variables
        # options section
        config.add_section('OPTIONS')
        for option, value in options.items():
            if type(value) == list:
                try:
                    value = ", ".join(value)
                except TypeError:
                    value = ", ".join(repr(e) for e in value)
            elif type(value) != str:
                value = str(value)
            config.set('OPTIONS', option, str(value))

        # global_atts section
        config.add_section('GLOBAL_ATTRIBUTES')
        for option, value in global_atts.items():
            if type(value) == list:
                try:
                    value = ", ".join(value)
                except TypeError:
                    value = ", ".join(repr(e) for e in value)
            elif type(value) != str:
                value = str(value)
            config.set('GLOBAL_ATTRIBUTES', option, str(value))

        # domain dict section
        if domain_dict:
            config.add_section('DOMAIN')
            for option, value in domain_dict.items():
                if type(value) == list:
                    try:
                        value = ", ".join(value)
                    except TypeError:
                        value = ", ".join(repr(e) for e in value)
                elif type(value) != str:
                    value = str(value)

                config.set('DOMAIN', option, value.strip("'"))

        for var, field in fields.items():
            suffix = "_{0}.cfg".format(var)
            new_cfg_file = os.path.join(batch_dir, nameprefix + suffix)

            # this var
            config.add_section(var)
            for option, value in field.items():
                if type(value) == list:
                    try:
                        value = ", ".join(value)
                    except TypeError:
                        value = ", ".join(repr(e) for e in value)
                elif type(value) != str:
                    value = str(value)
                config.set(var, option, str(value))

            # write that config
            with open(new_cfg_file, 'wb') as cf:
                config.write(cf)

            # clear the var section
            config.remove_section(var)
    else:
        # start with existing config
        config.read(config_file)

        # by time
        start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM)
        end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM)

        t0 = start_date

        if create_batch == 'years':
            td = relativedelta.relativedelta(years=1)
            t1 = datetime(t0.year, 12, 31, end_date.hour)
        elif create_batch == 'months':
            td = relativedelta.relativedelta(months=1)
        elif create_batch == 'days':
            # days option is only valid for gregorian calendar
            td = relativedelta.relativedelta(days=1)

        hour = relativedelta.relativedelta(hours=-1)

        i = 0
        while t0 < end_date:
            i += 1
            t1 = t0 + td
            if t1 > end_date:
                t1 = end_date
            else:
                t1 += hour

            suffix = '_{0}'.format(i)
            new_cfg_file = os.path.join(batch_dir, nameprefix + suffix)

            # Write config replacing start and end dates
            config.set('OPTIONS', 'start_date', t0.strftime(TIMESTAMPFORM))
            config.set('OPTIONS', 'end_date', t1.strftime(TIMESTAMPFORM))

            with open(new_cfg_file, 'wb') as cf:
                config.write(cf)

            t0 += td
    return
# -------------------------------------------------------------------- #
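
# --- added illustrative sketch (not part of the original tool) --- #
# A minimal, self-contained demo of the usecols/names sorting trick described
# in the comment block inside vic2nc() above. The column numbers and variable
# names here are made up for the example, not read from any real VIC
# configuration file.
def _demo_sort_names_by_column():
    usecols = [3, 4, 5, 6, 10, 7, 8, 9]
    names = ['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3']
    # pair each name with its source column, then sort by column number so the
    # names line up with pandas' ascending-column read order
    names = [x for (y, x) in sorted(zip(usecols, names))]
    usecols = sorted(usecols)
    # names is now ['prcp', 'evap', 'runoff', 'baseflow', 'sm1', 'sm2',
    # 'sm3', 'swe'] and usecols is [3, 4, 5, 6, 7, 8, 9, 10]
    return usecols, names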
StarcoderdataPython
3291543
# exercise 43 was sort of a demonstration of how he created his own game
StarcoderdataPython
1628454
#!/usr/bin/python
# _*_ coding:utf-8 _*_

import flickrapi
import json
import time
import os

statei = os.path.isfile("done_time_ids.txt")
statef = os.path.isfile("raw_time_data.csv")

if statei == False:
    stateicreate = open("done_time_ids.txt", "w")
    stateicreate.close()
else:
    pass

if statef == False:
    statefcreate = open("raw_time_data.csv", "w")
    statefcreate.close()
else:
    pass

# Flickrapi documentation: https://stuvel.eu/flickrapi-doc/2-calling.html
# FIRST: get your own API-keys!
api_key = u"YOUR_API_KEY_HERE"  # Request your own key and place the key inside the quotes.
api_secret = u"YOUR_API_SECRET_HERE"  # Request your own key and place the secret inside the quotes.

raw_time_file = open("raw_time_data.csv", "a")  # where your datapoints will be stored at
history = open("done_time_ids.txt", "r")  # all photo_ID's that have been added in the past.
donepre = history.readlines()  # Preventing adding the same photo twice.
history.close()

done = []
for item in donepre:
    item = item.strip()
    done.append(item)

donepids = open("done_time_ids.txt", "a")

all_photos = open("done_ids.txt", "r")
all_pids = all_photos.readlines()

print str(len(all_pids)) + " + photos will be checked"
print "Ready loading history."

flickr = flickrapi.FlickrAPI(api_key, api_secret, format='json')
flickr.authenticate_via_browser(perms='read')
# Requires read authentication:
# https://www.flickr.com/services/api/flickr.photos.getWithGeoData.html
# (Needs to be done once per Computer running this)

add_data = True  # needed for the while loop
d = 0
d = d + len(done)
tot = str(len(all_pids))

while int(d) != int(len(all_pids)):
    pid = str(all_pids[d])
    pid = pid.strip()
    if pid not in donepre:
        try:
            print "Processing photo-id: " + str(pid) + " at: " + str(d + 1) + " of " + tot + "."
            timeframe = flickr.photos.getInfo(photo_id=pid)
            parsed = json.loads(timeframe.decode("utf-8"))  # returns a dictionary
            data = parsed["photo"]
            uploadtime = data["dateuploaded"]
            shottime = data["dates"]
            shottakentime = shottime["taken"]
            donepre.append(pid)
            donepids.write(pid + "\n")
            raw_time_file.write('"' + pid + '";"' + shottakentime + '";"' + uploadtime + '" \n')
        except:
            print "Error with photo " + str(pid) + "."
            raw_time_file.write('"' + pid + '";"null";"null" \n')
    d = d + 1

raw_time_file.close()  # Closing the CSV file
print "Process complete"
ext = raw_input("Press enter to terminate the program")
StarcoderdataPython
3264362
# -*- coding: utf-8 -*-

# visigoth: A lightweight Python3 library for rendering data visualizations in SVG
#
# Copyright (C) 2020  Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import unittest
import datetime

from visigoth import Diagram
from visigoth.internal.utils.test_utils import TestUtils
from visigoth.charts.area import Area
from visigoth.utils import DiscreteHueManager
from visigoth.common import DiscreteHueLegend, Text
from visigoth.containers import Grid, Sequence


class TestArea(unittest.TestCase):

    def test_basic(self):
        d = Diagram(fill="white")

        dp = DiscreteHueManager()

        data = [
            (1, 12, "A"), (2, 14, "A"), (3, 11, "A"), (4, 9, "A"), (5, 7, "A"),
            (1, 2, "B"), (2, 4, "B"), (3, 7, "B"), (4, 10, "B"), (5, 11, "B"),
            (1, 2, "C"), (2, 3, "C"), (3, 1, "C"), (4, 3, "C"), (5, 2, "C")
        ]

        d.add(Text("Individual areas"))
        g = Grid()
        row = 0
        col = 0
        for cat in ["A", "B", "C"]:
            s = Sequence()
            s.add(Text(cat))
            area0 = Area([d for d in data if d[2] == cat], x=0, y=1,
                         hue_manager=dp, height=300, width=300)
            s.add(area0)
            g.add(row, col, s)
            col += 1
            if col == 2:
                row += 1
                col = 0
        d.add(g)

        d.add(Text("Smoothing"))

        d.add(Text("Stacked areas"))
        area1 = Area(data, x=0, y=1, size=1, hue=2, height=600,
                     hue_manager=dp, width=600)
        area1.getMarkerManager().setMaxRadius(10)
        d.add(area1)
        legend1 = DiscreteHueLegend(dp, 400, legend_columns=2)
        d.add(legend1)

        d.add(Text("Stepped"))
        area1a = Area(data, x=0, y=1, size=1, hue=2, height=600,
                      hue_manager=dp, width=600, stepping=True)
        area1a.getMarkerManager().setMaxRadius(10)
        d.add(area1a)
        legend1 = DiscreteHueLegend(dp, 400, legend_columns=2)
        d.add(legend1)

        for smoothing in [0.1, 0.3, 0.5]:
            d.add(Text("Smoothing=%f" % (smoothing)))
            area2 = Area(data, x=0, y=1, hue=2, height=400,
                         smoothing=smoothing, hue_manager=dp, width=400)
            d.add(area2)

        data3 = [{"y": i, "x": datetime.datetime(1999, 9, 3) + (i ** 1.5) * datetime.timedelta(days=1)}
                 for i in range(0, 20)]
        hue_manager3 = DiscreteHueManager()
        hue_manager3.setDefaultHue("purple")
        line3 = Area(data3, x="x", y="y", hue_manager=hue_manager3,
                     width=600, height=600, smoothing=0.0)
        d.add(line3)

        data4 = [{"x": i, "y": (i ** 0.6) * datetime.timedelta(days=1)}
                 for i in range(0, 20)]
        hue_manager4 = DiscreteHueManager()
        hue_manager4.setDefaultHue("red")
        line4 = Area(data4, x="x", y="y", hue_manager=hue_manager4,
                     width=600, height=600, smoothing=0.0)
        d.add(line4)

        TestUtils.draw_output(d, "test_area")


if __name__ == "__main__":
    unittest.main()
StarcoderdataPython
1631437
from dataclasses import dataclass

from .. import Command, ParsedCommand, Parser
from ...nodes import PositionNode
from ...parser_types import Position


@dataclass()
class ParsedSetworldspawnCommand(ParsedCommand):
    command: str
    position: PositionNode = None

    def __str__(self):
        if self.position is not None:
            return f'{self.command} {self.position}'
        return self.command


setworldspawn = Command('setworldspawn', parsed=ParsedSetworldspawnCommand)

# setworldspawn [<pos>]

# - setworldspawn <pos>
setworldspawn.add_variation(
    Parser(Position(), 'position'),
)

# - setworldspawn
setworldspawn.add_variation()
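
# --- added illustrative note (not part of the original module) --- #
# The two variations registered above mean both command forms parse:
#   setworldspawn          -> ParsedSetworldspawnCommand(command='setworldspawn')
#   setworldspawn <pos>    -> the same, with `position` filled by the Position parser
# The exact textual syntax of <pos> depends on this package's parser_types
# module, which is assumed here rather than shown.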
StarcoderdataPython
72897
<reponame>Alejandro-sin/Learning_Notebooks
'''
    I'm going to try to catch different errors with try/except.
    The try captures a specific error, in the example a TypeError, and prints
    a predetermined message. The except clause here only handles TypeErrors.
'''


def palindrome(word):
    if word == word[::-1]:
        return print(f'{word} es un Palindromo')


try:
    # palindrome(1)  # TypeError: 'int' object is not subscriptable
    palindrome("ana")
except TypeError:
    print('Solo es posible ingresar strings')
else:
    print('Todo está bien')
finally:
    print('Se ejecutó todo')
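
# --- added illustrative sketch (not part of the original notebook) --- #
# Shows how the same try block could handle more than one exception type at
# once; ValueError is just an illustrative second case here.
def safe_palindrome(word):
    try:
        palindrome(word)
    except (TypeError, ValueError) as err:
        print(f'Input rejected: {err}')


safe_palindrome(12321)  # prints "Input rejected: ..." instead of crashing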
StarcoderdataPython
3221499
<filename>systemrdl/core/value_normalization.py
import hashlib
from typing import Any, Optional, Union, List

from .. import rdltypes
from .. import node


def normalize(value: Any, owner_node: Optional[node.Node]=None) -> str:
    """
    Flatten an RDL value into a unique string that is used for type
    normalization.
    """
    # Determine what type is being flattened
    if isinstance(value, bool):
        return normalize_boolean(value)
    elif isinstance(value, int):
        return normalize_scalar(value)
    elif isinstance(value, str):
        return normalize_string(value)
    elif isinstance(value, list):
        return normalize_array(value)
    elif isinstance(value, (rdltypes.BuiltinEnum, rdltypes.UserEnum)):
        return normalize_enum(value)
    elif isinstance(value, rdltypes.UserStruct):
        return normalize_struct(value)
    elif isinstance(value, node.Node):
        return normalize_component_ref(value, owner_node)
    elif isinstance(value, rdltypes.PropertyReference):
        return normalize_property_ref(value, owner_node)
    elif rdltypes.is_user_enum(value):
        return normalize_user_enum_type(value)
    else:
        # Should never get here
        raise RuntimeError(value)


def normalize_scalar(value: int) -> str:
    """
    5.1.1.4 - c.1:
    Scalar values shall be rendered using their hexadecimal representation.
    """
    return "%x" % value


def normalize_boolean(value: bool) -> str:
    """
    5.1.1.4 - c.2:
    Boolean values shall be rendered using either t for true or f for false.
    """
    if value:
        return "t"
    else:
        return "f"


def normalize_string(value: str) -> str:
    """
    5.1.1.4 - c.3:
    String values shall be rendered using the first eight characters of their
    md5 (Message-Digest Algorithm) checksum.
    """
    md5 = hashlib.md5(value.encode('utf-8')).hexdigest()
    return md5[:8]


def normalize_enum(value: Union[rdltypes.BuiltinEnum, rdltypes.UserEnum]) -> str:
    """
    5.1.1.4 - c.4:
    Enum values shall be rendered using their enumerator literal.
    """
    return value.name


def normalize_array(value: List[Any]) -> str:
    """
    5.1.1.4 - c.5:
    Arrays shall be rendered by:
    1. generating the normalized values of its elements,
    2. joining these elements with single underscores (_) into a single
       character sequence, and
    3. using the first eight characters of the md5 checksum of this
       character sequence

    ... which can be semi-formalized as:
        subsequence( md5( join( normalized_values, '_' ) ), 0, 8 )
    """
    norm_elements = []
    for element in value:
        norm_elements.append(normalize(element))

    norm_str = "_".join(norm_elements)
    md5 = hashlib.md5(norm_str.encode('utf-8')).hexdigest()
    return md5[:8]


def normalize_struct(value: rdltypes.UserStruct) -> str:
    """
    5.1.1.4 - c.6:
    Structs shall be rendered by:
    1. generating the normalized value of each member,
    2. joining each member's name with its normalized value, separated by a
       single underscore (_),
    3. joining the member character sequences with single underscores,
    4. using the first eight characters of the md5 checksum of this
       character sequence

    ... which can be semi-formalized as:
        member_normalization = concat( member_name, '_', normalized_member_value )
        subsequence( md5( join( apply( struct_members, member_normalization ), '_' ) ), 0, 8 )
    """
    norm_elements = []
    for member_name, member_value in value._values.items():
        norm_elements.append("%s_%s" % (member_name, normalize(member_value)))

    norm_str = "_".join(norm_elements)
    md5 = hashlib.md5(norm_str.encode('utf-8')).hexdigest()
    return md5[:8]


def normalize_component_ref(value: node.Node, owner_node: node.Node) -> str:
    """
    Hash of relative path from owner of the property to the target component
    """
    path = value.get_rel_path(owner_node)
    md5 = hashlib.md5(path.encode('utf-8')).hexdigest()
    return md5[:8]


def normalize_property_ref(value: rdltypes.PropertyReference, owner_node: node.Node) -> str:
    """
    Hash of relative path from owner of the property to the target
    component's property
    """
    path = "%s->%s" % (value.node.get_rel_path(owner_node), value.name)
    md5 = hashlib.md5(path.encode('utf-8')).hexdigest()
    return md5[:8]


def normalize_user_enum_type(value: type) -> str:
    """
    Enum type references shall be rendered using their enumeration type name.
    """
    return value.__name__
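
# --- added illustrative sketch (not part of the original module) --- #
# Walks the 5.1.1.4-c.5 array rule by hand with plain hashlib, using made-up
# scalar values; it mirrors what normalize_scalar/normalize_array above
# compute, so the md5-of-joined-elements behavior can be checked in isolation.
if __name__ == "__main__":
    elements = [10, 255]  # scalars normalize to hex: 'a', 'ff'
    norm_str = "_".join("%x" % e for e in elements)  # -> 'a_ff'
    print(hashlib.md5(norm_str.encode('utf-8')).hexdigest()[:8])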
StarcoderdataPython
1654412
number = int(input('how many numbers you want to sum : '))
lst = []
for i in range(number):
    element = int(input('enter your number : '))  # 123
    lst.append(element)

print('sum of elements is = ', sum(lst))
print('max element is = ', max(lst))
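
# --- added illustrative note (not part of the original snippet) --- #
# The same loop can be written as a list comprehension that reads the same
# count of numbers from stdin:
#
#     lst = [int(input('enter your number : ')) for _ in range(number)]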
StarcoderdataPython
73482
# -*- coding: utf-8 -*-
from functools import partial

s_open = partial(open, mode='r')
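
# --- added usage sketch (not part of the original snippet) --- #
# s_open behaves like open() with the mode pre-bound to 'r'; the file name
# below is only an illustrative placeholder.
#
#     with s_open('example.txt') as fh:
#         text = fh.read()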
StarcoderdataPython
1610996
<filename>carsus/io/tests/test_ionization.py<gh_stars>10-100
import pytest
import pandas as pd

from pandas.util.testing import assert_series_equal
from numpy.testing import assert_almost_equal
from sqlalchemy.orm import joinedload
from carsus.model import Ion
from carsus.io.nist.ionization import (NISTIonizationEnergiesParser,
                                       NISTIonizationEnergiesIngester,
                                       NISTIonizationEnergies)

test_data = """
<h2> Be specta </h2>
<pre>
--------------------------------------------------------------------------------------------
At. num | Ion Charge | Ground Shells | Ground Level |      Ionization Energy (a) (eV)      |
--------|------------|---------------|--------------|--------------------------------------|
      4 |          0 | 1s2.2s2       | 1S0          | 9.3226990(70)                        |
      4 |         +1 | 1s2.2s        | 2S*<1/2>     | 18.211153(40)                        |
      4 |         +2 | 1s2           | (1,3/2)<2>   | <a class=bal>[</a>153.8961980(40)<a class=bal>]</a> |
      4 |         +3 | 1s            | 2S<1/2>      | <a class=bal>(</a>217.7185766(10)<a class=bal>)</a> |
--------------------------------------------------------------------------------------------
</pre>
"""

expected_at_num = [4, 4, 4, 4]

expected_ion_charge = [0, 1, 2, 3]

expected_indices = list(zip(expected_at_num, expected_ion_charge))

expected_ground_shells = ('ground_shells',
                          ['1s2.2s2', '1s2.2s', '1s2', '1s'])

expected_ground_level = ('ground_level',
                         ['1S0', '2S*<1/2>', '(1,3/2)<2>', '2S<1/2>'])

expected_j = ('J', [0.0, 0.5, 2.0, 0.5])

expected_ioniz_energy_value = ('ionization_energy_value',
                               [9.3226990, 18.211153,
                                153.8961980, 217.7185766])

expected_ioniz_energy_uncert = ('ionization_energy_uncert',
                                [7e-6, 4e-5, 4e-6, 1e-6])

expected_ioniz_energy_method = ('ionization_energy_method',
                                ['meas', 'meas', 'intrpl', 'theor'])


@pytest.fixture
def ioniz_energies_parser():
    parser = NISTIonizationEnergiesParser(input_data=test_data)
    return parser


@pytest.fixture
def ioniz_energies(ioniz_energies_parser):
    return ioniz_energies_parser.prepare_ioniz_energies()


@pytest.fixture
def ground_levels(ioniz_energies_parser):
    return ioniz_energies_parser.prepare_ground_levels()


@pytest.fixture
def ioniz_energies_ingester(memory_session):
    ingester = NISTIonizationEnergiesIngester(memory_session)
    ingester.parser(test_data)
    return ingester


@pytest.fixture(params=[expected_ground_shells,
                        expected_ground_level,
                        expected_ioniz_energy_value,
                        expected_ioniz_energy_uncert,
                        expected_ioniz_energy_method])
def expected_series_ioniz_energies(request):
    index = pd.MultiIndex.from_tuples(tuples=expected_indices,
                                      names=['atomic_number', 'ion_charge'])
    name, data = request.param
    return pd.Series(data=data, name=name, index=index)


@pytest.fixture(params=[expected_j])
def expected_series_ground_levels(request):
    index = pd.MultiIndex.from_tuples(tuples=expected_indices,
                                      names=['atomic_number', 'ion_charge'])
    name, data = request.param
    return pd.Series(data=data, name=name, index=index)


def test_prepare_ioniz_energies_null_values(ioniz_energies):
    assert all(pd.notnull(ioniz_energies["ionization_energy_value"]))


def test_prepare_ioniz_energies(ioniz_energies,
                                expected_series_ioniz_energies):
    series = ioniz_energies[expected_series_ioniz_energies.name]
    assert_series_equal(series, expected_series_ioniz_energies)


def test_prepare_ground_levels(ground_levels,
                               expected_series_ground_levels):
    series = ground_levels[expected_series_ground_levels.name]
    assert_series_equal(series, expected_series_ground_levels)


@pytest.mark.parametrize("index, value, uncert",
                         zip(expected_indices,
                             expected_ioniz_energy_value[1],
                             expected_ioniz_energy_uncert[1]))
def test_ingest_ionization_energies(index, value, uncert, memory_session,
                                    ioniz_energies_ingester):
    ioniz_energies_ingester.ingest(ionization_energies=True,
                                   ground_levels=False)
    atomic_number, ion_charge = index
    ion = memory_session.query(Ion).\
        options(joinedload('ionization_energies')).\
        get((atomic_number, ion_charge))
    ion_energy = ion.ionization_energies[0]
    assert_almost_equal(ion_energy.quantity.value, value)
    assert_almost_equal(ion_energy.uncert, uncert)


@pytest.mark.parametrize("index, exp_j",
                         zip(expected_indices, expected_j[1]))
def test_ingest_ground_levels(index, exp_j, memory_session,
                              ioniz_energies_ingester):
    ioniz_energies_ingester.ingest(ionization_energies=True,
                                   ground_levels=True)
    atomic_number, ion_charge = index
    ion = memory_session.query(Ion).\
        options(joinedload('levels')).\
        get((atomic_number, ion_charge))
    ground_level = ion.levels[0]
    assert_almost_equal(ground_level.J, exp_j)


@pytest.mark.remote_data
def test_ingest_nist_asd_ion_data(memory_session):
    ingester = NISTIonizationEnergiesIngester(memory_session, spectra="h-uuh")
    ingester.ingest(ionization_energies=True, ground_levels=True)


@pytest.mark.remote_data
def test_ground_levels_missing_j():
    ionization_energies = NISTIonizationEnergies(spectra="Nd")
    ground_levels = ionization_energies.get_ground_levels()
    ground_levels = ground_levels.set_index(['atomic_number', 'ion_charge'])

    assert ground_levels.loc[(60, 5)]['g'] == 1
    assert ground_levels.loc[(60, 6)]['g'] == 1
    assert ground_levels.loc[(60, 7)]['g'] == 1
    assert ground_levels.loc[(60, 8)]['g'] == 1
    assert ground_levels.loc[(60, 9)]['g'] == 1
    assert ground_levels.loc[(60, 10)]['g'] == 1


def test_nist_asd_version():
    nist_ionization = NISTIonizationEnergies('H')
    version = nist_ionization.version
    version_split = version.split('.')

    assert len(version_split) > 1

    to_int = [int(i) for i in version_split]
StarcoderdataPython
1631195
<filename>test/unit/factories/NetworkFactoryTest.py
import os
import unittest
from typing import Any

from src.shapeandshare.dicebox.config.dicebox_config import DiceboxConfig
from src.shapeandshare.dicebox.factories.network_factory import NetworkFactory
from src.shapeandshare.dicebox.models.network import Network
from src.shapeandshare.dicebox.models.optimizers import Optimizers


class NetworkFactoryTest(unittest.TestCase):
    TEST_DATA_BASE = "test/fixtures"
    local_config_file = "%s/dicebox.config" % TEST_DATA_BASE

    dicebox_config: DiceboxConfig = DiceboxConfig(config_file=local_config_file)
    network_factory: NetworkFactory = NetworkFactory(config=dicebox_config)

    def setUp(self):
        self.maxDiff = None

    def test_create_random_network(self):
        new_network_one: Network = self.network_factory.create_random_network()
        new_network_two: Network = self.network_factory.create_random_network()
        self.assertNotEqual(new_network_one.decompile(), new_network_two.decompile())

    def test_create_network(self):
        new_network_one: Network = self.network_factory.create_random_network()
        decompiled_network_one: Any = new_network_one.decompile()
        new_network_two: Network = self.network_factory.create_network(decompiled_network_one)
        decompiled_network_two: Any = new_network_two.decompile()
        self.assertEqual(decompiled_network_one, decompiled_network_two)

    def test_should_throw_exception_when_asked_to_create_an_unknown_layer_type(self):
        os.environ["LAYER_TYPES"] = '["random", "unsupported"]'
        local_dicebox_config: DiceboxConfig = DiceboxConfig(config_file=self.local_config_file)
        local_network_factory: NetworkFactory = NetworkFactory(config=local_dicebox_config)

        definition = {
            "input_shape": 1,
            "output_size": 1,
            "optimizer": Optimizers.ADAM.value,
            "layers": [{"type": "random"}],
        }

        try:
            local_network_factory.create_network(network_definition=definition)
            self.assertTrue(False, "Expected exception not seen.")
        except Exception:
            self.assertTrue(True, "Expected exception seen.")

        del os.environ["LAYER_TYPES"]


if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    runner.run(NetworkFactoryTest())
StarcoderdataPython
3257608
<filename>blackbook/models/transaction.py
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from djmoney.models.fields import MoneyField
from djmoney.money import Money

from .base import get_default_currency
from .account import Account
from .category import Category
from .budget import BudgetPeriod

import uuid


class TransactionJournal(models.Model):
    class TransactionType(models.TextChoices):
        DEPOSIT = "deposit", "Deposit"
        START = "start", "Opening balance"
        RECONCILIATION = "reconciliation", "Reconciliation"
        TRANSFER = "transfer", "Transfer"
        WITHDRAWAL = "withdrawal", "Withdrawal"

    type = models.CharField(max_length=50, choices=TransactionType.choices, default=TransactionType.WITHDRAWAL)
    date = models.DateField(default=timezone.localdate)
    short_description = models.CharField(max_length=150)
    description = models.TextField(blank=True, null=True)
    uuid = models.UUIDField("UUID", default=uuid.uuid4, editable=False, db_index=True, unique=True)
    budget = models.ForeignKey(BudgetPeriod, on_delete=models.SET_NULL, blank=True, null=True, related_name="transactions")
    category = models.ForeignKey(Category, on_delete=models.SET_NULL, blank=True, null=True, related_name="transactions")
    source_accounts = models.JSONField(null=True)
    destination_accounts = models.JSONField(null=True)
    amount = MoneyField("amount", max_digits=15, decimal_places=2, default_currency=get_default_currency(), default=0)

    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ["date", "created"]
        get_latest_by = "date"

    def __str__(self):
        return self.short_description

    def _verify_transaction_type(self, type, transactions):
        if type in [self.TransactionType.START, self.TransactionType.RECONCILIATION]:
            return type

        owned_accounts = [Account.AccountType.ASSET_ACCOUNT, Account.AccountType.LIABILITIES_ACCOUNT]

        source_accounts = []
        destination_accounts = []

        for transaction in transactions:
            if transaction["amount"].amount > 0:
                destination_accounts.append(transaction["account"])
            else:
                source_accounts.append(transaction["account"])

        source_accounts = list(set(source_accounts))
        destination_accounts = list(set(destination_accounts))

        source_accounts_owned = True
        destination_accounts_owned = True

        if len(source_accounts) > 0:
            for account in source_accounts:
                if account.type not in owned_accounts:
                    source_accounts_owned = False

        if len(destination_accounts) > 0:
            for account in destination_accounts:
                if account.type not in owned_accounts:
                    destination_accounts_owned = False

        if len(source_accounts) > 0:
            if source_accounts_owned:
                if len(destination_accounts) > 0 and destination_accounts_owned:
                    return self.TransactionType.TRANSFER
                return self.TransactionType.WITHDRAWAL
            else:
                if len(destination_accounts) > 0 and destination_accounts_owned:
                    return self.TransactionType.DEPOSIT
                return self.TransactionType.WITHDRAWAL

        return self.TransactionType.DEPOSIT

    def _create_transactions(self, transactions):
        source_accounts = []
        destination_accounts = []

        for transaction in transactions:
            if transaction["amount"].amount > 0:
                destination_accounts.append(transaction["account"])
            else:
                source_accounts.append(transaction["account"])

        if self.type in [self.TransactionType.START, self.TransactionType.DEPOSIT, self.TransactionType.RECONCILIATION]:
            if len(destination_accounts) == 0:
                raise AttributeError("There should be at least one receiving transaction for this transaction type %s" % self.type)
        elif self.type == self.TransactionType.WITHDRAWAL:
            if len(source_accounts) == 0:
                raise AttributeError("There should be at least one spending transaction for this transaction type %s" % self.type)

        for transaction in transactions:
            if str(transaction["amount"].currency) != str(transaction["account"].currency):
                raise AttributeError("Amount should be in the same currency as the account it relates to (%s)" % transaction)

            self.transactions.create(
                account=transaction["account"], amount=transaction["amount"], foreign_amount=transaction.get("foreign_amount", None)
            )

    @classmethod
    def create(cls, transactions):
        """Transactions should be in a fixed format
        {
            "short_description",
            "description",
            "date",
            "type",
            "category",
            "budget",
            "transactions": [{
                "account",
                "amount",
                "foreign_amount"
            }]
        }"""

        journal = cls.objects.create(
            date=transactions["date"],
            short_description=transactions["short_description"],
            type=transactions["type"],
            description=transactions.get("description", None),
            category=transactions.get("category", None),
            budget=transactions.get("budget", None),
        )

        journal.type = journal._verify_transaction_type(type=transactions["type"], transactions=transactions["transactions"])
        journal._create_transactions(transactions=transactions["transactions"])
        journal.update_accounts()

        return journal

    def update(self, transactions):
        self.short_description = transactions["short_description"]
        self.description = transactions["description"]
        self.date = transactions["date"]
        self.type = transactions["type"]
        self.category = transactions.get("category", None)
        self.budget = transactions.get("budget", None)
        self.save()

        self.transactions.all().delete()
        self._create_transactions(transactions=transactions["transactions"])
        self.update_accounts()

    def update_accounts(self):
        source_accounts = self.get_source_accounts()
        destination_accounts = self.get_destination_accounts()

        self.source_accounts = [
            {"account": account.name, "slug": account.slug, "type": account.get_type_display(), "link_type": account.type, "icon": account.icon}
            for account in source_accounts
        ]
        self.destination_accounts = [
            {"account": account.name, "slug": account.slug, "type": account.get_type_display(), "link_type": account.type, "icon": account.icon}
            for account in destination_accounts
        ]

        self.amount = self.transactions.first().amount
        if self.type == self.TransactionType.WITHDRAWAL:
            self.amount = self.transactions.get(amount__lte=0).amount
        if self.type == self.TransactionType.DEPOSIT or self.type == self.TransactionType.TRANSFER:
            self.amount = self.transactions.get(amount__gte=0).amount
        if self.type == self.TransactionType.TRANSFER:
            self.amount = abs(self.amount)

        self.save()

    def get_source_accounts(self):
        accounts = Account.objects.filter(transactions__journal=self, transactions__amount__lte=0).distinct()
        return [account for account in accounts]

    def get_destination_accounts(self):
        accounts = Account.objects.filter(transactions__journal=self, transactions__amount__gte=0).distinct()
        return [account for account in accounts]


class Transaction(models.Model):
    account = models.ForeignKey(Account, on_delete=models.SET_NULL, blank=True, null=True, related_name="transactions")
    amount = MoneyField("amount", max_digits=15, decimal_places=2, default_currency=get_default_currency(), default=0)
    foreign_amount = MoneyField(
        "foreign amount", max_digits=15, decimal_places=2, default_currency=get_default_currency(), default=0, blank=True, null=True
    )
    uuid = models.UUIDField("UUID", default=uuid.uuid4, editable=False, db_index=True, unique=True)
    journal = models.ForeignKey(TransactionJournal, on_delete=models.CASCADE, related_name="transactions")

    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.journal.short_description
StarcoderdataPython
1637667
<gh_stars>0
# Generated by Django 2.2 on 2020-09-20 00:18

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0002_auto_20200919_2337'),
    ]

    operations = [
        migrations.AddField(
            model_name='machines',
            name='instance_name',
            field=models.TextField(default=0),
            preserve_default=False,
        ),
    ]
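
# --- added illustrative note (not part of the generated migration) --- #
# With preserve_default=False, Django uses default=0 only to backfill the new
# column on existing rows while the migration runs, then drops the default
# from the field definition afterwards.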
StarcoderdataPython
15534
def lcs(x, y):
    """ Longest Common Subsequence """
    n = len(x) + 1
    m = len(y) + 1

    table = [[0] * m for i in range(n)]

    for i in range(n):
        for j in range(m):
            # If either string is empty, then lcs = 0
            if i == 0 or j == 0:
                table[i][j] = 0
            elif x[i - 1] == y[j - 1]:
                table[i][j] = 1 + table[i-1][j-1]
            else:
                table[i][j] = max(table[i-1][j], table[i][j-1])

    return table[len(x)][len(y)]


if __name__ == '__main__':
    x = "AGGTAB"
    y = "GXTXAYB"
    print lcs(x, y)
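
# --- added illustrative sketch (not part of the original file) --- #
# For x = "AGGTAB" and y = "GXTXAYB" the table-based lcs above evaluates to 4
# (the subsequence "GTAB"). Below is a sketch of the same recurrence using
# only two rows, i.e. O(len(y)) extra space instead of the full table; it is
# kept Python 2 compatible like the file above.
def lcs_two_rows(x, y):
    prev = [0] * (len(y) + 1)
    for i in range(1, len(x) + 1):
        curr = [0] * (len(y) + 1)
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                curr[j] = 1 + prev[j - 1]
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[len(y)]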
StarcoderdataPython
81043
#
# @lc app=leetcode id=4 lang=python3
#
# [4] Median of Two Sorted Arrays
#
# https://leetcode.com/problems/median-of-two-sorted-arrays/description/
#
# algorithms
# Hard (30.86%)
# Likes:    9316
# Dislikes: 1441
# Total Accepted:    872.2K
# Total Submissions: 2.8M
# Testcase Example:  '[1,3]\n[2]'
#
# Given two sorted arrays nums1 and nums2 of size m and n respectively, return
# the median of the two sorted arrays.
#
# Example 1:
#
# Input: nums1 = [1,3], nums2 = [2]
# Output: 2.00000
# Explanation: merged array = [1,2,3] and median is 2.
#
# Example 2:
#
# Input: nums1 = [1,2], nums2 = [3,4]
# Output: 2.50000
# Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.
#
# Example 3:
#
# Input: nums1 = [0,0], nums2 = [0,0]
# Output: 0.00000
#
# Example 4:
#
# Input: nums1 = [], nums2 = [1]
# Output: 1.00000
#
# Example 5:
#
# Input: nums1 = [2], nums2 = []
# Output: 2.00000
#
# Constraints:
#
# nums1.length == m
# nums2.length == n
# 0 <= m <= 1000
# 0 <= n <= 1000
# 1 <= m + n <= 2000
# -10^6 <= nums1[i], nums2[i] <= 10^6
#
# Follow up: The overall run time complexity should be O(log (m+n)).
#

# @lc code=start
from typing import List  # added: the annotations below require List


class Solution_QuickSelect:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        nums = nums1[:] + nums2[:]
        length = len(nums)
        if length % 2 == 0:
            return (self._quick_select(nums, 0, length - 1, length // 2 + 1)
                    + self._quick_select(nums, 0, length - 1, (length - 1) // 2 + 1)) / 2
        else:
            return self._quick_select(nums, 0, length - 1, length // 2 + 1)

    def _quick_select(self, nums, start, end, k):
        left, right = start, end
        pivot = nums[(left + right) // 2]
        while left <= right:
            while left <= right and nums[left] > pivot:
                left += 1
            while left <= right and nums[right] < pivot:
                right -= 1
            if left <= right:
                nums[left], nums[right] = nums[right], nums[left]
                left += 1
                right -= 1
        if start + k - 1 <= right:
            return self._quick_select(nums, start, right, k)
        if start + k - 1 >= left:
            return self._quick_select(nums, left, end, k - (left - start))
        return nums[right + 1]


class Solution:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        if len(nums1) > len(nums2):
            return self.findMedianSortedArrays(nums2, nums1)

        l1, l2 = len(nums1), len(nums2)
        left, right = 0, l1
        while left <= right:
            position_x = (left + right) // 2
            position_y = (l1 + l2 + 1) // 2 - position_x

            max_left_x = nums1[position_x - 1] if position_x != 0 else float('-inf')
            max_left_y = nums2[position_y - 1] if position_y != 0 else float('-inf')
            min_right_x = nums1[position_x] if position_x < l1 else float('inf')
            min_right_y = nums2[position_y] if position_y < l2 else float('inf')

            if (max_left_x <= min_right_y and max_left_y <= min_right_x):
                # we found the partition
                if (l1 + l2) % 2 == 0:
                    return (max(max_left_x, max_left_y) + min(min_right_x, min_right_y)) / 2
                else:
                    return max(max_left_x, max_left_y)
            elif max_left_x > min_right_y:
                # we should move left
                right = position_x - 1
            else:
                left = position_x + 1
        return 0
# @lc code=end
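
# --- added usage sketch (not part of the original solution file) --- #
# A quick check that the two implementations above agree on the examples
# from the header comment; the inputs are taken from those examples.
if __name__ == "__main__":
    for a, b in (([1, 3], [2]), ([1, 2], [3, 4])):
        assert Solution().findMedianSortedArrays(a, b) == \
            Solution_QuickSelect().findMedianSortedArrays(a, b)
    print(Solution().findMedianSortedArrays([1, 2], [3, 4]))  # 2.5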
StarcoderdataPython
1732204
import logging
from typing import List

from opyoid.bindings import Binding, BindingToProviderAdapter, ClassBindingToProviderAdapter, \
    InstanceBindingToProviderAdapter, MultiBindingToProviderAdapter, ProviderBindingToProviderAdapter, \
    SelfBindingToProviderAdapter
from opyoid.bindings.registered_binding import RegisteredBinding
from opyoid.exceptions import BindingError
from opyoid.injection_context import InjectionContext
from opyoid.injection_state import InjectionState
from opyoid.provider import Provider
from opyoid.utils import InjectedT


class FromRegisteredBindingProviderFactory:
    """Creates Providers, one per binding."""

    logger = logging.getLogger(__name__)

    def __init__(self) -> None:
        self._binding_to_provider_adapters: List[BindingToProviderAdapter] = [
            SelfBindingToProviderAdapter(),
            InstanceBindingToProviderAdapter(),
            ClassBindingToProviderAdapter(),
            ProviderBindingToProviderAdapter(),
            MultiBindingToProviderAdapter(self),
        ]

    def create(self,
               binding: RegisteredBinding[Binding[InjectedT]],
               context: InjectionContext[InjectedT],
               cache_provider: bool = True) -> Provider[InjectedT]:
        module_path = binding.source_path
        while module_path:
            state = context.injection_state
            if module_path[0] not in state.state_by_module:
                state.state_by_module[module_path[0]] = InjectionState(
                    state.provider_creator,
                    module_path[0].binding_registry,
                    state.options,
                    state,
                )
            context = context.get_new_state_context(state.state_by_module[module_path[0]])
            module_path = module_path[1:]
        if cache_provider:
            return context.get_provider()
        return self._create_from_binding(binding, context)

    def _create_from_binding(self,
                             binding: RegisteredBinding[Binding[InjectedT]],
                             context: InjectionContext[InjectedT]) -> Provider[InjectedT]:
        for adapter in self._binding_to_provider_adapters:
            if adapter.accept(binding.raw_binding, context):
                return adapter.create(binding, context)
        raise BindingError(f"Could not find a BindingToProviderAdapter for {binding!r}")
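
# --- added illustrative note (not part of the original module) --- #
# _create_from_binding walks the adapter list in order and uses the first
# adapter whose accept() returns True, so the list order built in __init__
# effectively encodes adapter priority: SelfBindingToProviderAdapter is
# consulted first and MultiBindingToProviderAdapter last.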
StarcoderdataPython
1776199
<filename>Modulo1/script/hola.py
# control + s -> save changes
# control + n -> create a new file

nombre = input("Introduce tu nombre: ")
num = int(input("Ingrese un numero entero: "))

# print("valor de numero es: " + num)
print("Hola de nuevo {} de edad {}".format(nombre, num))
print(f"Hola por tercera vez {nombre} y tienes {num} años")
StarcoderdataPython
1684857
<filename>pysaint/api.py """ End User๋ฅผ ์œ„ํ•œ ๊ฐ„๋‹จํ•œ api """ from .constants import Line from .saint import Saint import copy from tqdm import tqdm from datetime import datetime def get(course_type, year_range, semesters, line=Line.FIVE_HUNDRED, **kwargs): """ THIS IS THE END POINT OF pysaint API USAGE:: >>> import pysaint >>> res = pysaint.get('์ „๊ณต', ['2018'], ['2 ํ•™๊ธฐ']) >>> print(res) >>> res = pysaint.get('๊ต์–‘ํ•„์ˆ˜', range(2015, 2017), ['1 ํ•™๊ธฐ', '์—ฌ๋ฆ„ํ•™๊ธฐ', '2 ํ•™๊ธฐ', '๊ฒจ์šธํ•™๊ธฐ']) >>> print(res) >>> res = pysaint.get('๊ต์–‘์„ ํƒ', (2016, 2017, 2018), ('1 ํ•™๊ธฐ', )) >>> print(res) >>> res = pysaint.get('์ „๊ณต', ['2018'], ['2 ํ•™๊ธฐ'], line=200) >>> print(res) :param course_type: :type course_type: str example ) '๊ต์–‘ํ•„์ˆ˜' '์ „๊ณต' '์—ฐ๊ณ„์ „๊ณต' '๊ต์–‘์„ ํƒ' '๊ต์ง' '์ฑ„ํ”Œ' :param year_range: :type year_range: list or tuple or range or str or int example ) '2018' ['2018'] [2018] ['2017', '2018'] [2017, 2018] (2015, 2016, 2017) ('2016', '2017', '2018') range(2015, 2019) :param semesters: :type semesters: list or tuple or str example ) '1 ํ•™๊ธฐ' ['1 ํ•™๊ธฐ', '์—ฌ๋ฆ„ํ•™๊ธฐ', '2 ํ•™๊ธฐ', '๊ฒจ์šธํ•™๊ธฐ'] ('1 ํ•™๊ธฐ', '2 ํ•™๊ธฐ', ) :param line: :type line: int example ) 10 20 50 100 200 500 :param silent: decide progress bar silent or not :return: dict """ if type(year_range) not in (tuple, list, range, str, int): raise ValueError("get() got wrong arguments year_range: {}\n" "expected tuple type or list, or range type but got {} type".format(year_range, type(year_range))) if type(semesters) not in (tuple, list, str): raise ValueError("get() got wrong arguments semesters: {}\n" "expected tuple type or list type but got {} type".format(semesters, type(semesters))) if type(year_range) in (str, int): year_range = [year_range] if type(semesters) is str: semesters = [semesters] if not Line.has_value(line): raise ValueError("get() got wrong arguments line: {}\n" "line should be one of {}".format(line, Line.list())) reformed_year_range = [] current_year = datetime.now().year for year in year_range: if 2000 < int(year) <= current_year: pass else: raise ValueError("get() got wrong arguments year_range: {}\n" "expected to be in year range(2000, 2021) but got {}".format(year_range, int(year))) reformed_year_range.append("{}".format(year)) if course_type == '๊ต์–‘ํ•„์ˆ˜': return _liberal_arts(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs) elif course_type == '์ „๊ณต': return _major(year_range=reformed_year_range, semesters=semesters, line=line,**kwargs) elif course_type == '๊ต์–‘์„ ํƒ': return _selective_liberal(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs) elif course_type == '์—ฐ๊ณ„์ „๊ณต': return _related_major(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs) elif course_type == '์œตํ•ฉ์ „๊ณต': return _fusion_major(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs) elif course_type == '๊ต์ง': return _teaching(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs) elif course_type == '์ฑ„ํ”Œ': return _chapel(year_range=reformed_year_range, semesters=semesters, line=line, **kwargs) else: raise ValueError("get() got wrong arguments course_type: {} \n" "expected to get '๊ต์–‘ํ•„์ˆ˜', '์ „๊ณต', '๊ต์–‘์„ ํƒ'".format(course_type)) def grade(id, password=None): """ get grade card from saint.ssu.ac.kr :param id: student id e.g.) 
2015xxxx :param password: <PASSWORD> :return: list """ saint = login(id, password) grade_card = saint.get_grade() return grade_card def _liberal_arts(year_range=[], semesters=[], line=int(Line.FIVE_HUNDRED), silent=False): """ ๊ต์–‘ํ•„์ˆ˜ ๊ณผ๋ชฉ๋“ค์„ ํ•™๊ธฐ ๋‹จ์œ„๋กœ ๋ฌถ์–ด์„œ ๋ฐ˜ํ™˜ํ•œ๋‹ค. :param year_range: :type year_range: list or tuple example input ) [2013, 2014, 2015, 2016, 2017, 2018] or (2017, 2018) :param semesters: :type semesters: list or tuple example input ) ['1 ํ•™๊ธฐ', '์—ฌ๋ฆ„ํ•™๊ธฐ', '2 ํ•™๊ธฐ', '๊ฒจ์šธํ•™๊ธฐ'] or ('1 ํ•™๊ธฐ') :param line: :type line: int example ) 10 20 50 100 200 500 :return: { 2013: { '์ „์ฒดํ•™๋…„': { 'CHAPEL': [ { dictionary which has dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ', '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)', '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ']) } ], '์ปดํ“จํ„ฐํ™œ์šฉ1(Excel)': [], '์ปดํ“จํ„ฐํ™œ์šฉ2(PPT)': [], 'Practical Reading ๏ผ† Writing': [], 'ํ˜„๋Œ€์ธ๊ณผ์„ฑ์„œ2': [] } } '1ํ•™๋…„': {...}, '2ํ•™๋…„': {...}, '3ํ•™๋…„': {...}, '4ํ•™๋…„': {...}, '5ํ•™๋…„': {...} }, year: { grade: { course_name: [] <- list which has dictionaries as it's elements } } } """ ret = {year: {} for year in year_range} saint = Saint() saint.select_course_section('๊ต์–‘ํ•„์ˆ˜') def __get_whole_course(year, semester, _line=line): saint.select_year(year) saint.select_semester(semester) saint.select_line(_line) liberal_map = saint.get_liberal_arts_map() course_map = {name: [] for name in liberal_map} pbar = tqdm(liberal_map, disable=silent) for course_name in pbar: pbar.set_description("Processing {:8s}".format(course_name)) course_map[course_name] = saint.select_on_liberal_arts(course_name) return course_map year_bar = tqdm(year_range, disable=silent) for year in year_bar: year_bar.set_description("Year: {:4s}".format(year)) semester_bar = tqdm(semesters, disable=silent) for semester in semester_bar: semester_bar.set_description("Semester: {:6s}".format(semester)) course_bunch = __get_whole_course(year, semester) ret[year][semester] = course_bunch return ret def _major(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False): """ ์ „๊ณต ๊ณผ๋ชฉ๋“ค์„ ํ•™๊ธฐ ๋‹จ์œ„๋กœ ๋ฌถ์–ด์„œ ๋ฐ˜ํ™˜ํ•œ๋‹ค. :param year_range: :type year_range: list or tuple :param semesters: :type semesters: list or tuple :param line: :type line: int :return: { '2017': { '1 ํ•™๊ธฐ': { '์ธ๋ฌธ๋Œ€ํ•™': { '์ค‘์–ด์ค‘๋ฌธํ•™๊ณผ': { '์ค‘์–ด์ค‘๋ฌธํ•™๊ณผ': [ { '๊ณ„ํš': '\xa0', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)': '์ „์„ -์ค‘๋ฌธ', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)': '๋ณต์„ -์ค‘๋ฌธ/๋ถ€์„ -์ค‘๋ฌธ', '๊ณตํ•™์ธ์ฆ': '\xa0', '๊ต๊ณผ์˜์—ญ': '7+1๊ต๊ณผ๋ชฉ\n์ธํ„ด์‰ฝ(์žฅ๊ธฐ๊ณผ์ •)\n์ธํ„ด์‰ฝ', '๊ณผ๋ชฉ๋ฒˆํ˜ธ': '5010611601', '๊ณผ๋ชฉ๋ช…': '๊ตญ๋‚ด์žฅ๊ธฐํ˜„์žฅ์‹ค์Šต(3)', '๋ถ„๋ฐ˜': '\xa0', '๊ต์ˆ˜๋ช…': '\xa0', '๊ฐœ์„คํ•™๊ณผ': '๊ฒฝ๋ ฅ๊ฐœ๋ฐœํŒ€', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)': '3.00 /3', '์ˆ˜๊ฐ•์ธ์›': '1', '์—ฌ์„': '199', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)': '\xa0', '์ˆ˜๊ฐ•๋Œ€์ƒ': '์ „์ฒด' }, { ... 
dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ', '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)', '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ']) } ] }, '๊ตญ์–ด๊ตญ๋ฌธํ•™๊ณผ': {}, '์ผ์–ด์ผ๋ฌธํ•™๊ณผ': {}, '์˜์–ด์˜๋ฌธํ•™๊ณผ': {}, '๋ถˆ์–ด๋ถˆ๋ฌธํ•™๊ณผ': {}, '์ฒ ํ•™๊ณผ': {}, '์‚ฌํ•™๊ณผ': {}, '๊ธฐ๋…๊ตํ•™๊ณผ': {}, }, '์ž์—ฐ๊ณผํ•™๋Œ€ํ•™': {}, '๋ฒ•๊ณผ๋Œ€ํ•™': {}, '์‚ฌํšŒ๊ณผํ•™๋Œ€ํ•™': {}, '๊ฒฝ์ œํ†ต์ƒ๋Œ€ํ•™': {}, '๊ฒฝ์˜๋Œ€ํ•™': {}, '๊ณต๊ณผ๋Œ€ํ•™': {}, 'IT๋Œ€ํ•™': {}, '๋ฒ ์–ด๋“œํ•™๋ถ€๋Œ€ํ•™': {}, '์˜ˆ์ˆ ์ฐฝ์ž‘ํ•™๋ถ€': {}, '์Šคํฌ์ธ ํ•™๋ถ€': {}, '์œตํ•ฉํŠน์„ฑํ™”์ž์œ ์ „๊ณตํ•™๋ถ€': {} } }, 'year': { 'semester': { 'college': { 'faculty': { 'major': [ { dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ', '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)', '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ']) } ] } } } } } """ ret = {year: {} for year in year_range} saint = Saint() def __get_whole_course(year, semester, _line=line): saint.select_year(year) saint.select_semester(semester) saint.select_line(_line) major_map = saint.get_major_map() course_map = copy.deepcopy(major_map) for college in major_map: for faculty in major_map[college]: course_map[college][faculty] = {key: [] for key in major_map[college][faculty]} college_bar = tqdm(major_map, disable=silent) for college in college_bar: college_bar.set_description("Processing {:8s}".format(college)) faculty_bar = tqdm(major_map[college], disable=silent) for faculty in faculty_bar: faculty_bar.set_description_str("Processing {:8s}".format(faculty)) for major in major_map[college][faculty]: course_map[college][faculty][major] = saint.select_on_major(college, faculty, major) return course_map year_bar = tqdm(year_range, disable=silent) for year in year_bar: year_bar.set_description("Year: {:4}".format(year)) semester_bar = tqdm(semesters, disable=silent) for semester in semester_bar: semester_bar.set_description_str("Semester: {:6}".format(semester)) course_bunch = __get_whole_course(year, semester) ret[year][semester] = course_bunch return ret def _selective_liberal(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False): """ ๊ต์–‘์„ ํƒ ๊ณผ๋ชฉ๋“ค์„ ํ•™๊ธฐ ๋‹จ์œ„๋กœ ๋ฌถ์–ด์„œ ๋ฐ˜ํ™˜ํ•œ๋‹ค. 
def _selective_liberal(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False):
    """
    Fetch ๊ต์–‘์„ ํƒ (elective liberal arts) courses and return them grouped by semester.
    :param year_range:
    :param semesters:
    :param line:
    :return: dict
    {
        2017: {
            '1 ํ•™๊ธฐ': {
                ์ „์ฒด: [
                    {
                        '๊ณ„ํš': '\xa0',
                        '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)': '๊ต์„ ',
                        '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)': '\xa0',
                        '๊ณตํ•™์ธ์ฆ': '\xa0',
                        '๊ต๊ณผ์˜์—ญ': '*์„ธ๊ณ„์˜์–ธ์–ด(ํ•ต์‹ฌ-์ฐฝ์˜)\n(๊ธฐ์ดˆ์—ญ๋Ÿ‰-๊ตญ์ œ์–ด๋ฌธ)์˜์–ด',
                        '๊ณผ๋ชฉ๋ฒˆํ˜ธ': '2150017601',
                        '๊ณผ๋ชฉ๋ช…': 'Advanced Writing and speaking English I',
                        '๋ถ„๋ฐ˜': '\xa0',
                        '๊ต์ˆ˜๋ช…': '์ด์ข…์ผ\n์ด์ข…์ผ\n์ด์ข…์ผ',
                        '๊ฐœ์„คํ•™๊ณผ': '๋ฒค์ฒ˜๊ฒฝ์˜ํ•™๊ณผ(๊ณ„์•ฝํ•™๊ณผ)',
                        '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)': '3.00 /3',
                        '์ˆ˜๊ฐ•์ธ์›': '11',
                        '์—ฌ์„': '39',
                        '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)': '์›” 19:00-19:50 (์กฐ๋งŒ์‹๊ธฐ๋…๊ด€ 12530-์ด์ข…์ผ)\n์›” 20:00-20:50 (์กฐ๋งŒ์‹๊ธฐ๋…๊ด€ 12530-์ด์ข…์ผ)\n์›” 21:00-21:50 (์กฐ๋งŒ์‹๊ธฐ๋…๊ด€ 12530-์ด์ข…์ผ)',
                        '์ˆ˜๊ฐ•๋Œ€์ƒ': '์ „์ฒดํ•™๋…„ ๋ฒค์ฒ˜๊ฒฝ์˜ํ•™๊ณผ(๊ณ„์•ฝํ•™๊ณผ) (๋Œ€์ƒ์™ธ์ˆ˜๊ฐ•์ œํ•œ)(๋Œ€์ƒ์™ธ์ˆ˜๊ฐ•์ œํ•œ)'
                    },
                    {
                        dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ',
                                   '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)',
                                   '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ'])
                    }
                ],
                *๋ฌธํ•™๊ณผ ์˜ˆ์ˆ (์œตํ•ฉ-์ธ๋ฌธ): [],
                *์—ญ์‚ฌ์™€์ฒ ํ•™(์œตํ•ฉ-์ธ๋ฌธ): [],
                *์ •๋ณด์™€๊ธฐ์ˆ (์œตํ•ฉ-์ž์—ฐ): [],
                *์ฐฝ์˜์„ฑ๊ณผ์˜์‚ฌ์†Œํ†ต๋Šฅ๋ ฅ(ํ•ต์‹ฌ-์ฐฝ์˜): [],
                *์„ธ๊ณ„์˜์–ธ์–ด(ํ•ต์‹ฌ-์ฐฝ์˜): [],
                *์„ธ๊ณ„์˜๋ฌธํ™”์™€๊ตญ์ œ๊ด€๊ณ„(ํ•ต์‹ฌ-์ฐฝ์˜): [],
                *์ธ๊ฐ„๊ณผ์‚ฌํšŒ(์œตํ•ฉ-์‚ฌํšŒ): [],
                *์ •์น˜์™€๊ฒฝ์ œ(์œตํ•ฉ-์‚ฌํšŒ): [],
                *์ž์—ฐ๊ณผํ•™๊ณผ์ˆ˜๋ฆฌ(์œตํ•ฉ-์ž์—ฐ): [],
                *์ƒํ™œ๊ณผ๊ฑด๊ฐ•(์‹ค์šฉ-์ƒํ™œ): [],
                *ํ•™๋ฌธ๊ณผ์ง„๋กœํƒ์ƒ‰(์‹ค์šฉ-์ƒํ™œ): [],
                *์ธ์„ฑ๊ณผ๋ฆฌ๋”์‰ฝ(ํ•ต์‹ฌ-์ฐฝ์˜): [],
                ์ˆญ์‹คํ’ˆ์„ฑ(์ธ์„ฑ-์ข…๊ต๊ฐ€์น˜์ธ์„ฑ๊ต์œก): [],
                ์ˆญ์‹คํ’ˆ์„ฑ(์ธ์„ฑ-๊ฐ€์น˜๊ด€๋ฐ์œค๋ฆฌ๊ต์œก): [],
                ์ˆญ์‹คํ’ˆ์„ฑ(์ธ์„ฑ-๊ณต๋™์ฒด์ธ์„ฑ๊ต์œก): [],
                ์ˆญ์‹คํ’ˆ์„ฑ(๋ฆฌ๋”์‹ญ-ํ†ต์ผ๋ฆฌ๋”์‹ญ): [],
                ์ˆญ์‹คํ’ˆ์„ฑ(๋ฆฌ๋”์‹ญ-๋ฆฌ๋”์‹ญ์ด๋ก ๋ฐ์‹ค์ฒœ): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(์‚ฌ๊ณ ๋ ฅ-๋…ผ๋ฆฌ๋ฐ๋น„ํŒ์ ์‚ฌ๊ณ ): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(์‚ฌ๊ณ ๋ ฅ-์ฐฝ์˜๋ฐ์œตํ•ฉ์ ์‚ฌ๊ณ ): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(์‚ฌ๊ณ ๋ ฅ-์ˆ˜๋ฆฌ์ ์‚ฌ๊ณ ): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(ํ•œ๊ตญ์–ด์˜์‚ฌ์†Œํ†ต-์ฝ๊ธฐ์™€์“ฐ๊ธฐ): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(ํ•œ๊ตญ์–ด์˜์‚ฌ์†Œํ†ต-์˜์‚ฌ์†Œํ†ต): [],
                (๊ธฐ์ดˆ์—ญ๋Ÿ‰-๊ตญ์ œ์–ด๋ฌธ)์˜์–ด: [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(๊ตญ์ œ์–ด๋ฌธ-๊ตญ์ œ์–ด): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(๊ตญ์ œ์–ด๋ฌธ-๊ณ ์ „์–ด๋ฌธ ): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(๊ณผํ•™์ •๋ณด๊ธฐ์ˆ -๊ณผํ•™): [],
                ๊ธฐ์ดˆ์—ญ๋Ÿ‰(๊ณผํ•™์ •๋ณด๊ธฐ์ˆ -์ •๋ณด๊ธฐ์ˆ ): [],
                ๊ท ํ˜•๊ต์–‘(์ธ๋ฌธํ•™-๋ฌธํ•™/์–ดํ•™/์˜ˆ์ˆ ): [],
                ๊ท ํ˜•๊ต์–‘(์ธ๋ฌธํ•™-์—ญ์‚ฌ): [],
                ๊ท ํ˜•๊ต์–‘(์ธ๋ฌธํ•™-์ฒ ํ•™/์‚ฌ์ƒ): [],
                ๊ท ํ˜•๊ต์–‘(์‚ฌํšŒ๊ณผํ•™-์‚ฌํšŒ/์ •์น˜/๊ฒฝ์ œ): [],
                ๊ท ํ˜•๊ต์–‘(์‚ฌํšŒ๊ณผํ•™-๋ฌธํ™”๋ฐ๋ฌธ๋ช…): [],
                ๊ท ํ˜•๊ต์–‘(์ž์—ฐ๊ณผํ•™-์ž์—ฐ๊ณผํ•™): [],
                ์‹ค์šฉ๊ต์–‘(๊ฐœ์ธ๊ณผ๊ฐ€์กฑ์ƒํ™œ): [],
                ์‹ค์šฉ๊ต์–‘(๊ฒฝ์ œ๊ฒฝ์˜): [],
                ์‹ค์šฉ๊ต์–‘(๊ณต๊ณต์ƒํ™œ): [],
                ์‹ค์šฉ๊ต์–‘(๊ธฐ์ˆ ์ƒํ™œ): [],
                ์‹ค์šฉ๊ต์–‘(์ž๊ธฐ๊ฐœ๋ฐœ๊ณผ์ง„๋กœํƒ์ƒ‰): []
            }
        },
        year: {
            'semester': {
                'section': [
                    {
                        dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ',
                                   '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)',
                                   '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ'])
                    }
                ]
            }
        }
    }
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('๊ต์–‘์„ ํƒ')
    # warm-up selection; TODO: verify whether this initial year/semester pick is necessary
    saint.select_year('2017')
    saint.select_semester('2 ํ•™๊ธฐ')

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        selective_map = saint.get_selective_liberal_map()
        course_map = {course_name: {} for course_name in selective_map}
        pbar = tqdm(selective_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            if course_name != '':
                course_map[course_name] = saint.select_on_selective_liberal(course_name)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("Semester: {:6s}".format(semester))
            course_bunch = __get_whole_course(year, semester)
            ret[year][semester] = course_bunch
    return ret
def _related_major(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False):
    """
    Fetch ์—ฐ๊ณ„์ „๊ณต (related major) courses and return them grouped by semester.
    :param year_range:
    :param semesters:
    :param line:
    :return: dict
    {
        2017: {
            '1 ํ•™๊ธฐ': {
                "์ค‘๊ตญ์–ด๊ฒฝ์ œ๊ตญ์ œํ†ต์ƒ์—ฐ๊ณ„์ „๊ณต": [
                    {
                        "๊ณ„ํš": "\xa0",
                        "์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)": "์ „์„ -๊ฒฝ์ œ",
                        "์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)": "๋ณต์„ -๊ฒฝ์ œ/๋ถ€์„ -๊ฒฝ์ œ/์—ฐ๊ณ„2-๋ฒค์ฒ˜์ž๋ณธ๊ฒฝ์ œํ•™/์—ฐ๊ณ„2-์ผ๋ณธ์–ด๊ฒฝ์ œํ†ต์ƒ/์—ฐ๊ณ„2-์ค‘๊ตญ์–ด๊ฒฝ์ œํ†ต์ƒ",
                        "๊ณตํ•™์ธ์ฆ": "\xa0",
                        "๊ต๊ณผ์˜์—ญ": "\xa0",
                        "๊ณผ๋ชฉ๋ฒˆํ˜ธ": "2150191901",
                        "๊ณผ๋ชฉ๋ช…": "๊ณต๊ณต๊ฒฝ์ œํ•™(์‹ค์‹œ๊ฐ„ํ™”์ƒ๊ฐ•์˜) (์˜จ๋ผ์ธ)",
                        "๋ถ„๋ฐ˜": "\xa0",
                        "๊ต์ˆ˜๋ช…": "์šฐ์ง„ํฌ\n์šฐ์ง„ํฌ",
                        "๊ฐœ์„คํ•™๊ณผ": "๊ฒฝ์ œํ•™๊ณผ",
                        "์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)": "3.00 /3.0 (0 )",
                        "์ˆ˜๊ฐ•์ธ์›": "0",
                        "์—ฌ์„": "35",
                        "๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)": "์›” 15:00-16:15 (-์šฐ์ง„ํฌ)\n์ˆ˜ 13:30-14:45 (์ˆญ๋•๊ฒฝ์ƒ๊ด€ 02109-์šฐ์ง„ํฌ)",
                        "์ˆ˜๊ฐ•๋Œ€์ƒ": "3ํ•™๋…„ ๊ฒฝ์ œ,๋ฒค์ฒ˜์ž๋ณธ๊ฒฝ์ œํ•™,์ผ๋ณธ์–ด๊ฒฝ์ œํ†ต์ƒ,์ค‘๊ตญ์–ด๊ฒฝ์ œํ†ต์ƒ"
                    }
                ],
                ์ผ๋ณธ์–ด๊ฒฝ์ œ๊ตญ์ œํ†ต์ƒ์—ฐ๊ณ„์ „๊ณต: [],
                ๊ธˆ์œต๊ณตํ•™ยท๋ณดํ—˜๊ณ„๋ฆฌ์—ฐ๊ณ„์ „๊ณต: [],
                ์˜์–ดยท์ค‘๊ตญ์–ด์—ฐ๊ณ„์ „๊ณต: [],
                PreMed์—ฐ๊ณ„์ „๊ณต: [],
                ๋ฒค์ฒ˜์ž๋ณธ๊ฒฝ์ œํ•™์—ฐ๊ณ„์ „๊ณต: [],
                ๋ณดํ—˜๊ณ„๋ฆฌยท๋ฆฌ์Šคํฌ์—ฐ๊ณ„์ „๊ณต: [],
                ์œตํ•ฉ์ฐฝ์—…์—ฐ๊ณ„: []
            }
        },
        year: {
            'semester': {
                'section': [
                    {
                        dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ',
                                   '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)',
                                   '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ'])
                    }
                ]
            }
        }
    }
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('์—ฐ๊ณ„์ „๊ณต')
    # warm-up selection; TODO: verify whether this initial year/semester pick is necessary
    saint.select_year('2017')
    saint.select_semester('2 ํ•™๊ธฐ')

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        related_major_map = saint.get_related_major_map()
        course_map = {course_name: {} for course_name in related_major_map}
        pbar = tqdm(related_major_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            if course_name != '':
                course_map[course_name] = saint.select_on_related_major(course_name)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("Semester: {:6s}".format(semester))
            course_bunch = __get_whole_course(year, semester)
            ret[year][semester] = course_bunch
    return ret
def _fusion_major(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False):
    """
    Fetch ์œตํ•ฉ์ „๊ณต (fusion major) courses and return them grouped by semester.
    :param year_range:
    :param semesters:
    :param line:
    :return: dict
    {
        2021: {
            '1 ํ•™๊ธฐ': {
                "๋น…๋ฐ์ดํ„ฐ์œตํ•ฉ": [
                    {
                        "๊ณ„ํš": "\xa0",
                        "์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)": "์ „ํ•„-์†Œํ”„ํŠธ",
                        "์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)": "๋ณตํ•„-์†Œํ”„ํŠธ/์œต์„ -๋น…๋ฐ์ดํ„ฐ์œตํ•ฉ",
                        "๊ณตํ•™์ธ์ฆ": "๊ณตํ•™์ฃผ์ œ-์†Œํ”„ํŠธ๊ณต์ธ์ฆ/์ธํ•„-์†Œํ”„ํŠธ๊ณต์ธ์ฆ",
                        "๊ต๊ณผ์˜์—ญ": "\xa0",
                        "๊ณผ๋ชฉ๋ฒˆํ˜ธ": "2150013201",
                        "๊ณผ๋ชฉ๋ช…": "๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค(์‹ค์‹œ๊ฐ„ํ™”์ƒ+์‚ฌ์ „๋…นํ™”๊ฐ•์˜) (์˜จ๋ผ์ธ) ( ๊ฐ€๋ฐ˜ )",
                        "๋ถ„๋ฐ˜": "\xa0",
                        "๊ต์ˆ˜๋ช…": "\xa0",
                        "๊ฐœ์„คํ•™๊ณผ": "์†Œํ”„ํŠธ์›จ์–ดํ•™๋ถ€",
                        "์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)": "3.00 /3.0",
                        "์ˆ˜๊ฐ•์ธ์›": "0",
                        "์—ฌ์„": "40",
                        "๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)": "์›” 15:00-16:15 (-)",
                        "์ˆ˜๊ฐ•๋Œ€์ƒ": "3ํ•™๋…„ ์†Œํ”„ํŠธ,๋น…๋ฐ์ดํ„ฐ์œตํ•ฉ"
                    }
                ],
                ๋น…๋ฐ์ดํ„ฐ์ปดํ“จํŒ…์œตํ•ฉ: [],
                ์Šค๋งˆํŠธ์†Œ์žฌ/์ œํ’ˆ์œตํ•ฉ: [],
                ์Šค๋งˆํŠธ์ด๋™์ฒด์œตํ•ฉ: [],
                ์–‘์ž๋‚˜๋…ธ์œตํ•ฉ: [],
                ์—๋„ˆ์ง€๊ณตํ•™์œตํ•ฉ: [],
                ํ†ต์ผ์™ธ๊ต๋ฐ๊ฐœ๋ฐœํ˜‘๋ ฅ์œตํ•ฉ: [],
                ์Šค๋งˆํŠธ์ž๋™์ฐจ์œตํ•ฉ: [],
                ์ •๋ณด๋ณดํ˜ธ์œตํ•ฉ: [],
                ICT์œ ํ†ต๋ฌผ๋ฅ˜์œตํ•ฉ: [],
                ๋ฌธํ™”์„œ๋น„์Šค์‚ฐ์—…์œตํ•ฉ: [],
                ์Šคํฌ์ธ ๋งˆ์ผ€ํŒ…์œตํ•ฉ: [],
                ์‚ฌ๋ฌผ์ธํ„ฐ๋„ท์‹œ์Šคํ…œ์œตํ•ฉ: [],
                ๊ณผํ•™์ฒ ํ•™์œตํ•ฉ: [],
                ์ธ๊ฐ„๋ฐ์‚ฌํšŒํ†ต์„ญ์œตํ•ฉ: [],
                ํ—ฌ์Šค์ผ€์–ด๋น…๋ฐ์ดํ„ฐ์œตํ•ฉ: [],
                ๋””์ž์ธํ”Œ๋ž˜๋‹์œตํ•ฉ: [],
                ์‚ฌํšŒ์ ๊ธฐ์—…๊ณผ์‚ฌํšŒํ˜์‹ ์œตํ•ฉ: [],
                ์Šคํฌ์ธ ๋งค๋‹ˆ์ง€๋จผํŠธ์œตํ•ฉ: [],
                IT์Šคํƒ€ํŠธ์—…์—‘์…€๋Ÿฌ๋ ˆ์ดํ„ฐ์œตํ•ฉ: [],
                AI๋กœ๋ด‡์œตํ•ฉ: [],
                ๋‰ด๋ฏธ๋””์–ด์ฝ˜ํ…์ธ ์œตํ•ฉ: [],
                ๋ฌธํ™”์ฝ˜ํ…์ธ ๋น„์ฆˆ๋‹ˆ์Šค์œตํ•ฉ: [],
                ์ฃผ๊ฑฐ๋ณต์ง€๋„์‹œํ–‰์ •์œตํ•ฉ: [],
                ๋ฉ”์นดํŠธ๋กœ๋‹‰์Šค๊ณตํ•™์œตํ•ฉ: [],
                ํ”„๋ ˆ์ž„/์‚ฌํšŒ์ด์Šˆ๊ธฐํš์œตํ•ฉ: [],
                ์‚ฌํšŒ๊ณต๋™์ฒดํ˜์‹ ์œตํ•ฉ: [],
                ๋„ค๋Ÿฌํ‹ฐ๋ธŒ๋””์ง€ํ„ธ์•„ํŠธ์œตํ•ฉ: [],
                ์‚ฌํšŒ๋ถ„์„๋ฐ์ดํ„ฐ๋งˆ์ผ€ํŒ…์œตํ•ฉ: [],
                ํŒจ์…˜๋ฏธ๋””์–ด๋งˆ์ผ€ํŒ…์œตํ•ฉ: [],
                ๊ตญ์ œ๋„์‹œ๊ณ„ํšโ‹…ํ–‰์ •์œตํ•ฉ: [],
                ํ† ํƒˆ๋””์ž์ธ๋ธŒ๋žœ๋”ฉ์œตํ•ฉ: [],
                AI-์ธ์ง€์–ธ์–ด์œตํ•ฉ: [],
                ๋‰ด๋ฏธ๋””์–ด๋งˆ์ผ€ํŒ…์œตํ•ฉ: [],
                ๋™์•„์‹œ์•„๊ฒฝ์ œํ†ต์ƒ์œตํ•ฉ: [],
                AI๋ชจ๋นŒ๋ฆฌํ‹ฐ์œตํ•ฉ: [],
                ์Šค๋งˆํŠธ์•ˆ์ „๋ณด๊ฑดํ™˜๊ฒฝ์œตํ•ฉ: [],
                ๋ฐ์ดํ„ฐ๋งˆ์ผ€ํŒ…์œตํ•ฉ: [],
                ์ง€์†๊ฐ€๋Šฅ๋””์ž์ธ์œตํ•ฉ: []
            }
        },
        year: {
            'semester': {
                'section': [
                    {
                        dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ',
                                   '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)',
                                   '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ'])
                    }
                ]
            }
        }
    }
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('์œตํ•ฉ์ „๊ณต')
    # warm-up selection; TODO: verify whether this initial year/semester pick is necessary
    saint.select_year('2017')
    saint.select_semester('2 ํ•™๊ธฐ')

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        fusion_major_map = saint.get_fusion_major_map()
        course_map = {course_name: {} for course_name in fusion_major_map}
        pbar = tqdm(fusion_major_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            if course_name != '':
                course_map[course_name] = saint.select_on_fusion_major(course_name)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("Semester: {:6s}".format(semester))
            course_bunch = __get_whole_course(year, semester)
            ret[year][semester] = course_bunch
    return ret
def _teaching(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False):
    """
    Fetch ๊ต์ง (teaching certification) courses and return them grouped by semester.
    :param year_range:
    :param semesters:
    :param line:
    :return: dict
    {
        2021: {
            '1 ํ•™๊ธฐ': {
                "๊ต์ง": [
                    {
                        "๊ณ„ํš": "\xa0",
                        "์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)": "๊ต์ง",
                        "์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)": "\xa0",
                        "๊ณตํ•™์ธ์ฆ": "\xa0",
                        "๊ต๊ณผ์˜์—ญ": "๊ต์ง์ด๋ก ์˜์—ญ",
                        "๊ณผ๋ชฉ๋ฒˆํ˜ธ": "5011868701",
                        "๊ณผ๋ชฉ๋ช…": "๊ต์œก๊ณผ์ •(์‹ค์‹œ๊ฐ„ํ™”์ƒ๊ฐ•์˜) (์˜จ๋ผ์ธ)",
                        "๋ถ„๋ฐ˜": "\xa0",
                        "๊ต์ˆ˜๋ช…": "์กฐํ˜ธ์ œ",
                        "๊ฐœ์„คํ•™๊ณผ": "๊ต์งํŒ€",
                        "์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)": "2.00 /2.0 (0 )",
                        "์ˆ˜๊ฐ•์ธ์›": "0",
                        "์—ฌ์„": "30",
                        "๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)": "๊ธˆ 18:00-19:50 (-์กฐํ˜ธ์ œ)",
                        "์ˆ˜๊ฐ•๋Œ€์ƒ": "์ „์ฒด"
                    }
                ]
            }
        },
        year: {
            'semester': {
                'section': [
                    {
                        dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ',
                                   '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)',
                                   '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ'])
                    }
                ]
            }
        }
    }
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('๊ต์ง')
    # warm-up selection; TODO: verify whether this initial year/semester pick is necessary
    saint.select_year('2017')
    saint.select_semester('2 ํ•™๊ธฐ')

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        teaching_map = ['๊ต์ง']
        course_map = {course_name: {} for course_name in teaching_map}
        pbar = tqdm(teaching_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            if course_name != '':
                course_map[course_name] = saint.select_on_teaching()
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("Semester: {:6s}".format(semester))
            course_bunch = __get_whole_course(year, semester)
            ret[year][semester] = course_bunch
    return ret
def _chapel(year_range=[], semesters=[], line=Line.FIVE_HUNDRED, silent=False):
    """
    Fetch ์ฑ„ํ”Œ (chapel) courses and return them grouped by semester.
    :param year_range:
    :param semesters:
    :param line:
    :return: dict
    {
        2021: {
            '1 ํ•™๊ธฐ': {
                "๊ต์ˆ˜์™€ํ•จ๊ป˜ํ•˜๋Š”์ฑ„ํ”Œ": [
                    {
                        "๊ณ„ํš": "\xa0",
                        "์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)": "์ฑ„ํ”Œ",
                        "์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)": "\xa0",
                        "๊ณตํ•™์ธ์ฆ": "\xa0",
                        "๊ต๊ณผ์˜์—ญ": "์ฑ„ํ”Œ๊ณผ๋ชฉ",
                        "๊ณผ๋ชฉ๋ฒˆํ˜ธ": "2150051501",
                        "๊ณผ๋ชฉ๋ช…": "๊ต์ˆ˜์™€ํ•จ๊ป˜ํ•˜๋Š”์ฑ„ํ”Œ",
                        "๋ถ„๋ฐ˜": "\xa0",
                        "๊ต์ˆ˜๋ช…": "๊ฐ•์•„๋žŒ",
                        "๊ฐœ์„คํ•™๊ณผ": "ํ•™์›์„ ๊ตํŒ€",
                        "์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)": "1.00 /0.5",
                        "์ˆ˜๊ฐ•์ธ์›": "0",
                        "์—ฌ์„": "15",
                        "๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)": "ํ™” 15:00-15:50 (์ง„๋ฆฌ๊ด€ 11111-๊ฐ•์•„๋žŒ)",
                        "์ˆ˜๊ฐ•๋Œ€์ƒ": "์ „์ฒด"
                    }
                ],
                "CHAPEL": []
            }
        },
        year: {
            'semester': {
                'section': [
                    {
                        dict_keys(['๊ณ„ํš', '์ด์ˆ˜๊ตฌ๋ถ„(์ฃผ์ „๊ณต)', '์ด์ˆ˜๊ตฌ๋ถ„(๋‹ค์ „๊ณต)', '๊ณตํ•™์ธ์ฆ', '๊ต๊ณผ์˜์—ญ',
                                   '๊ณผ๋ชฉ๋ฒˆํ˜ธ', '๊ณผ๋ชฉ๋ช…', '๋ถ„๋ฐ˜', '๊ต์ˆ˜๋ช…', '๊ฐœ์„คํ•™๊ณผ', '์‹œ๊ฐ„/ํ•™์ (์„ค๊ณ„)',
                                   '์ˆ˜๊ฐ•์ธ์›', '์—ฌ์„', '๊ฐ•์˜์‹œ๊ฐ„(๊ฐ•์˜์‹ค)', '์ˆ˜๊ฐ•๋Œ€์ƒ'])
                    }
                ]
            }
        }
    }
    """
    ret = {year: {} for year in year_range}
    saint = Saint()
    saint.select_course_section('์ฑ„ํ”Œ')
    # warm-up selection; TODO: verify whether this initial year/semester pick is necessary
    saint.select_year('2017')
    saint.select_semester('2 ํ•™๊ธฐ')

    def __get_whole_course(year, semester, _line=line):
        saint.select_year(year)
        saint.select_semester(semester)
        saint.select_line(_line)
        chapel_map = saint.get_chapel_map()
        course_map = {course_name: {} for course_name in chapel_map}
        pbar = tqdm(chapel_map, disable=silent)
        for course_name in pbar:
            pbar.set_description("Processing {:8s}".format(course_name))
            if course_name != '':
                course_map[course_name] = saint.select_on_chapel(course_name)
        return course_map

    year_bar = tqdm(year_range, disable=silent)
    for year in year_bar:
        year_bar.set_description("Year: {:4s}".format(year))
        semester_bar = tqdm(semesters, disable=silent)
        for semester in semester_bar:
            semester_bar.set_description("Semester: {:6s}".format(semester))
            course_bunch = __get_whole_course(year, semester)
            ret[year][semester] = course_bunch
    return ret


def _cyber(year_range=[], semesters=[], silent=False):
    """
    TODO: implement when time permits
    :param year_range:
    :param semesters:
    :return:
    """
    raise NotImplementedError("scraping of cyber courses is not implemented yet")


def login(user_id, password=None):
    """
    log in to saint.ssu.ac.kr
    :param user_id: student id
    :param password: account password; prompted interactively via getpass when None
    :return: a logged-in Saint instance
    """
    if password is None:
        import getpass
        password = getpass.getpass("PASSWORD for {}: ".format(user_id))
    saint = Saint()
    saint.login(user_id, password)
    return saint
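# Illustrative sketch (comment only) of the authenticated flow. It assumes
# login() and grade() are re-exported at the package level like get(); if not,
# import them from pysaint.api instead. login() prompts for the password via
# getpass when it is not passed in.
#
#     import pysaint
#
#     saint = pysaint.login('2015xxxx')   # prompts: PASSWORD for 2015xxxx:
#     card = pysaint.grade('2015xxxx')    # or go straight to the grade card
#     for row in card:
#         print(row)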