hexsha (stringlengths 40..40) | size (int64 5..2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..248) | max_stars_repo_name (stringlengths 5..125) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..248) | max_issues_repo_name (stringlengths 5..125) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..248) | max_forks_repo_name (stringlengths 5..125) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 5..2.06M) | avg_line_length (float64 1..1.02M) | max_line_length (int64 3..1.03M) | alphanum_fraction (float64 0..1) | count_classes (int64 0..1.6M) | score_classes (float64 0..1) | count_generators (int64 0..651k) | score_generators (float64 0..1) | count_decorators (int64 0..990k) | score_decorators (float64 0..1) | count_async_functions (int64 0..235k) | score_async_functions (float64 0..1) | count_documentation (int64 0..1.04M) | score_documentation (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2981c0d7db082728c69505a339fd5c5214978ef0 | 758 | py | Python | agent.py | shukia/2048-python | e6385ea3749098a418198e727e6976dccf344960 | ["MIT"] | null | null | null | agent.py | shukia/2048-python | e6385ea3749098a418198e727e6976dccf344960 | ["MIT"] | null | null | null | agent.py | shukia/2048-python | e6385ea3749098a418198e727e6976dccf344960 | ["MIT"] | null | null | null |
from logic import *
class Agent:
def __init__(self):
self.matrix = []
self.score = 0
def initialize_game(self):
self.score = 0
self.matrix = new_game(4)
self.matrix = add_two(self.matrix)
self.matrix = add_two(self.matrix)
def move(self, direction):
self.matrix, board_changed, score_change = move(self.matrix, direction)
if board_changed:
self.matrix = add_two(self.matrix)
self.score += score_change
return self.matrix, self.score, game_state(self.matrix)
def simulate_move(self, direction):
mat, board_changed, score_change = move(self.matrix, direction)
return mat, self.score + score_change, game_state(mat), board_changed
| 29.153846 | 79 | 0.64248 | 734 | 0.968338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
29843af5e308c3a6a1b810c3658616edbb2840d2 | 2,234 | py | Python | config/synthia_rand_cityscapes.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | ["MIT"] | null | null | null | config/synthia_rand_cityscapes.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | ["MIT"] | null | null | null | config/synthia_rand_cityscapes.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | ["MIT"] | null | null | null |
problem_type = "segmentation"
dataset_name = "synthia_rand_cityscapes"
dataset_name2 = None
perc_mb2 = None
model_name = "resnetFCN"
freeze_layers_from = None
show_model = False
load_imageNet = True
load_pretrained = False
weights_file = "weights.hdf5"
train_model = True
test_model = True
pred_model = False
debug = True
debug_images_train = 50
debug_images_valid = 50
debug_images_test = 50
debug_n_epochs = 2
batch_size_train = 2
batch_size_valid = 2
batch_size_test = 2
crop_size_train = (512, 512)
crop_size_valid = None
crop_size_test = None
resize_train = None
resize_valid = None
resize_test = None
shuffle_train = True
shuffle_valid = False
shuffle_test = False
seed_train = 1924
seed_valid = 1924
seed_test = 1924
optimizer = "rmsprop"
learning_rate = 0.0001
weight_decay = 0.0
n_epochs = 1000
save_results_enabled = True
save_results_nsamples = 5
save_results_batch_size = 5
save_results_n_legend_rows = 1
earlyStopping_enabled = True
earlyStopping_monitor = "val_jaccard"
earlyStopping_mode = "max"
earlyStopping_patience = 50
earlyStopping_verbose = 0
checkpoint_enabled = True
checkpoint_monitor = "val_jaccard"
checkpoint_mode = "max"
checkpoint_save_best_only = True
checkpoint_save_weights_only = True
checkpoint_verbose = 0
plotHist_enabled = True
plotHist_verbose = 0
LRScheduler_enabled = True
LRScheduler_batch_epoch = "batch"
LRScheduler_type = "poly"
LRScheduler_M = 75000
LRScheduler_decay = 0.1
LRScheduler_S = 10000
LRScheduler_power = 0.9
TensorBoard_enabled = True
TensorBoard_histogram_freq = 1
TensorBoard_write_graph = True
TensorBoard_write_images = False
TensorBoard_logs_folder = None
norm_imageNet_preprocess = True
norm_fit_dataset = False
norm_rescale = 1
norm_featurewise_center = False
norm_featurewise_std_normalization = False
norm_samplewise_center = False
norm_samplewise_std_normalization = False
norm_gcn = False
norm_zca_whitening = False
cb_weights_method = None
da_rotation_range = 0
da_width_shift_range = 0.0
da_height_shift_range = 0.0
da_shear_range = 0.0
da_zoom_range = 0.5
da_channel_shift_range = 0.0
da_fill_mode = "constant"
da_cval = 0.0
da_horizontal_flip = True
da_vertical_flip = False
da_spline_warp = False
da_warp_sigma = 10
da_warp_grid_size = 3
da_save_to_dir = False
 | 24.822222 | 42 | 0.821844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.059087 |
2986e8913a7519f773b1d594848f51448026d50a | 583 | py | Python | utils/HTMLParser.py | onyb/janitor | a46f3bf23467a27c6f5891b64c797295e5cc47d0 | ["Apache-2.0"] | null | null | null | utils/HTMLParser.py | onyb/janitor | a46f3bf23467a27c6f5891b64c797295e5cc47d0 | ["Apache-2.0"] | null | null | null | utils/HTMLParser.py | onyb/janitor | a46f3bf23467a27c6f5891b64c797295e5cc47d0 | ["Apache-2.0"] | null | null | null |
from bs4 import BeautifulSoup
from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer
from optimizers.CSSOptimizer import CSSOptimizer
class HTMLParser(object):
def __init__(self, html):
self.soup = BeautifulSoup(html, 'lxml')
def js_parser(self):
for script in self.soup.find_all('script'):
opt = AdvancedJSOptimizer()
script.string = opt.process(script.string)
def css_parser(self):
for style in self.soup.find_all('style'):
opt = CSSOptimizer()
            style.string = opt.process(style.string)
 | 32.388889 | 62 | 0.680961 | 439 | 0.753002 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.036021 |
461e4085634d8de9c3e950613c2eaf835255268d | 5,590 | py | Python | spades/main.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | ["Apache-2.0"] | null | null | null | spades/main.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | ["Apache-2.0"] | null | null | null | spades/main.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | ["Apache-2.0"] | null | null | null |
'''Provide interface for game.'''
from typing import Any, Dict, List, Optional, Union
import flask
from flask import Blueprint, url_for
from flask_login import current_user, login_required
from flask_wtf import FlaskForm
from flask_sse import sse
from werkzeug.wrappers import Response
from wtforms import IntegerField, SubmitField
from wtforms.validators import DataRequired, NumberRange
# from spades import exceptions
from spades.game import GameState
from spades.game.models.player import Player
main = Blueprint('main', __name__)
mock_names: List[str] = ['john']
__game: GameState = GameState()
class LobbyForm(FlaskForm):
start_game: SubmitField = SubmitField('start game')
join_game: SubmitField = SubmitField('join game')
class BidForm(FlaskForm):
bid: IntegerField = IntegerField(
'bid',
validators=[
DataRequired(),
NumberRange(min=1, max=13)
]
)
submit: SubmitField = SubmitField('bid')
def get_player() -> Optional[Player]:
player = __game.get_player_by_username(current_user.username)
if not player:
__game.add_player(Player(current_user.username))
player = __game.get_player_by_username(current_user.username)
return player
def get_turns(players: List[Player]) -> List[Dict[str, Any]]:
player_turns: List[Dict[str, Any]] = []
def is_active(turn: int) -> str:
if __game.state != 'playing': # type: ignore
print('gamestate', False)
return 'false'
elif __game.current_turn != turn:
print('turn:', __game.current_turn, turn)
return 'false'
else:
print('active:', True)
return 'true'
for n, player in enumerate(players):
inst = {
'username': player.username,
'active': is_active(n)
}
if player.username == current_user.username:
inst['hand'] = player.hand.to_json # type: ignore
else:
inst['card_count'] = len(player.hand) # type: ignore
player_turns.append(inst)
print('player turns', player_turns)
return player_turns
@main.route('/')
def index() -> str:
'''Provide start page.'''
return flask.render_template('index.html')
@main.route('/lobby', methods=['GET', 'POST'])
@login_required
def lobby() -> Union[Response, str]:
'''Provide lobby to coordinate new games.'''
form = LobbyForm()
if form.validate_on_submit():
if form.join_game.data:
print('join game')
if (
hasattr(__game, 'state') and
__game.state == 'waiting' # type: ignore
):
if not __game.get_player_by_username(
current_user.username
):
__game.add_player(Player(current_user.username))
if __game.check_player_count():
__game.start_game() # type: ignore
return flask.redirect(url_for('main.gameboard'))
# if games != []:
# return flask.render_template(
# 'lobby.html', form=form, games=mock_names
# )
return flask.render_template('lobby.html', form=form)
@main.route('/play', methods=['POST'])
@login_required
def play() -> None:
'''Publish card play for user.'''
username = flask.request.form['username']
rank = flask.request.form['rank']
suit = flask.request.form['suit']
card_played = {'username': username, 'rank': rank, 'suit': suit}
# TODO: submit card to game
print(
'turn',
__game.state, # type: ignore
__game.get_player_turn(username),
__game.current_turn
)
__game.make_play(__game.get_player_turn(username), rank, suit)
sse.publish(card_played, type='play-card')
@main.route('/bids', methods=['GET', 'POST'])
@login_required
def bids() -> Union[Response, str]:
form = BidForm()
if form.validate_on_submit():
player_bid = flask.request.form['bid']
__game.accept_bid(
__game.get_player_turn(current_user.username),
player_bid
)
__game.start_turn() # type: ignore
return flask.redirect(url_for('main.gameboard'))
player = get_player()
return flask.render_template(
'bid.html', form=form, data=player.hand.to_json # type: ignore
)
@main.route('/gameboard')
@login_required
def gameboard() -> Union[Response, str]:
'''Provide gameboard.'''
# Setup mock players - less than four fail
for player_name in mock_names:
if not __game.get_player_by_username(player_name):
__game.add_player(Player(player_name))
# mock end
players = []
player = get_player()
if __game.check_player_count():
if __game.state == 'waiting': # type: ignore
__game.start_game()
print('starting game', __game.state)
if __game.state == 'bidding': # type: ignore
print('cards', player.hand.to_json)
print('accepting bids')
# return flask.redirect(url_for('main.bids'))
if __game.state == 'playing': # type: ignore
print('playing game')
if __game.state == 'cleanup': # type: ignore
print('clean up match')
players = get_turns(__game.players)
if hasattr(player, 'hand'):
print('hand')
return flask.render_template(
'gameboard.html', state=__game.state, data=players # type: ignore
)
else:
print('no hand')
return flask.render_template('gameboard.html')
| 31.055556 | 78 | 0.617352 | 364 | 0.065116 | 0 | 0 | 3,428 | 0.613238 | 0 | 0 | 1,138 | 0.203578 |
461e9e37802d7e3293cd85e18609ca2b81610862 | 341 | gyp | Python | cluster-1.59/heatmap_clustering/binding.gyp | ericaflin/libheatmap | 18a1aadf4d4cea68f9fd6da20ae858479cf90dab | ["MIT"] | 2 | 2020-08-13T19:00:03.000Z | 2021-08-24T06:50:47.000Z | cluster-1.59/heatmap_clustering/binding.gyp | ericaflin/libheatmap | 18a1aadf4d4cea68f9fd6da20ae858479cf90dab | ["MIT"] | null | null | null | cluster-1.59/heatmap_clustering/binding.gyp | ericaflin/libheatmap | 18a1aadf4d4cea68f9fd6da20ae858479cf90dab | ["MIT"] | null | null | null |
{
"targets": [
{
"target_name": "cclust",
"sources": [ "./src/heatmap_clustering_js_module.cpp" ],
'dependencies': ['bonsaiclust']
},
{
'target_name': 'bonsaiclust',
'type': 'static_library',
'sources': [ 'src/cluster.c' ],
'cflags': ['-fPIC', '-I', '-pedantic', '-Wall']
}
]
}
| 20.058824 | 62 | 0.495601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.630499 |
461ea3bb055956b5f646cce50edcc52ff396af68 | 4,041 | py | Python | pcg_gazebo/parsers/urdf/__init__.py | TForce1/pcg_gazebo | 9ff88016b7b6903236484958ca7c6ed9f8ffb346 | ["ECL-2.0", "Apache-2.0"] | 40 | 2020-02-04T18:16:49.000Z | 2022-02-22T11:36:34.000Z | pcg_gazebo/parsers/urdf/__init__.py | awesomebytes/pcg_gazebo | 4f335dd460ef7c771f1df78b46a92fad4a62cedc | ["ECL-2.0", "Apache-2.0"] | 75 | 2020-01-23T13:40:50.000Z | 2022-02-09T07:26:01.000Z | pcg_gazebo/parsers/urdf/__init__.py | GimpelZhang/gazebo_world_generator | eb7215499d0ddc972d804c988fadab1969579b1b | ["ECL-2.0", "Apache-2.0"] | 18 | 2020-09-10T06:35:41.000Z | 2022-02-20T19:08:17.000Z |
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .actuator import Actuator
from .axis import Axis
from .box import Box
from .child import Child
from .collision import Collision
from .color import Color
from .cylinder import Cylinder
from .dynamics import Dynamics
from .gazebo import Gazebo
from .geometry import Geometry
from .hardware_interface import HardwareInterface
from .inertia import Inertia
from .inertial import Inertial
from .joint import Joint
from .limit import Limit
from .link import Link
from .mass import Mass
from .material import Material
from .mechanical_reduction import MechanicalReduction
from .mesh import Mesh
from .mimic import Mimic
from .origin import Origin
from .parent import Parent
from .robot import Robot
from .safety_controller import SafetyController
from .sphere import Sphere
from .texture import Texture
from .transmission import Transmission
from .type import Type
from .visual import Visual
def get_all_urdf_element_classes():
"""Get list of all URDF element classes."""
import sys
import inspect
from ..types import XMLBase
output = list()
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase) and obj._TYPE == 'urdf':
output.append(obj)
return output
def create_urdf_element(tag, *args):
"""URDF element factory.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
* `args`: Extra arguments for URDF element constructor.
> *Returns*
URDF element if `tag` refers to a valid URDF element.
`None`, otherwise.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj(*args)
return None
def create_urdf_type(tag):
"""Return handle of the URDF element type.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
> *Returns*
URDF element type if `tag` is valid, `None` otherwise`.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj
return None
def is_urdf_element(obj):
"""Test if XML element is an URDF element."""
from ..types import XMLBase
return obj.__class__ in XMLBase.__subclasses__() and \
obj._TYPE == 'urdf'
__all__ = [
'get_all_urdf_element_classes',
'create_urdf_element',
'create_urdf_type',
'is_urdf_element',
'Actuator',
'Axis',
'Box',
'Child',
'Collision',
'Color',
'Cylinder',
'Dynamics',
'Gazebo',
'Geometry',
'HardwareInterface',
'Inertia',
'Inertial',
'Joint',
'Limit',
'Link',
'Mass',
'Material',
'MechanicalReduction',
'Mesh',
'Mimic',
'Origin',
'Parent',
'Robot',
'SafetyController',
'Sphere',
'Texture',
'Transmission',
'Type',
'Visual'
]
| 26.411765 | 74 | 0.675823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,629 | 0.403118 |
461f2a3e6e5c1ff2b9ea5f5bf6bd2c1511d2a0ba | 514 | py | Python | homework/supporting.py | viaviare/MyFirstRepository | dab8530d16ab9746471b61b61e006d9febfed195 | ["Unlicense"] | null | null | null | homework/supporting.py | viaviare/MyFirstRepository | dab8530d16ab9746471b61b61e006d9febfed195 | ["Unlicense"] | null | null | null | homework/supporting.py | viaviare/MyFirstRepository | dab8530d16ab9746471b61b61e006d9febfed195 | ["Unlicense"] | null | null | null |
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
class Supporting:
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(driver, 10)
def is_element_present(self, driver, *args):
try:
self.driver.find_element(*args)
return True
except NoSuchElementException:
return False
def implicit_wait(self):
        self.driver.implicitly_wait(10)
 | 24.47619 | 61 | 0.677043 | 392 | 0.762646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4620512d639e2a5d97e4f0f0858dfee7bfe3ebb2 | 1,516 | py | Python | vplsSinVlan.py | javicond3/mininetVPLS | 73164f175d2a1873ccab2a317bd09aa5d09bdda6 | ["Unlicense"] | null | null | null | vplsSinVlan.py | javicond3/mininetVPLS | 73164f175d2a1873ccab2a317bd09aa5d09bdda6 | ["Unlicense"] | null | null | null | vplsSinVlan.py | javicond3/mininetVPLS | 73164f175d2a1873ccab2a317bd09aa5d09bdda6 | ["Unlicense"] | null | null | null |
"""Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class MyTopo( Topo ):
"Simple topology example."
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
h1 = self.addHost('h1', mac='00:00:00:00:00:01')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
s1 = self.addSwitch('s1')
s2 = self.addSwitch('s2')
s3 = self.addSwitch('s3')
s4 = self.addSwitch('s4')
s5 = self.addSwitch('s5')
s6 = self.addSwitch('s6')
# Add links
self.addLink(s1, h1, port1=1, port2=0)
self.addLink(s2, h2, port1=1, port2=0)
self.addLink(s3, h3, port1=1, port2=0)
self.addLink(s4, h4, port1=1, port2=0)
self.addLink(s5, h5, port1=1, port2=0)
self.addLink(s6, h6, port1=1, port2=0)
self.addLink(s1, s2)
self.addLink(s2, s3)
self.addLink(s3, s4)
self.addLink(s4, s1)
self.addLink(s4, s2)
self.addLink(s1, s5)
self.addLink(s4, s5)
self.addLink(s2, s6)
self.addLink(s3, s6)
topos = { 'vpls': ( lambda: MyTopo() ) }
| 26.137931 | 75 | 0.582454 | 1,157 | 0.763193 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.300792 |
46209cb73578b98d96c50b38e0aa7cf17f602e19 | 2,404 | py | Python | modin/engines/base/io/column_stores/feather_dispatcher.py | webclinic017/modin | 6e38ae4f459660460d07e7d93fe1dda4ad9214cd | ["ECL-2.0", "Apache-2.0"] | 1 | 2021-10-11T08:52:27.000Z | 2021-10-11T08:52:27.000Z | modin/engines/base/io/column_stores/feather_dispatcher.py | todd-yu/modin | f284bb3cfd6e53b468da09efd6edc32bafd10689 | ["ECL-2.0", "Apache-2.0"] | null | null | null | modin/engines/base/io/column_stores/feather_dispatcher.py | todd-yu/modin | f284bb3cfd6e53b468da09efd6edc32bafd10689 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses `FeatherDispatcher` class, that is used for reading `.feather` files."""
from modin.engines.base.io.column_stores.column_store_dispatcher import (
ColumnStoreDispatcher,
)
class FeatherDispatcher(ColumnStoreDispatcher):
"""
Class handles utils for reading `.feather` files.
Inherits some common for columnar store files util functions from
`ColumnStoreDispatcher` class.
"""
@classmethod
def _read(cls, path, columns=None, **kwargs):
"""
Read data from the file path, returning a query compiler.
Parameters
----------
path : str or file-like object
The filepath of the feather file.
columns : array-like, optional
Columns to read from file. If not provided, all columns are read.
**kwargs : dict
`read_feather` function kwargs.
Returns
-------
BaseQueryCompiler
Query compiler with imported data for further processing.
Notes
-----
`PyArrow` engine and local files only are supported for now,
multi threading is set to False by default.
PyArrow feather is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/api.html#feather-format
"""
if columns is None:
from pyarrow.feather import read_feather
df = read_feather(path)
# pyarrow.feather.read_feather doesn't support columns as pandas.Index
columns = list(df.columns)
return cls.build_query_compiler(path, columns, use_threads=False)
| 38.774194 | 89 | 0.688852 | 1,424 | 0.592346 | 0 | 0 | 1,195 | 0.497088 | 0 | 0 | 1,910 | 0.794509 |
4620f61e29c562c8eee22b703bf2ebfcf3321f30 | 1,482 | py | Python | rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | c4623927e5c5c5f9c3e702eb36497ea1d9fd1ff3 | ["BSD-3-Clause"] | null | null | null | rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | c4623927e5c5c5f9c3e702eb36497ea1d9fd1ff3 | ["BSD-3-Clause"] | null | null | null | rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | c4623927e5c5c5f9c3e702eb36497ea1d9fd1ff3 | ["BSD-3-Clause"] | null | null | null |
import pytest
from npRNN.tree_utils import Node, NodeTree
def test_merge_results():
#sentence='I know a name of the cat on a hat'
sentence='a name of the cat on a hat'
words=[Node(word) for word in sentence.split()]
tree=NodeTree(words, [0, 5, 3, 1, 2, 0, 0])
assert tree.phrase.name =='(((a name) (of the)) ((cat on) (a hat)))'
assert tree.phrase.depth==3
assert tree.history == [0, 5, 3, 1, 2, 0, 0]
tree=NodeTree(words, [0, 5, 0, 0, 1, 1, 0])
assert tree.phrase.name =='((((a name) of) the) ((cat on) (a hat)))'
assert tree.phrase.depth==4
assert tree.history == [0, 5, 0, 0, 1, 1, 0]
tree=NodeTree(words, [2,0,3,2,2,0,0])
assert tree.phrase.name =='(((a name) (of the)) ((cat (on a)) hat))'
assert tree.phrase.depth==4
assert tree.history == [2,0,3,2,2,0,0]
def test_merge_dicrection():
sentence='a name of the cat on a hat'
words=[Node(word) for word in sentence.split()]
merge_history=[3,1,1,0,2,1,0]
all_nodes, _ =NodeTree.directed_merge(words,merge_history)
print all_nodes
composites=all_nodes[len(words):]
print composites
left_merged=NodeTree.get_merge_direction(composites)
expected_left_merged = [[True, False, False, True],[True, True, False, True],\
[True, False, True],[True, True],[True, False, False],[True, False],[True]]
assert left_merged == expected_left_merged
depths = [x.depth for x in composites]
assert depths==[1, 1, 2, 3, 1, 2, 4]
| 38 | 83 | 0.632928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.153171 |
4621fb7b46924baa48b672c2c85f2d00296c68a8 | 1,267 | py | Python | pyrefine/script.py | jezcope/pyrefine | 44872592b1c0430d942d6901d7670e7b0ae77b11 | ["MIT"] | 27 | 2017-03-29T09:12:43.000Z | 2021-12-16T17:00:14.000Z | pyrefine/script.py | jezcope/pyrefine | 44872592b1c0430d942d6901d7670e7b0ae77b11 | ["MIT"] | 7 | 2017-04-02T22:10:22.000Z | 2021-06-01T21:25:26.000Z | pyrefine/script.py | jezcope/pyrefine | 44872592b1c0430d942d6901d7670e7b0ae77b11 | ["MIT"] | 2 | 2019-07-31T15:03:21.000Z | 2021-12-20T12:12:15.000Z |
"""A script is a series of operations."""
import json
import os
from .ops import create
class Script(object):
"""A script is a series of operations."""
def __init__(self, s=None):
"""Parse a script from a JSON string."""
if s is not None:
self.parsed_script = json.loads(s)
self.operations = [create(params)
for params in self.parsed_script]
def __len__(self):
"""Return the number of operations."""
return len(self.operations)
def execute(self, data):
"""Execute all operations on the provided dataset.
Args:
data (:class:`pandas.DataFrame`): The data to transform. Not
guaranteed immutable.
Returns:
:class:`pandas.DataFrame`: The transformed data.
"""
for op in self.operations:
data = op(data)
return data
def load_script(f):
"""Load and parse the script given.
Args:
f (:class:`file` or :class:`str`): Open file object or filename.
Returns:
:class:`Script`: The parsed script object.
"""
if isinstance(f, (str, os.PathLike)):
f = open(f)
with f:
return parse(f.read())
parse = Script
| 21.844828 | 72 | 0.566693 | 831 | 0.65588 | 0 | 0 | 0 | 0 | 0 | 0 | 621 | 0.490134 |
46220a2b446c7a9b49f727a4d45bc84e233eea22 | 571 | py | Python | makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | ["Apache-2.0"] | null | null | null | makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | ["Apache-2.0"] | null | null | null | makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | ["Apache-2.0"] | null | null | null |
from plasTeX import Command, Environment, sourceChildren
from plasTeX.Base.LaTeX import Math
from plasTeX.Base.TeX.Primitives import BoxCommand
# mhchem package - mostly handled by mathjax
# Overrive boxcommands inside MathJaX to avoid extra <script type="math/tex">
class MHBoxCommand(BoxCommand):
class math(Math.math):
@property
def source(self):
if self.hasChildNodes():
return u'$%s$' % sourceChildren(self)
return '$'
class ce(MHBoxCommand):
args = 'self'
class pu(MHBoxCommand):
args = 'self'
| 28.55 | 77 | 0.681261 | 298 | 0.521891 | 0 | 0 | 149 | 0.260946 | 0 | 0 | 143 | 0.250438 |
4624e2bb70d8902485f3a782d8d8f3f7917f1147 | 5,187 | py | Python | src/data_augmentation.py | pallabganguly/gestures-cnn | 8778760d7a5854a5987d24d7b8ff30afb216a624 | ["MIT"] | 1 | 2018-05-08T15:34:50.000Z | 2018-05-08T15:34:50.000Z | src/data_augmentation.py | pallabganguly/gestures-cnn | 8778760d7a5854a5987d24d7b8ff30afb216a624 | ["MIT"] | 9 | 2018-04-25T09:09:08.000Z | 2022-03-11T23:24:27.000Z | src/data_augmentation.py | pallabganguly/gestures-cnn | 8778760d7a5854a5987d24d7b8ff30afb216a624 | ["MIT"] | 1 | 2018-06-14T08:44:48.000Z | 2018-06-14T08:44:48.000Z |
"""
Totally untested file. Will be removed in subsequent commits
"""
import tensorflow as tf
import matplotlib.image as mpimg
import numpy as np
from math import ceil, floor
import os
IMAGE_SIZE = 720
def central_scale_images(X_imgs, scales):
# Various settings needed for Tensorflow operation
boxes = np.zeros((len(scales), 4), dtype = np.float32)
for index, scale in enumerate(scales):
x1 = y1 = 0.5 - 0.5 * scale # To scale centrally
x2 = y2 = 0.5 + 0.5 * scale
boxes[index] = np.array([y1, x1, y2, x2], dtype = np.float32)
box_ind = np.zeros((len(scales)), dtype = np.int32)
crop_size = np.array([IMAGE_SIZE, IMAGE_SIZE], dtype = np.int32)
X_scale_data = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (1, IMAGE_SIZE, IMAGE_SIZE, 3))
# Define Tensorflow operation for all scales but only one base image at a time
tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img_data in X_imgs:
batch_img = np.expand_dims(img_data, axis = 0)
scaled_imgs = sess.run(tf_img, feed_dict = {X: batch_img})
X_scale_data.extend(scaled_imgs)
X_scale_data = np.array(X_scale_data, dtype = np.float32)
return X_scale_data
from math import ceil, floor
def get_translate_parameters(index):
if index == 0: # Translate left 20 percent
offset = np.array([0.0, 0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = 0
w_end = int(ceil(0.8 * IMAGE_SIZE))
h_start = 0
h_end = IMAGE_SIZE
elif index == 1: # Translate right 20 percent
offset = np.array([0.0, -0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = int(floor((1 - 0.8) * IMAGE_SIZE))
w_end = IMAGE_SIZE
h_start = 0
h_end = IMAGE_SIZE
elif index == 2: # Translate top 20 percent
offset = np.array([0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = 0
h_end = int(ceil(0.8 * IMAGE_SIZE))
else: # Translate bottom 20 percent
offset = np.array([-0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = int(floor((1 - 0.8) * IMAGE_SIZE))
h_end = IMAGE_SIZE
return offset, size, w_start, w_end, h_start, h_end
def translate_images(X_imgs):
offsets = np.zeros((len(X_imgs), 2), dtype = np.float32)
n_translations = 4
X_translated_arr = []
tf.reset_default_graph()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(n_translations):
X_translated = np.zeros((len(X_imgs), IMAGE_SIZE, IMAGE_SIZE, 3),
dtype = np.float32)
X_translated.fill(0.0) # Filling background color
base_offset, size, w_start, w_end, h_start, h_end = get_translate_parameters(i)
offsets[:, :] = base_offset
glimpses = tf.image.extract_glimpse(X_imgs, size, offsets)
glimpses = sess.run(glimpses)
X_translated[:, h_start: h_start + size[0], \
w_start: w_start + size[1], :] = glimpses
X_translated_arr.extend(X_translated)
X_translated_arr = np.array(X_translated_arr, dtype = np.float32)
return X_translated_arr
def rotate_images(X_imgs):
X_rotate = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
k = tf.placeholder(tf.int32)
tf_img = tf.image.rot90(X, k = k)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
for i in range(3): # Rotation at 90, 180 and 270 degrees
rotated_img = sess.run(tf_img, feed_dict = {X: img, k: i + 1})
X_rotate.append(rotated_img)
X_rotate = np.array(X_rotate, dtype = np.float32)
return X_rotate
def flip_images(X_imgs):
X_flip = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
tf_img1 = tf.image.flip_left_right(X)
tf_img2 = tf.image.flip_up_down(X)
tf_img3 = tf.image.transpose_image(X)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3], feed_dict = {X: img})
X_flip.extend(flipped_imgs)
X_flip = np.array(X_flip, dtype = np.float32)
return X_flip
# Produce each image at scaling of 90%, 75% and 60% of original image.
X_imgs = os.listdir("/home/pallab/gestures-cnn/images/resized/")
scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60])
translated_imgs = translate_images(X_imgs)
rotated_imgs = rotate_images(X_imgs)
flipped_images = flip_images(X_imgs)
| 39.295455 | 91 | 0.634664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.09678 |
4626213aaa7096da9c7ee3b429d3bfce1126595c | 1,721 | py | Python | link_to_the_past/hashes.py | zsquareplusc/lttp-backup | 32862ec136c9bda412256142c7d44d1564aab784 | ["BSD-3-Clause"] | null | null | null | link_to_the_past/hashes.py | zsquareplusc/lttp-backup | 32862ec136c9bda412256142c7d44d1564aab784 | ["BSD-3-Clause"] | null | null | null | link_to_the_past/hashes.py | zsquareplusc/lttp-backup | 32862ec136c9bda412256142c7d44d1564aab784 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
#
# (C) 2012-2016 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Link To The Past - a backup tool
Hash functions and commands.
"""
import hashlib
import zlib
class CRC32(object):
"""\
CRC32 API compatible to the hashlib functions (subset used by this program).
>>> h = CRC32()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'4a17b156'
"""
def __init__(self):
self.value = 0
def update(self, data):
self.value = zlib.crc32(data, self.value) & 0xffffffff
def hexdigest(self):
return '{:08x}'.format(self.value)
class NoHash(object):
"""\
API compatible to the hashlib functions (subset used by this program).
>>> h = NoHash()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'-'
"""
def __init__(self):
pass
def update(self, data):
pass
def hexdigest(self):
return '-'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
SUPPORTED_HASHES = {
'NONE': NoHash,
'CRC32': CRC32,
'MD5': hashlib.md5,
'SHA-256': hashlib.sha256,
'SHA-512': hashlib.sha512,
}
def get_factory(name):
"""\
Get an object for calculating a hash.
>>> f = get_factory('SHA-256')
>>> h = f()
>>> h.update(b'Hello World')
>>> h.hexdigest()
'a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e'
"""
if name is None:
name = 'NONE'
return SUPPORTED_HASHES[name.upper()]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import doctest
doctest.testmod()
| 20.247059 | 80 | 0.535735 | 741 | 0.430564 | 0 | 0 | 0 | 0 | 0 | 0 | 1,006 | 0.584544 |
462677ff68dd069f07b9a100cb78c22fa238e35f | 360 | py | Python | Interview/langTrans.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | ["MIT"] | 1 | 2021-02-19T11:00:11.000Z | 2021-02-19T11:00:11.000Z | Interview/langTrans.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | ["MIT"] | null | null | null | Interview/langTrans.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | ["MIT"] | null | null | null |
#!/usr/bin/env python3.8
table="".maketrans("0123456789","\N{Devanagari digit zero}\N{Devanagari digit one}"
"\N{Devanagari digit two}\N{Devanagari digit three}"
"\N{Devanagari digit four}\N{Devanagari digit five}"
"\N{Devanagari digit six}\N{Devanagari digit seven}"
"\N{Devanagari digit eight}\N{Devanagari digit nine}")
print("0123456789".translate(table))
 | 45 | 83 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.861111 |
4626c0de5deedd2af689cbda3e1e357bed965d98 | 4,909 | py | Python | cutde/opencl.py | brendanjmeade/cutde | 8d9c01ab7e6073e8a8a45af927644c9b676e5ec6 | ["CNRI-Python"] | 1 | 2021-04-23T21:07:29.000Z | 2021-04-23T21:07:29.000Z | cutde/opencl.py | brendanjmeade/cutde | 8d9c01ab7e6073e8a8a45af927644c9b676e5ec6 | ["CNRI-Python"] | null | null | null | cutde/opencl.py | brendanjmeade/cutde | 8d9c01ab7e6073e8a8a45af927644c9b676e5ec6 | ["CNRI-Python"] | null | null | null |
import logging
import warnings
import pyopencl
import pyopencl.array
logger = logging.getLogger(__name__)
gpu_initialized = False
gpu_ctx = None
gpu_queue = None
def report_devices(ctx):
device_names = [d.name for d in ctx.devices]
logger.info("initializing opencl context with devices = " + str(device_names))
def initialize_with_ctx(ctx):
global gpu_initialized, gpu_ctx, gpu_queue
gpu_ctx = ctx
gpu_queue = pyopencl.CommandQueue(
gpu_ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE
)
gpu_initialized = True
report_devices(ctx)
def avoid_apple_cpu(ctx):
"""
The Apple CPU OpenCL implementation is awful. Instead, we should just use
PoCL.
"""
if ctx.devices[0].platform.name == "Apple" and "CPU" in ctx.devices[0].name:
platforms = pyopencl.get_platforms()
platform_idx = None
for i, p in enumerate(platforms):
if p.name != "Apple":
platform_idx = i
else:
apple_platform_idx = i
if platform_idx is not None:
warnings.warn(
"The OpenCL context created used the Apple CPU"
" implementation which is not supported. Trying again"
f" with a different platform: {p.name}"
)
return pyopencl.create_some_context(answers=[str(platform_idx)])
# If no other platforms were found, let's try to
# find a non-CPU device like an Iris Pro.
platform_idx = apple_platform_idx
device_idx = None
for i, d in enumerate(platforms[platform_idx].get_devices()):
if "CPU" in d.name:
continue
device_idx = i
break
if device_idx is not None:
warnings.warn(
"The OpenCL context created used the Apple CPU"
" implementation which is not supported. Trying again"
f" with a different device: {d.name}"
)
return pyopencl.create_some_context(
answers=[str(platform_idx), str(device_idx)]
)
raise NotImplementedError(
"cutde does not support the Apple CPU OpenCL implementation and no other"
" platform or device was found. Please consult the cutde README"
)
return ctx
def ensure_initialized():
global gpu_initialized
if not gpu_initialized:
ctx = pyopencl.create_some_context()
ctx = avoid_apple_cpu(ctx)
initialize_with_ctx(ctx)
def ptr(arr):
if type(arr) is pyopencl.array.Array:
return arr.data
return arr
def to_gpu(arr, float_type):
ensure_initialized()
if type(arr) is pyopencl.array.Array:
return arr
to_type = arr.astype(float_type)
return pyopencl.array.to_device(gpu_queue, to_type)
def zeros_gpu(shape, float_type):
ensure_initialized()
return pyopencl.array.zeros(gpu_queue, shape, float_type)
def empty_gpu(shape, float_type):
ensure_initialized()
return pyopencl.array.empty(gpu_queue, shape, float_type)
def threaded_get(arr):
return arr.get()
class ModuleWrapper:
def __init__(self, module):
self.module = module
def __getattr__(self, name):
kernel = getattr(self.module, name)
def provide_queue_wrapper(*args, grid=None, block=None, **kwargs):
global_size = [b * g for b, g in zip(grid, block)]
arg_ptrs = [ptr(a) for a in args]
return kernel(gpu_queue, global_size, block, *arg_ptrs, **kwargs)
return provide_queue_wrapper
def compile(code):
ensure_initialized()
compile_options = []
# debug_opts = ["-g", "-Werror"]
# compile_options.extend(debug_opts)
fast_opts = [
# '-cl-finite-math-only',
"-cl-unsafe-math-optimizations",
# '-cl-no-signed-zeros',
"-cl-mad-enable",
# '-cl-strict-aliasing'
]
compile_options.extend(fast_opts)
return ModuleWrapper(pyopencl.Program(gpu_ctx, code).build(options=compile_options))
cluda_preamble = """
// taken from pyopencl._cluda
#define LOCAL_BARRIER barrier(CLK_LOCAL_MEM_FENCE)
// 'static' helps to avoid the "no previous prototype for function" warning
#if __OPENCL_VERSION__ >= 120
#define WITHIN_KERNEL static
#else
#define WITHIN_KERNEL
#endif
#define KERNEL __kernel
#define GLOBAL_MEM __global
#define LOCAL_MEM __local
#define LOCAL_MEM_DYNAMIC __local
#define LOCAL_MEM_ARG __local
#define CONSTANT __constant
// INLINE is already defined in Beignet driver
#ifndef INLINE
#define INLINE inline
#endif
#define SIZE_T size_t
#define VSIZE_T size_t
// used to align fields in structures
#define ALIGN(bytes) __attribute__ ((aligned(bytes)))
#if defined(cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#elif defined(cl_amd_fp64)
#pragma OPENCL EXTENSION cl_amd_fp64: enable
#endif
"""
| 27.734463 | 88 | 0.66266 | 460 | 0.093705 | 0 | 0 | 0 | 0 | 0 | 0 | 1,664 | 0.338969 |
4626daaa44d52cdbb1bec3a34b51700caf38c8dc | 448 | py | Python | tests/test_lamost_tools.py | igomezv/astroNN | 50af116f9cbfc684b63e7ddcf8829343a455722b | ["MIT"] | 156 | 2017-10-22T01:29:10.000Z | 2022-03-14T10:28:09.000Z | tests/test_lamost_tools.py | AbdulfattahBaalawi/astroNN | 0b970dd1a8d4d5e6d611ffa52cfd3c2ffdcb4643 | ["MIT"] | 16 | 2017-11-02T21:29:28.000Z | 2022-03-14T08:40:41.000Z | tests/test_lamost_tools.py | AbdulfattahBaalawi/astroNN | 0b970dd1a8d4d5e6d611ffa52cfd3c2ffdcb4643 | ["MIT"] | 46 | 2017-11-01T18:56:03.000Z | 2022-03-07T06:44:22.000Z |
import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
class LamostToolsTestCase(unittest.TestCase):
def test_wavelength_solution(self):
wavelength_solution()
wavelength_solution(dr=5)
self.assertRaises(ValueError, wavelength_solution, dr=1)
def test_norm(self):
pseudo_continuum(np.ones(3909), np.ones(3909))
if __name__ == '__main__':
unittest.main()
| 23.578947 | 64 | 0.734375 | 295 | 0.658482 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.022321 |
4627027be2fa84a4ea17d7c6b65cd28da93acd06 | 8,265 | py | Python | phase-iii-client/services/aureas/views.py | williamegomez/AUREAS | 06f5bda347481628ba2a08a854b76151a59d6e66 | ["MIT"] | 5 | 2019-02-12T18:46:40.000Z | 2019-02-24T15:24:43.000Z | phase-iii-client/services/aureas/views.py | williamegomez/AUREAS | 06f5bda347481628ba2a08a854b76151a59d6e66 | ["MIT"] | null | null | null | phase-iii-client/services/aureas/views.py | williamegomez/AUREAS | 06f5bda347481628ba2a08a854b76151a59d6e66 | ["MIT"] | 1 | 2019-11-29T02:28:09.000Z | 2019-11-29T02:28:09.000Z |
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import JSONParser
import numpy as np
import json
import os
from .utils.spectrogram_utils import SpectrogramUtils
from .utils.feature_extraction_utils import FeatureExtractionUtils
from .utils.classification_utils import ClassificationUtils
from .utils.file_utils import FileUtils
from .utils.dir_utils import DirUtils
from .constants.headers import headers_data, headers_clusters, headers_clusters_no_display
file_utils = FileUtils()
dir_utils = DirUtils()
@api_view(['GET'])
@parser_classes((JSONParser,))
def get_species(request):
species = os.listdir('clusters/model/')
species_data = []
for specie in species:
with open('clusters/model/' + specie, 'r') as infile:
data = json.load(infile)
species_data.append(data)
return HttpResponse(json.dumps(species_data, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_clusters(request):
if request.method == 'POST':
data = request.data
directory = data['dir']
files = data['files']
features, segs, metadata = file_utils.process_files(
directory, files)
classification_utils = ClassificationUtils()
ex_level = 1
it_num = 5
data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
mad = 'binomial'
gad = '3pi'
datanorm, mininums, maximums = classification_utils.norm(data)
recon, mean_class, std_class = classification_utils.lamda(
ex_level, it_num, datanorm, mad, gad)
representive_calls = file_utils.get_representative_calls(
recon, datanorm, metadata)
keys_results = [header['label'] for header in headers_data]
keys_clusters = [header['label'] for header in headers_clusters]
keys_clusters_no_display = [header['label']
for header in headers_clusters_no_display]
data_results = []
for i, value in enumerate(metadata):
values = [value[0], str(recon[i]), *
(value[1:].tolist()), datanorm[i]]
zipbObj = zip(keys_results, values)
data_results.append(dict(zipbObj))
data_clusters = []
for i, value in enumerate(representive_calls):
zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
data_clusters.append(dict(zipbObj))
response = {
'results': {
'headers': headers_data,
'data': data_results,
'model': {
'features': datanorm.tolist(),
'min_values': mininums.tolist(),
'max_values': maximums.tolist(),
'metadata': metadata.tolist()
}
},
'clusters': {
'headers': headers_clusters,
'data': data_clusters
}
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_segment_in_image(request):
if request.method == 'POST':
data = request.data
spectrogram_utils = SpectrogramUtils()
filename = spectrogram_utils.get_segment_in_image(data['dir'],
data['filename'], 1, float(data['start']) - 0.5, float(data['end']) + 0.5, float(data['min_freq']) - 200, float(data['max_freq']) + 200)
response = {
'url': filename
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def save_cluster(request):
if request.method == 'POST':
data = request.data
features = np.array(data['model']['features'])
min_values = data['model']['min_values']
max_values = data['model']['max_values']
metadata = np.array(data['model']['metadata'])
indices = np.array(data['selected'])
audio_path, image_path, metadata_representative = file_utils.save_representative_call(
data['name'], features[indices], metadata[indices])
model = {
'name': data['name'],
'metadata': metadata_representative.tolist(),
'mean_values': np.mean(features[indices], axis=0).tolist(),
'std_values': np.std(features[indices], axis=0).tolist(),
'min_values': min_values,
'max_values': max_values,
'image_path': image_path,
'audio_path': audio_path
}
dir_utils.create_dir('clusters/model/')
with open('clusters/model/' + data['name'], 'w') as outfile:
json.dump(model, outfile)
return HttpResponse(json.dumps(model, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def search_clusters(request):
if request.method == 'POST':
data = request.data
directory = data['dir']
files = data['files']
species = data['species']
features, segs, metadata = file_utils.process_files(
directory, files)
classification_utils = ClassificationUtils()
ex_level = 1
it_num = 5
data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
mad = 'binomial'
gad = '3pi'
num_datos, num_feat = data.shape
mean_class = 0.5 * np.ones((1, num_feat))
std_class = 0.25 * np.ones((1, num_feat))
min_values = np.empty((0, num_feat))
max_values = np.empty((0, num_feat))
for specie in species:
with open('clusters/model/' + specie, 'r') as infile:
model = json.load(infile)
mean_class = np.vstack(
(mean_class, np.array(model['mean_values'])))
std_class = np.vstack(
(std_class, np.array(model['std_values'])))
min_values = np.vstack(
(min_values, np.array(model['min_values'])))
max_values = np.vstack(
(max_values, np.array(model['max_values'])))
general_min_values = np.min(min_values, axis=0)
general_max_values = np.max(max_values, axis=0)
datanorm, mininums, maximums = classification_utils.norm(
data, general_min_values, general_max_values)
recon = classification_utils.predict_lamda(
ex_level, datanorm, mad, gad, mean_class, std_class)
representive_calls = file_utils.get_representative_calls(
recon, datanorm, metadata)
keys_results = [header['label'] for header in headers_data]
keys_clusters = [header['label'] for header in headers_clusters]
keys_clusters_no_display = [header['label']
for header in headers_clusters_no_display]
data_results = []
for i, value in enumerate(metadata):
species_name = species[recon[i] - 1] if recon[i] > 0 else 'NIC'
values = [value[0], species_name, *
(value[1:].tolist()), datanorm[i]]
zipbObj = zip(keys_results, values)
data_results.append(dict(zipbObj))
data_clusters = []
for i, value in enumerate(representive_calls):
value[0] = species[i - 1] if i > 0 else 'NIC'
zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
data_clusters.append(dict(zipbObj))
response = {
'results': {
'headers': headers_data,
'data': data_results,
'model': {
'features': datanorm.tolist(),
'min_values': mininums.tolist(),
'max_values': maximums.tolist(),
'metadata': metadata.tolist()
}
},
'clusters': {
'headers': headers_clusters,
'data': data_clusters
}
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
| 36.409692 | 194 | 0.580762 | 0 | 0 | 0 | 0 | 7,627 | 0.922807 | 0 | 0 | 802 | 0.097036 |
4629ec7d3ad2828b6cef4cc59b99d7273c5fdb56 | 1,416 | py | Python | PyPoll/Homework/main.py | VioletData/python-challenge | 5a59aca85426387db21b0138dfe41aca19aac40d | ["ADSL"] | null | null | null | PyPoll/Homework/main.py | VioletData/python-challenge | 5a59aca85426387db21b0138dfe41aca19aac40d | ["ADSL"] | null | null | null | PyPoll/Homework/main.py | VioletData/python-challenge | 5a59aca85426387db21b0138dfe41aca19aac40d | ["ADSL"] | null | null | null |
# Modules
import os
import csv
#Set up path for file
csvpath=os.path.join("..", "Resources", "election_data.csv" )
#print(csvpath)
total_votes=0
#total_profit=0
#previous_value=0
#current_value=0
#list_changes=[]
print("Election Results")
print("---------------------")
#Open the csv file
with open(csvpath, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
#print(csvreader)
#Read the header row
csv_header=next(csvreader)
#print(f"CSV Header: {csv_header}")
#Read each row of data after the header
for row in csvreader:
total_votes=total_votes+1
current_value=int(row[0])
#total_profit=total_profit+1
#current_value=int(row[1])
#monthly_diff=current_value-previous_value
#list_changes.append(monthly_diff)
#list_changes.remove("867884")
#previous_value=current_value
#avg_monthly_diff=sum[list_changes]
# Calculate the average of the changes in Profit/Losses over the entire period
# Determine the greatest increase in profits (date and amount) over the entire period
# Determine the greatest decrease in losses (date and amount) over the entire period
print("Total Votes: " + str(total_votes))
print("---------------------")
#print("Total: $"+str(total_profit))
print("---------------------")
#print("Average Change: $" +str(total_profit))
print("---------------------")
#print(row)
| 23.6 | 86 | 0.66596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 969 | 0.684322 |
462a57a1182eb2397cc4d73242d61fbadafdf7cc | 367 | py | Python | test/qa-tests/buildscripts/resmokelib/logging/__init__.py | Mrliu8023/mongo-tools | b9048617a6dc788aae9286d0c2bd3fefe49d23d3 | ["Apache-2.0"] | 1 | 2022-02-17T10:51:19.000Z | 2022-02-17T10:51:19.000Z | test/qa-tests/buildscripts/resmokelib/logging/__init__.py | Mrliu8023/mongo-tools | b9048617a6dc788aae9286d0c2bd3fefe49d23d3 | ["Apache-2.0"] | null | null | null | test/qa-tests/buildscripts/resmokelib/logging/__init__.py | Mrliu8023/mongo-tools | b9048617a6dc788aae9286d0c2bd3fefe49d23d3 | ["Apache-2.0"] | null | null | null |
"""
Extension to the logging package to support buildlogger.
"""
# Alias the built-in logging.Logger class for type checking arguments. Those interested in
# constructing a new Logger instance should use the loggers.new_logger() function instead.
from logging import Logger
from . import config
from . import buildlogger
from . import flush
from . import loggers
| 24.466667 | 90 | 0.784741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.66485 |
462b42e0c4130840f3bc0cd918b3623bf21c3cd2 | 2,645 | py | Python | cohesity_management_sdk/models/azure_cloud_credentials.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | ["Apache-2.0"] | 1 | 2021-01-07T20:36:22.000Z | 2021-01-07T20:36:22.000Z | cohesity_management_sdk/models/azure_cloud_credentials.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | ["Apache-2.0"] | null | null | null | cohesity_management_sdk/models/azure_cloud_credentials.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class AzureCloudCredentials(object):
"""Implementation of the 'AzureCloudCredentials' model.
Specifies the cloud credentials to connect to a Microsoft
Azure service account.
Attributes:
storage_access_key (string): Specifies the access key to use when
accessing a storage tier in a Azure cloud service.
storage_account_name (string): Specifies the account name to use when
accessing a storage tier in a Azure cloud service.
tier_type (TierTypeAzureCloudCredentialsEnum): Specifies the storage
class of Azure. AzureTierType specifies the storage class for
Azure. 'kAzureTierHot' indicates a tier type of Azure properties
that is accessed frequently. 'kAzureTierCool' indicates a tier
type of Azure properties that is accessed less frequently, and
stored for at least 30 days. 'kAzureTierArchive' indicates a tier
type of Azure properties that is accessed rarely and stored for at
least 180 days.
"""
# Create a mapping from Model property names to API property names
_names = {
"storage_access_key":'storageAccessKey',
"storage_account_name":'storageAccountName',
"tier_type":'tierType'
}
def __init__(self,
storage_access_key=None,
storage_account_name=None,
tier_type=None):
"""Constructor for the AzureCloudCredentials class"""
# Initialize members of the class
self.storage_access_key = storage_access_key
self.storage_account_name = storage_account_name
self.tier_type = tier_type
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
storage_access_key = dictionary.get('storageAccessKey')
storage_account_name = dictionary.get('storageAccountName')
tier_type = dictionary.get('tierType')
# Return an object of this model
return cls(storage_access_key,
storage_account_name,
tier_type)
| 35.266667 | 81 | 0.654064 | 2,585 | 0.977316 | 0 | 0 | 904 | 0.341777 | 0 | 0 | 1,819 | 0.687713 |
462c2089ebfd3afcf679c3d29b9f6e291acb4dc2 | 525 | py | Python | src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 556ccc920b96cedbdc2f554a3bee28a793be4483 | ["MIT"] | null | null | null | src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 556ccc920b96cedbdc2f554a3bee28a793be4483 | ["MIT"] | null | null | null | src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 556ccc920b96cedbdc2f554a3bee28a793be4483 | ["MIT"] | null | null | null |
from utils.utils import *
lines = get_input(__file__)
lines_as_nums = lines_to_nums(lines)
def part1(nums):
incr = 0
cur = nums[0]
for num in nums:
if num > cur:
incr += 1
cur = num
return incr
def part2():
nums = []
for i in range(len(lines_as_nums)):
if i < len(lines_as_nums) - 2:
nums.append(lines_as_nums[i] + lines_as_nums[i + 1] + lines_as_nums[i + 2])
return part1(nums)
print("part1:", part1(lines_as_nums))
print("part2:", part2())
| 19.444444 | 87 | 0.590476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.030476 |
462c32bbc3b282511e2c46116729931f71fbbf10 | 1,170 | py | Python | scripts/sqlite_firestore_migration.py | namuan/news-rider | 2f8f5204eda717e39ab7d4c048692d5ec2eb5449 | ["MIT"] | 5 | 2021-04-26T20:46:30.000Z | 2021-05-03T07:29:31.000Z | scripts/sqlite_firestore_migration.py | namuan/news-rider | 2f8f5204eda717e39ab7d4c048692d5ec2eb5449 | ["MIT"] | null | null | null | scripts/sqlite_firestore_migration.py | namuan/news-rider | 2f8f5204eda717e39ab7d4c048692d5ec2eb5449 | ["MIT"] | null | null | null |
import datetime
import os
import sys
from google.cloud import firestore
from peewee import *
sys.path.append(os.getcwd())
home_dir = os.getenv('HOME')
db_file_path = os.getcwd() + '/../../data/news_rider.db'
print("Reading database from {}".format(db_file_path))
old_db = SqliteDatabase(db_file_path)
class NewsItem(Model):
NewsUrl = CharField(primary_key=True)
NewsTitle = CharField()
TimeStamp = DateTimeField(default=datetime.datetime.now)
class Meta:
database = old_db
db = firestore.Client()
posts_ref = db.collection('posts')
def save_data(url, title, timestamp):
print(f"Adding {url} for database")
posts_ref.add({
'news_url': url,
'news_title': title,
'timestamp': timestamp
})
def exists_in_database(url):
print(f"Checking if {url} exists in database")
news_found_ref = posts_ref.where('news_url', '==', url).limit(1)
return next(news_found_ref.get(), None) is not None
if __name__ == '__main__':
for news_item in NewsItem.select():
if not exists_in_database(news_item.NewsUrl):
save_data(news_item.NewsUrl, news_item.NewsTitle, news_item.TimeStamp)
| 23.877551 | 82 | 0.692308 | 196 | 0.167521 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.162393 |
462c4f9e4def6f4455874dce4f3095e44613b4b1 | 1,372 | py | Python | tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 871ef7f5c41d496aa8ad674854792ebd52ce1546 | ["Apache-2.0"] | 1 | 2019-12-21T03:31:33.000Z | 2019-12-21T03:31:33.000Z | tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 871ef7f5c41d496aa8ad674854792ebd52ce1546 | ["Apache-2.0"] | null | null | null | tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 871ef7f5c41d496aa8ad674854792ebd52ce1546 | ["Apache-2.0"] | 1 | 2019-12-21T03:31:39.000Z | 2019-12-21T03:31:39.000Z |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from tensorforce.core.baselines import NetworkBaseline
class MLPBaseline(NetworkBaseline):
"""
Multi-layer perceptron baseline (single-state) consisting of dense layers.
"""
def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):
"""
Multi-layer perceptron baseline.
Args:
sizes: List of dense layer sizes
"""
layers_spec = []
for size in sizes:
layers_spec.append({'type': 'dense', 'size': size})
super(MLPBaseline, self).__init__(layers_spec, scope, summary_labels)
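# Hedged usage sketch (not part of the original file): the baseline is built from
# a plain list of dense layer sizes; the spec-style reference below assumes the
# usual TensorForce registry key for this class.
# baseline = MLPBaseline(sizes=[64, 64])
# baseline_spec = dict(type='mlp', sizes=[64, 64])  # registry key is an assumption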
| 33.463415 | 80 | 0.671283 | 524 | 0.381924 | 0 | 0 | 0 | 0 | 0 | 0 | 904 | 0.658892 |
462cfd1b7c44355d2edae99099ee57b431d8d9a4 | 769 | py | Python | km3pipe/utils/rtree.py | kabartay/km3pipe | 491c425486553e8986682d2b0614918dd23cc964 | [
"MIT"
]
| 2 | 2017-01-19T17:22:49.000Z | 2020-04-18T14:00:38.000Z | km3pipe/utils/rtree.py | kabartay/km3pipe | 491c425486553e8986682d2b0614918dd23cc964 | [
"MIT"
]
| null | null | null | km3pipe/utils/rtree.py | kabartay/km3pipe | 491c425486553e8986682d2b0614918dd23cc964 | [
"MIT"
]
| null | null | null | # coding=utf-8
# Filename: rtree.py
"""
Print the ROOT file structure.
Usage:
rtree FILE
rtree (-h | --help)
rtree --version
Options:
FILE Input file.
-h --help Show this screen.
"""
from __future__ import division, absolute_import, print_function
from km3pipe.io.root import open_rfile
__author__ = "Moritz Lotze"
__copyright__ = "Copyright 2016, Moritz Lotze and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Moritz Lotze"
__email__ = "[email protected]"
__status__ = "Development"
def rtree(rfile):
rfile = open_rfile(rfile)
for k in rfile.walk():
print(k)
rfile.close()
def main():
from docopt import docopt
arguments = docopt(__doc__)
rtree(arguments['FILE'])
| 19.225 | 76 | 0.684005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.443433 |
462e9733585d0e06c8930267d52894b75b0d0b14 | 15,952 | py | Python | task_part1_learning.py | till-lu/cit_lcp_2020 | b547185f8be74f57773c3d21ce5e4327b363a2f1 | [
"MIT"
]
| null | null | null | task_part1_learning.py | till-lu/cit_lcp_2020 | b547185f8be74f57773c3d21ce5e4327b363a2f1 | [
"MIT"
]
| null | null | null | task_part1_learning.py | till-lu/cit_lcp_2020 | b547185f8be74f57773c3d21ce5e4327b363a2f1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from psychopy.visual import Window, TextStim
from psychopy.core import wait, Clock, quit
from psychopy.event import clearEvents, waitKeys, Mouse
from psychopy.gui import Dlg
from time import gmtime, strftime
from codecs import open
from random import shuffle, choice, randint
from copy import deepcopy
from psychopy.iohub import launchHubServer
from numpy import mean, std
from datetime import datetime
from itertools import permutations
import random
## for testing
testing = False # True for testing, False for real recording
###
main_ddline = 1 # sec
isi_set = (500, 800, 1100)
instruction_color = '#111111' #formerly = #9999FF
############ MAIN ITEMS - paste from JS
probe_crime_list_1 = ' Ausgeben als : Tim Koch\n\n Nachricht an Deckname : Blaue Jacke\n\n Aktion : Operation Kuh\n\n Objekt : Regen Akte\n\n Inhalt des Objektes : Helikopter Pläne\n\n Adresse : Hai Straße'
probe_crime_list_2 = ' Ausgeben als : Paul Nowak\n\n Nachricht an Deckname : Weißes Shirt\n\n Aktion : Operation Fichte\n\n Objekt : Eulen Akte\n\n Inhalt des Objektes : Messing Pläne\n\n Adresse : Löwen Straße'
crime_list_1 = ["Tim Koch", "Blaue Jacke", "Operation Kuh", "Regen Akte", "Helikopter Pläne", "Hai Straße"]
crime_list_2 = ["Paul Nowak", "Weißes Shirt","Operation Fichte","Eulen Akte","Messing Pläne","Löwen Straße"]
dummy_list_numbers = [0, 1, 2, 3, 4, 5]
training_recall_item = {0 : 'Ausgeben als', 1 : 'Nachricht an Deckname', 2 : 'Aktion', 3 : 'Objekt', 4 : 'Inhalt des Objektes', 5 : 'Adresse'}
rounds = 1
if testing:
escape_key = 'escape'
instr_wait = 0.1
else:
escape_key = 'notallowed'
instr_wait = 0.5
# EXECUTE all main functions here
def execute():
start_input() # prompt to input stuff
# now initiate stuff
set_screen() # creates psychopy screen and stim objects
# window opens
    create_file() # creates output file
consent_instructions()
training_instruction()
which_round_indicator()
training_software()
which_round_indicator()
training_list()
training_software()
which_round_indicator()
training_list()
training_software()
final_slide()
win.mouseVisible = False # hide mouse
print("************** END OF LEARNING TASK **************")
ending() # saves demographic & final infos, gives feedback
waitKeys(keyList = ['b']) # press B to end the exp (prevents subject from closing window)
quit()
def consent_instructions():
show_instruction("Bitte füllen Sie die Einverständniserklärung zur Teilnahme am Experiment aus. \nSie sollten diese vor sich auf dem Tisch finden. Bei Unklarheiten oder weiteren Fragen heben Sie leise Ihre Hand.\nWenn Sie damit fertig sind, drücken Sie die Leertaste, um mit dem Experiment zu starten.")
show_instruction("Sie werden nun eine Reihe von Aufgaben am Computer durchführen. Bitte lesen und befolgen Sie die Anweisungen sorgfältig. Sollten Sie während des Experiments Fragen haben, melden Sie sich bei der Versuchsleitung, bevor Sie fortfahren.\nDrücken Sie die Leertaste, um die Anweisungen zu sehen.")
def which_round_indicator():
global condition
if rounds == 1:
show_instruction("Es folgt nun die erste Runde, in der die soeben gezeigten Wortpaare abgefragt werden. Geben Sie diese exakt so, wie sie Ihnen eben gezeigt wurden, ein. \nLeertaste drücken, um fortzufahren.")
elif rounds == 2:
show_instruction("Es folgen erneut alle Informationen, die Sie benötigen, wenn Sie sich als Komplize ausgeben. Damit diese Täuschung funktioniert, ist es sehr wichtig, dass jedes Detail der Nachricht korrekt ist. Bitte prägen Sie sich deshalb erneut alle Informationen ein. \nLeertaste drücken, um fortzufahren.")
elif rounds == 3:
show_instruction("Es folgt nun eine dritte und letzte Runde. Die Wortpaare werden noch einmal gezeigt, bevor diese ein letztes Mal abgefragt werden.\nLeertaste drücken, um fortzufahren.")
def training_instruction():
global condition
if condition % 2 != 0:
probe_crime_list = probe_crime_list_1
else:
probe_crime_list = probe_crime_list_2
show_instruction('Sie sollen eine Person kontaktieren, die unter Verdacht steht, kriminelle Aktivitäten begangen zu haben. Schreiben Sie dieser Person eine E-Mail, in der Sie um die Übergabe illegal erlangter Dokumente bitten. Dazu geben Sie sich als einer der Komplizen der Person aus und loggen sich in den Mail-Account dieses Komplizen ein. In der Nachricht bitten Sie den Verdächtigen, dass er Sie an einem bestimmten Ort trifft und die entsprechenden Dokumente bei sich hat. Die Informationen, die Sie für diese Aufgabe benötigen werden, werden Ihnen gleich präsentiert.\n\nDrücken Sie die Leertaste um fortzufahren.')
show_instruction('Für das Verfassen der E-Mail werden Sie die folgenden Informationen brauchen. Sie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen. Drücken Sie daher erst die Leertaste, wenn Sie die unten stehenden Wortpaare, die für das Verfassen der Nachricht benötigt werden, gründlich auswendig gelernt haben. Im Folgenden werden diese in drei Runden abgefragt.\n\n' + probe_crime_list)
def training_list():
global condition
if condition % 2 != 0:
probe_crime_list = probe_crime_list_1
else:
probe_crime_list = probe_crime_list_2
show_instruction('Drücken Sie die Leertaste, wenn Sie die unten stehenden Items gründlich auswendig gelernt haben.\nSie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen.\n\n' + probe_crime_list)
def training_software():
global condition, required, typedin, rounds
required_items = []
if condition % 2 != 0:
required_items = crime_list_1
else:
required_items = crime_list_2
combine_shuffle = list(zip(required_items, dummy_list_numbers))
shuffle(combine_shuffle)
required_items[:], dummy_list_numbers[:] = zip(*combine_shuffle)
counter = 0
while counter <= 5:
required = required_items[counter]
cue = training_recall_item[dummy_list_numbers[counter]]
counter += 1
instr_display = TextStim(win, color=instruction_color, font='Helvetica', text = u'Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER.', pos=(0, 150), height=30, wrapWidth=1100, colorSpace='rgb')
input_prompt = TextStim(win, color=instruction_color, font='Helvetica', text = cue + ':', pos=(-100, 0), alignHoriz = 'right', height=35)
input_display = TextStim(win, color='black', pos=(-100, -4), alignHoriz = 'left', height=35, bold = True, colorSpace='rgb')
typedin = ''
while True:
input_display.setText(typedin)
instr_display.draw()
input_prompt.draw()
input_display.draw()
win.flip()
char = waitKeys()[0]
if char == 'backspace' and len(typedin) > 0:
typedin = typedin[:-1]
elif char == escape_key:
break
elif char == 'return':
if len( trm(typedin) ) > 0:
break
elif len(char) == 1 and char.isalpha():
typedin += char.upper()
elif char == 'space':
typedin += ' '
elif char == 'comma':
typedin += ','
typedin_words = trm(typedin)
add_resp()
if counter <= 5:
wait(0.5)
else:
break
rounds += 1
def final_slide():
show_instruction("Sie haben nun alle relevanten Informationen gelernt. Bitte führen Sie die Aufgabe nun aus, indem Sie im Google Chrome Browser auf webmail.univie.ac.at gehen und sich dort mit dem eingespeicherten user:account einloggen und die Nachricht mit den gelernten Informationen verfassen und senden. Wenden Sie sich bitte an die Versuchsleitung, um zum Desktop zu gelangen und führen Sie die Aufgabe dann eigenständig aus. Sollten Sie weitere Fragen haben, wenden Sie sich bitte ebenfalls an die Versuchsleitung.")
waitKeys(keyList = ['b'])
def set_screen(): # screen properties
global win, start_text, left_label, right_label, center_disp, instruction_page
win = Window([1280, 1000], color='#dddddd', fullscr = 1, units = 'pix', allowGUI = True) # 1280 1024
start_text = TextStim(win, color=instruction_color, font='Helvetica', text = u'Um anzufangen, bitte die Leertaste drücken.', pos = [0,-300], height=35, bold = True, wrapWidth= 1100)
left_label = TextStim(win, color='#111111', font='Verdana', text = 'unvertraut', pos = [-350,-160], height=35, alignHoriz='center')
right_label = TextStim(win, color='#111111', font='Verdana', text = 'vertraut', pos = [350,-160], height=35, alignHoriz='center')
center_disp = TextStim(win, color='#111111', font='Arial', text = '', height = 60)
instruction_page = TextStim(win, wrapWidth = 1200, height = 28, font='Helvetica', color = instruction_color)
def start_input():
global subj_id, dems, condition, gender
input_box = Dlg(title=u'Grunddaten', labelButtonOK=u'OK', labelButtonCancel=u'Abbrechen')
input_box.addText(text=u'')
input_box.addField(label=u'c.', tip = '1-8')
input_box.addField(label=u'VP', tip = 'Ziffern')
input_box.addText(text=u'')
input_box.addText(text=u'Bitte ausfüllen:')
input_box.addField(label=u'Geschlecht', initial = '', choices=[u'männlich',u'weiblich', u'divers'] )
input_box.addField(label=u'Alter', tip = 'Ziffern')
input_box.addText(text=u'')
input_box.show()
if input_box.OK:
stop = False
try:
condition = int(input_box.data[0])
except ValueError:
condition = 99
print("Condition must be a number!")
## CONDITIONS:
# use condition nos. for control vs. experimental group
# plus for guilty vs innocent block first
# 1 probes 1 + exp + crime first
# 2 probes 2 + exp + nocrime first
# 3 probes 1 + exp + nocrime first
# 4 probes 2 + exp + crime first
# 5 probes 1 + control + crime first
# 6 probes 2 + control + no crime first
# 7 probes 1 + control + no crime first
# 8 probes 2 + control + crime first first
# check if variables correctly given
if condition not in range(1,9):
if testing:
condition = 1 # set value for testing to skip Dlg input box
print("condition was not set, now set to " + str(condition) + " for testing.")
else:
print("condition was not set correctly (should be 1/2/3/4/5/6/7/8)")
stop = True
try:
subj_num = int(input_box.data[1])
except ValueError:
if testing:
subj_num = 99 # set value for testing to skip Dlg input box
print("subj_num was not set, now set to " + str(subj_num) + " for testing.")
else:
print("vp (subject number) was not set correctly (should be simple number)")
stop = True
try:
age = int(input_box.data[3])
except ValueError:
if testing:
age = 11 # set value for testing to skip Dlg input box
print("age was not set, now set to " + str(age) + " for testing.")
else:
print("age was not set correctly (should be simple number)")
stop = True
if stop:
print("\nTry again with correct inputs.\n")
quit()
subj_id = str(subj_num).zfill(3) + "_" + str(strftime("%Y%m%d%H%M%S", gmtime()))
if input_box.data[2] == 'weiblich':
gender = 2
elif input_box.data[2] == 'männlich':
gender = 1
else:
gender = 3
dems = 'dems\tgender/age\t' + str(gender) + '/' + str(age)
start_date = datetime.now()
else:
quit()
def create_file():
global data_out
f_name = 'lcp1_learning_' + str(condition) + "_" + subj_id + '.txt'
data_out=open(f_name, 'a', encoding='utf-8')
data_out.write( '\t'.join( [ "subject_id", "condition", "probe_item", "typed_in", "similarityscore", "rounds" ] ) + "\n" )
print("File created:", f_name)
def show_instruction(instruction_text):
instruction_page.setText(instruction_text)
instruction_page.draw()
win.flip()
wait(instr_wait)
inst_resp = waitKeys(keyList = ['space', escape_key])
end_on_esc(inst_resp[0])
def end_on_esc(escap):
if escap == escape_key : # escape
print("Trying to escape?")
instruction_page.setText('Sure you want to discontinue and quit the experiment?\n\nPress "y" to quit, or press "n" to continue.')
instruction_page.draw()
win.flip()
wait(1)
quit_resp = waitKeys(keyList = ['y', 'n'])
if quit_resp[0] == 'y':
print("************ ESCAPED ************")
data_out.close()
win.close()
quit()
else:
clearEvents()
print("Continuing...")
# from https://github.com/luosch/similar_text
def similar_str(str1, str2):
"""
return the len of longest string both in str1 and str2
and the positions in str1 and str2
"""
max_len = tmp = pos1 = pos2 = 0
len1, len2 = len(str1), len(str2)
for p in range(len1):
for q in range(len2):
tmp = 0
while p + tmp < len1 and q + tmp < len2 \
and str1[p + tmp] == str2[q + tmp]:
tmp += 1
if tmp > max_len:
max_len, pos1, pos2 = tmp, p, q
return max_len, pos1, pos2
def similar_char(str1, str2):
"""
return the total length of longest string both in str1 and str2
"""
max_len, pos1, pos2 = similar_str(str1, str2)
total = max_len
if max_len != 0:
if pos1 and pos2:
total += similar_char(str1[:pos1], str2[:pos2])
if pos1 + max_len < len(str1) and pos2 + max_len < len(str2):
total += similar_char(str1[pos1 + max_len:], str2[pos2 + max_len:]);
return total
def similar_text(str1, str2):
"""
    return an int value in [0, 100], which stands for match level
"""
if not (isinstance(str1, str) or isinstance(str1, unicode)):
raise TypeError("must be str or unicode")
elif not (isinstance(str2, str) or isinstance(str2, unicode)):
raise TypeError("must be str or unicode")
elif len(str1) == 0 and len(str2) == 0:
return 0.0
else:
return int(similar_char(str1, str2) * 200.0 / (len(str1) + len(str2)))
def trm(raw_inp):
return [w for w in raw_inp.replace(',', ' ').split(' ') if w != ''][:2]
def add_resp():
global condition, required
data_out.write( '\t'.join( [ str(subj_id), str(condition), str(required), str(typedin), str(similar_text(str(required.upper()), str(typedin)))]) + '\t' + str(rounds) + '\n' )
print(required, str(typedin), similar_text(str(required.upper()), str(typedin)))
def ending ():
data_out.write(dems + "\n")
data_out.close()
show_instruction( "ENDE" )
# EXECUTE
execute()
| 44.558659 | 716 | 0.656281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,047 | 0.440327 |
46305fd7b6fccf0915c23423983bb7ae102bfc08 | 14,209 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_asm_policy_import.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
]
| 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/modules/network/f5/bigip_asm_policy_import.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
]
| 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/modules/network/f5/bigip_asm_policy_import.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
]
| 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy_import
short_description: Manage BIG-IP ASM policy imports
description:
  - Manage BIG-IP ASM policy imports.
version_added: 2.8
options:
name:
description:
- The ASM policy to create or override.
type: str
required: True
inline:
description:
- When specified the ASM policy is created from a provided string.
- Content needs to be provided in a valid XML format otherwise the operation will fail.
type: str
source:
description:
- Full path to a policy file to be imported into the BIG-IP ASM.
- Policy files exported from newer versions of BIG-IP cannot be imported into older
versions of BIG-IP. The opposite, however, is true; you can import older into
newer.
      - The file format can be binary or XML.
type: path
force:
description:
- When set to C(yes) any existing policy with the same name will be overwritten by the new import.
- Works for both inline and file imports, if the policy does not exist this setting is ignored.
default: no
type: bool
partition:
description:
- Device partition to create policy on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Import ASM policy
bigip_asm_policy_import:
name: new_asm_policy
    source: /root/asm_policy.xml
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import ASM policy inline
bigip_asm_policy_import:
name: foo-policy4
inline: <xml>content</xml>
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Override existing ASM policy
  bigip_asm_policy_import:
name: new_asm_policy
    source: /root/asm_policy_new.xml
force: yes
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
source:
description: Local path to an ASM policy file.
returned: changed
type: str
sample: /root/some_policy.xml
inline:
description: Contents of policy as an inline string
returned: changed
type: str
sample: <xml>foobar contents</xml>
name:
description: Name of the ASM policy to be created/overwritten
returned: changed
type: str
sample: Asm_APP1_Transparent
force:
description: Set when overwriting an existing policy
returned: changed
type: bool
sample: yes
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
updatables = []
returnables = [
'name',
'inline',
'source',
'force'
]
api_attributes = [
'file',
'name',
]
api_map = {
'file': 'inline',
'filename': 'source',
}
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
if not module_provisioned(self.client, 'asm'):
raise F5ModuleError(
"ASM must be provisioned to use this module."
)
result = dict()
changed = self.policy_import()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def policy_import(self):
self._set_changed_options()
if self.module.check_mode:
return True
if self.exists():
if self.want.force is False:
return False
if self.want.inline:
task = self.inline_import()
self.wait_for_task(task)
return True
self.import_file_to_device()
self.remove_temp_policy_from_device()
return True
def exists(self):
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
self.want.name, self.want.partition
)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'items' in response and response['items'] != []:
return True
return False
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def _get_policy_link(self):
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
self.want.name, self.want.partition
)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
policy_link = response['items'][0]['selfLink']
return policy_link
def inline_import(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
if self.want.force:
params.update(dict(policyReference={'link': self._get_policy_link()}))
params.pop('name')
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['id']
def wait_for_task(self, task_id):
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
task_id
)
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if response['status'] in ['COMPLETED', 'FAILURE']:
break
time.sleep(1)
if response['status'] == 'FAILURE':
raise F5ModuleError(
'Failed to import ASM policy.'
)
if response['status'] == 'COMPLETED':
return True
def import_file_to_device(self):
name = os.path.split(self.want.source)[1]
self.upload_file_to_device(self.want.source, name)
time.sleep(2)
full_name = fq_name(self.want.partition, self.want.name)
if self.want.force:
cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1} overwrite'.format(full_name, name)
else:
cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
if 'commandResult' in response:
if 'Unexpected Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def remove_temp_policy_from_device(self):
name = os.path.split(self.want.source)[1]
tpath_name = '/var/config/rest/downloads/{0}'.format(name)
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=tpath_name
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
),
source=dict(type='path'),
inline=dict(),
force=dict(
type='bool',
default='no'
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['source', 'inline']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 29.85084 | 114 | 0.606235 | 9,511 | 0.669364 | 0 | 0 | 0 | 0 | 0 | 0 | 4,111 | 0.289324 |
4630b18f7ea854ba3d4c006459393021a0b26e65 | 322 | py | Python | Firewall/Model/Host.py | frieagle94/firewall | 9dc0357f78f128c0275baabebfef6fdf8ff35a47 | [
"Apache-2.0"
]
| null | null | null | Firewall/Model/Host.py | frieagle94/firewall | 9dc0357f78f128c0275baabebfef6fdf8ff35a47 | [
"Apache-2.0"
]
| null | null | null | Firewall/Model/Host.py | frieagle94/firewall | 9dc0357f78f128c0275baabebfef6fdf8ff35a47 | [
"Apache-2.0"
]
| null | null | null | __author__ = 'Riccardo Frigerio'
'''
HOST object
Attributes:
- mac_address: MAC address
- port: port it is connected to
- dpid: switch it is connected to
'''
class Host(object):
def __init__(self, mac_address, port, dpid):
self.mac_address = mac_address
self.port = port
self.dpid = dpid
| 18.941176 | 48 | 0.667702 | 158 | 0.490683 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.453416 |
4632248180ce8a33d74a9026c30eef96e40a8172 | 2,466 | py | Python | docker-compose/tweet_collector/tweet_streamer.py | lorenanda/tweets | 926b7a1b33057e57a59dce2d4fc89e7af18962e6 | [
"MIT"
]
| 2 | 2021-05-14T19:11:28.000Z | 2021-06-13T23:17:32.000Z | docker-compose/tweet_collector/tweet_streamer.py | lorenanda/tweets | 926b7a1b33057e57a59dce2d4fc89e7af18962e6 | [
"MIT"
]
| null | null | null | docker-compose/tweet_collector/tweet_streamer.py | lorenanda/tweets | 926b7a1b33057e57a59dce2d4fc89e7af18962e6 | [
"MIT"
]
| 1 | 2022-02-13T10:47:17.000Z | 2022-02-13T10:47:17.000Z | from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener
import json
import time
import logging
import pymongo
import config
client = pymongo.MongoClient(host='mongo_container', port=27018)
db = client.tweets_db
auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
api = API(auth, wait_on_rate_limit=True)
user = api.me()
logging.critical("connection established with user: " + user.name)
# # Function for Twitter authentication
# def authenticate():
# auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
# auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
# return auth
# Function for streaming tweets
class TwitterListener(StreamListener):
#defines what is done with every single tweet as it is intercepted in real-time
def __init__(self, limit, callback):
#super().__init__()
self.limit = limit
self.counter = 0
self.callback = callback
# Return an error if twitter is unreachable
def on_error(self, status):
if status == 420:
print(status)
return False
def get_tweets_dict(self, t):
if 'extended_tweet' in t:
text = t['extended_tweet']['full_text']
else:
text = t['text']
tweet = {
'username': t['user']['screen_name'],
            'text': text,
'followers_count': t['user']['followers_count'],
'location':t['user']['location'],
'description':t['user']['description']
}
return tweet
def on_data(self, data):
t = json.loads(data)
        tweet = self.get_tweets_dict(t)
self.callback(tweet)
self.counter += 1
if self.counter == self.limit:
return False
def stream_tweets(limit, callback):
    stream_listener = TwitterListener(limit, callback)
    stream = Stream(auth=api.auth, listener=stream_listener)
    stream.filter(track=['OnThisDay'], follow=['2278940227'], languages=['en'])
def warning_log(tweet):
#logging.critical(f'\n\nTWEET! {tweet["username"]} just tweeted: "{tweet["text"]}"\n\n\n')
    logging.critical('\n\nTWEET: ' + tweet['username'] + ' just tweeted: ' + tweet['text'])
db.collections.onthisday.insert_one(tweet)
# Driver function
if __name__ == '__main__':
while True:
stream_tweets(5, warning_log)
time.sleep(30) | 30.825 | 94 | 0.664234 | 1,085 | 0.439984 | 0 | 0 | 0 | 0 | 0 | 0 | 833 | 0.337794 |
46325f61280dc8596ca2d94d27452fa3b0d497c6 | 2,028 | py | Python | manage_it/network/models.py | ShangShungInstitute/django-manage-it | 13cb23b57ce3577db7f69250741bcbfe82b69a57 | [
"MIT",
"Unlicense"
]
| 1 | 2015-01-20T14:34:32.000Z | 2015-01-20T14:34:32.000Z | manage_it/network/models.py | ShangShungInstitute/django-manage-it | 13cb23b57ce3577db7f69250741bcbfe82b69a57 | [
"MIT",
"Unlicense"
]
| null | null | null | manage_it/network/models.py | ShangShungInstitute/django-manage-it | 13cb23b57ce3577db7f69250741bcbfe82b69a57 | [
"MIT",
"Unlicense"
]
| null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from assets.models import Item
from catalog.models import Inventory
CONNECTION_TYPES = (
(1, "Ethernet 1Gb"),
(2, "Ethernet 100Mb"),
(3, "WIFI"),
(4, "Optic Fiber"),
(5, "USB"),
(6, "HDMI"),
(7, "Telephone"),
)
class Network(models.Model):
"""
    Network to which assets are connected
"""
inventory = models.ForeignKey(
Inventory, verbose_name=_(u"inventory"))
name = models.CharField(_(u"name"), max_length=100)
description = models.TextField(blank=True, null=True)
ip_range = models.CharField(
_(u"ip_range"),
blank=True, null=True, max_length=100)
def __unicode__(self):
return self.name
class Connection(models.Model):
"""
ItemConnection for networked assets
"""
concetion_type = models.SmallIntegerField(
_(u"link type"), choices=CONNECTION_TYPES)
device_1 = models.ForeignKey(
Item, verbose_name=_(u"item 1"), related_name="dev1")
device_1_interface = models.IPAddressField(
blank=True, null=True)
device_1_mac = models.CharField(
blank=True, null=True, max_length=79)
device_2 = models.ForeignKey(
Item, verbose_name=_(u"item 2"), related_name="dev2")
device_2_interface = models.IPAddressField(
blank=True, null=True)
device_2_mac = models.CharField(
blank=True, null=True, max_length=79)
description = models.TextField(
blank=True, null=True)
network = models.ForeignKey(Network)
class Meta:
unique_together = ("device_1", "device_2")
def __unicode__(self):
return "%s #%s" % (self.network, self.id)
class Interface(models.Model):
mac = models.CharField(_(u"MAC"), blank=True, null=True, max_length=79)
device = models.ForeignKey(Item, verbose_name=_(u"device"))
description = models.TextField(_(u"description"), blank=True, null=True)
def __unicode__(self):
return self.mac
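# Hedged ORM sketch (commented out; `inv`, `server` and `switch` stand for assumed
# Inventory/Item instances that are not part of this module):
# lan = Network.objects.create(inventory=inv, name="Office LAN", ip_range="192.168.0.0/24")
# link = Connection.objects.create(concetion_type=1, device_1=server, device_2=switch, network=lan)
# eth0 = Interface.objects.create(mac="00:11:22:33:44:55", device=server, description="uplink")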
| 28.971429 | 76 | 0.65927 | 1,693 | 0.834813 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.148915 |
46346da00abc925aef8a81ab9a6362af4ad40b6c | 3,940 | py | Python | software/L1_mpu.py | roy-kruemcke/SCUTTLE-SLMP | c181ba451e5fecdbbfb8ac7c4c2d743526112b11 | [
"MIT"
]
| null | null | null | software/L1_mpu.py | roy-kruemcke/SCUTTLE-SLMP | c181ba451e5fecdbbfb8ac7c4c2d743526112b11 | [
"MIT"
]
| null | null | null | software/L1_mpu.py | roy-kruemcke/SCUTTLE-SLMP | c181ba451e5fecdbbfb8ac7c4c2d743526112b11 | [
"MIT"
]
| null | null | null | # L1_mpu.py
# Author: Roy Kruemcke (roanoake)
# 30 NOV 2021
# Allows for the interfacing to the MPU9250 using the smbus2 i2c module
# Written for use with Raspberry Pi 4 Model B
import smbus2
import numpy as np
import data
import time
# Initialize Register Data
CONFIG = 0x1A
USER_CTRL = 0x6A
PWR_MGMT_1, PWR_MGMT_2 = 0x6B, 0x6C
GYRO_CONFIG = 0x1B
G_OFFSET = 0x13
GYRO_OUT = 0x43
ACCEL_CONFIG = 0x1C
ACCEL_CONFIG_2 = 0x1D
A_OFFSET = 0x77
ACCEL_OUT = 0x3B
TEMP_OUT = 0x41
# Initialize Scales
MAX_VAL = 2**16
ACCL_SCALE_2G=MAX_VAL/(2*2) # +-2G
ACCL_SCALE_4G=MAX_VAL/(4*2) # +-4G
ACCL_SCALE_8G=MAX_VAL/(8*2) # +-8G
ACCL_SCALE_16G=MAX_VAL/(16*2) # +-16G
GYRO_SCALE_250DG=MAX_VAL/(250*2) # +-250 deg/s
GYRO_SCALE_500DG=MAX_VAL/(500*2) # +-500 deg/s
GYRO_SCALE_1000DG=MAX_VAL/(1000*2) # +-1000 deg/s
GYRO_SCALE_2000DG=MAX_VAL/(2000*2) # +-2000 deg/s
# Open I2C bus
bus=smbus2.SMBus(1)
mpu = 0x68 # Default address for MPU
def getAccelScale():
"""
Reads the current accelerometer scale, and returns the scaling factor.
"""
acnfg=bus.read_byte_data(mpu,ACCEL_CONFIG)
scale = (acnfg & 0x18) >> 3 # Bits 4:3 hold the full scale
# Return the corresponding scale
if scale==0: return ACCL_SCALE_2G
elif scale==1: return ACCL_SCALE_4G
elif scale==2: return ACCL_SCALE_8G
elif scale==3: return ACCL_SCALE_16G
    return None # If you make it here, it's bad
def setAccelScale(newScale:int):
"""
Sets the accelerometer scale. Returns True if successful, False otherwise.
:param scale: integer 0-3 that corresponds to the scale.
"""
# Check input
if not(0<=newScale<=3):
print(">> ERROR: attempted to set ACCEL_SCALE to an improper value")
return False
# First, read the current scale
acnfg=bus.read_byte_data(mpu,ACCEL_CONFIG) # Read ACCEL_CONFIG
acnfg &= ~0x18 # Clear previous scale
acnfg |= (newScale << 3) # Set new scale
bus.write_byte_data(mpu,ACCEL_CONFIG,acnfg) # Write new data
time.sleep(0.01) # Wait 10ms
# Check for completion
tmp=bus.read_byte_data(mpu,ACCEL_CONFIG) # Read ACCEL_CONFIG
tmp=(tmp & 0x18) >> 3 # Isolate scale
if tmp==newScale: # Scale was updated
return True
else: # Scale was not updated
print("> Warning: ACCEL_SCALE did not update")
return False
def getGyroScale():
print("Getting Gyrometer Scale.")
gcnfg=bus.read_byte_data(mpu,GYRO_CONFIG)
scale = (gcnfg & 0x18) >> 3 # Bits 4:3 hold the full scale
# Return the corresponding scale
if scale==0: return GYRO_SCALE_250DG
elif scale==1: return GYRO_SCALE_500DG
elif scale==2: return GYRO_SCALE_1000DG
elif scale==3: return GYRO_SCALE_2000DG
    return None # If you make it here, it's bad
def readAccelerometer():
try:
# Read Accelerometer Data, 2 bytes for 3 axes, 6 bytes total.
twoByteReadings = bus.read_i2c_block_data(mpu, ACCEL_OUT, 6)
# compile all the data into the 16-bit/axis readings.
binaryVals = [(twoByteReadings[i*2] << 8) | twoByteReadings[i*2 + 1] for i in range(3)]
# convert 16-bit unsigned into 16-bit signed
binaryVals = [data.getSignedVal(i,16) for i in binaryVals]
scale = getAccelScale()
# scale binary to meaningful value
accel_vals = [val/scale for val in binaryVals]
# round to 3 decimal places
accel_vals = np.round(accel_vals,3)
except:
print(">> ERROR: ACCEL_OUT could not be read.")
accel_vals = [0,0,0]
return accel_vals
def readGyrometer():
print("Reading Gyrometer")
def readTemperature():
print("Reading Temperature")
print(readAccelerometer())
| 30.542636 | 95 | 0.639848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,408 | 0.35736 |
4634863621fdac26b1771cdeb54c5f5a51a3dcb7 | 1,448 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/paletted_texture.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
]
| null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/paletted_texture.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
]
| null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/paletted_texture.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
]
| null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_paletted_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_paletted_texture',error_checker=_errors._error_checker)
GL_COLOR_INDEX12_EXT=_C('GL_COLOR_INDEX12_EXT',0x80E6)
GL_COLOR_INDEX16_EXT=_C('GL_COLOR_INDEX16_EXT',0x80E7)
GL_COLOR_INDEX1_EXT=_C('GL_COLOR_INDEX1_EXT',0x80E2)
GL_COLOR_INDEX2_EXT=_C('GL_COLOR_INDEX2_EXT',0x80E3)
GL_COLOR_INDEX4_EXT=_C('GL_COLOR_INDEX4_EXT',0x80E4)
GL_COLOR_INDEX8_EXT=_C('GL_COLOR_INDEX8_EXT',0x80E5)
GL_TEXTURE_INDEX_SIZE_EXT=_C('GL_TEXTURE_INDEX_SIZE_EXT',0x80ED)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glColorTableEXT(target,internalFormat,width,format,type,table):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glGetColorTableEXT(target,format,type,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetColorTableParameterfvEXT(target,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetColorTableParameterivEXT(target,pname,params):pass
| 43.878788 | 118 | 0.810083 | 0 | 0 | 0 | 0 | 532 | 0.367403 | 0 | 0 | 314 | 0.216851 |
4634eaef2f0ee2edb50cfda3fdc934ed5f0de8df | 1,494 | py | Python | backend/apps/csyllabusapi/views/university.py | CSyllabus/webapp | 06391d43d97453a135ff78ff8dcdd35846c94d30 | [
"MIT"
]
| 3 | 2017-11-02T11:24:09.000Z | 2017-12-08T15:33:43.000Z | backend/apps/csyllabusapi/views/university.py | CSyllabus/webapp | 06391d43d97453a135ff78ff8dcdd35846c94d30 | [
"MIT"
]
| 2 | 2017-11-04T10:03:44.000Z | 2017-12-31T08:26:36.000Z | backend/apps/csyllabusapi/views/university.py | CSyllabus/webapp | 06391d43d97453a135ff78ff8dcdd35846c94d30 | [
"MIT"
]
| null | null | null | from rest_framework.parsers import JSONParser, FileUploadParser
from rest_framework.views import APIView
from ..models import City
from ..models import Country
from ..models import University
from ..models import Faculty
from ..models import Program
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.decorators import parser_classes
from django.utils import timezone
try:
from django.utils import simplejson as json
except ImportError:
import json
@permission_classes((permissions.AllowAny,))
@parser_classes((JSONParser,))
class UniversityView(APIView):
def post(self, request):
name = request.data['name']
country = Country.objects.get(id=request.data['country_id'])
city = City.objects.get(id=request.data['city_id'])
University.objects.create(name=name, country=country, city=city)
return Response()
def delete(selfself, request):
id = request.data['id']
University.objects.filter(id=id).delete()
return Response()
def put(selfself, request):
id = request.data['id']
name = request.data['name']
country = Country.objects.get(id=request.data['country_id'])
city = City.objects.get(id=request.data['city_id'])
University.objects.filter(id=id).update(name=name, country=country, city=city, modified=timezone.now())
return Response()
| 33.954545 | 111 | 0.726238 | 837 | 0.560241 | 0 | 0 | 913 | 0.611111 | 0 | 0 | 62 | 0.041499 |
4636eb5b9d65657cf6249aa635cf51cca2defbb9 | 244 | py | Python | prickly-pufferfish/python_questions/add_to_zero.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
]
| 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | prickly-pufferfish/python_questions/add_to_zero.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
]
| 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | prickly-pufferfish/python_questions/add_to_zero.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
]
| 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | """
Write a function with a list of ints as a paramter. /
Return True if any two nums sum to 0. /
>>> add_to_zero([]) /
False /
>>> add_to_zero([1]) /
False /
>>> add_to_zero([1, 2, 3]) /
False /
>>> add_to_zero([1, 2, 3, -2]) /
True /
"""
| 14.352941 | 53 | 0.569672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.995902 |
4636fb0bc8542fff3140160b2826c73df03573c5 | 827 | py | Python | op_trans/asgi.py | jezzlucena/django-opp-trans | 05e8b2b91a6c46cd800837ae2b683ec043243742 | [
"MIT"
]
| 1 | 2021-03-03T02:22:11.000Z | 2021-03-03T02:22:11.000Z | op_trans/asgi.py | jezzlucena/django-opp-trans | 05e8b2b91a6c46cd800837ae2b683ec043243742 | [
"MIT"
]
| null | null | null | op_trans/asgi.py | jezzlucena/django-opp-trans | 05e8b2b91a6c46cd800837ae2b683ec043243742 | [
"MIT"
]
| null | null | null | """
ASGI config for op_trans project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from op_trans.websocket import websocket_application
from op_trans.redis_cli import RedisCli
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'op_trans.settings')
django_application = get_asgi_application()
async def application(scope, receive, send):
RedisCli.get()
if scope['type'] == 'http':
await django_application(scope, receive, send)
elif scope['type'] == 'websocket':
await websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
| 28.517241 | 78 | 0.74607 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.399033 | 331 | 0.400242 |
4637594ad65e429cbd0184284c782da6df047d1a | 482 | py | Python | notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
]
| null | null | null | notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
]
| 2 | 2021-05-21T13:26:26.000Z | 2022-02-10T10:04:55.000Z | notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""API routes config for notifai_recruitment project.
REST framework adds support for automatic URL routing to Django, and provides simple, quick and consistent
way of wiring view logic to a set of URLs.
For more information on this file, see
https://www.django-rest-framework.org/api-guide/routers/
"""
from rest_framework import routers
from textify.api.views import NoteViewSet
router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
| 28.352941 | 106 | 0.778008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.69917 |
46385759fc36e37af911b95c5e283c3da2fdcd54 | 3,786 | py | Python | zipline/data/bundles/equities_bundle.py | walterkissling/zipline | 784580ef2271aeaa52d8e526f15c88fbb55b7547 | [
"Apache-2.0"
]
| null | null | null | zipline/data/bundles/equities_bundle.py | walterkissling/zipline | 784580ef2271aeaa52d8e526f15c88fbb55b7547 | [
"Apache-2.0"
]
| null | null | null | zipline/data/bundles/equities_bundle.py | walterkissling/zipline | 784580ef2271aeaa52d8e526f15c88fbb55b7547 | [
"Apache-2.0"
]
| null | null | null | # File to ingest an equities bundle for zipline
# Import libraries
import pandas as pd
import numpy as np
def equities_bundle(path_to_file):
# Define custom ingest function
def ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir,
start_session,
end_session):
# Read in data
data = pd.read_csv(path_to_file, index_col = [0, 1], parse_dates = [1], infer_datetime_format = True)
data.volume = data.volume.astype(int)
#data.loc[:, 'volume'] = 100000000
symbols = data.index.levels[0].tolist()
#start_dt = data.index.levels[1].min()
#end_dt = data.index.levels[1].max()
# Create asset metadata
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = pd.DataFrame(np.empty(len(symbols), dtype=dtype))
# Create dividend and split dataframe
dividends = pd.DataFrame(columns = ['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date'])
splits = pd.DataFrame(columns = ['sid', 'ratio','effective_date'])
# Create list to hold data
data_to_write = []
# Loop through symbols and prepare data
for sid, symbol in enumerate(symbols):
data_ = data.loc[symbol].sort_index()
start_dt = data_.index.min()
end_dt = data_.index.max()
# Set auto cloes to day after last trade
ac_date = end_dt + pd.tseries.offsets.BDay()
metadata.iloc[sid] = start_dt, end_dt, ac_date, symbol
# Check for splits and dividends
if 'split' in data_.columns:
tmp = 1. / data_[data_['split'] != 1.0]['split']
split = pd.DataFrame(data = tmp.index.tolist(), columns = ['effective_date'])
split['ratio'] = tmp.tolist()
split['sid'] = sid
index = pd.Index(range(splits.shape[0],
splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
splits = splits.append(split)
if 'dividend' in data_.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = data_[data_['dividend'] != 0.0]['dividend']
div = pd.DataFrame(data = tmp.index.tolist(), columns = ['ex_date'])
div['record_date'] = tmp.index
div['declared_date'] = tmp.index
div['pay_date'] = tmp.index
div['amount'] = tmp.tolist()
div['sid'] = sid
ind = pd.Index(range(dividends.shape[0], dividends.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
dividends = dividends.append(div)
# Append data to list
data_to_write.append((sid, data_))
daily_bar_writer.write(data_to_write, show_progress = True)
# Hardcode exchange data
metadata['exchange'] = 'CSV'
# Write metadata
asset_db_writer.write(equities = metadata)
# Write splits and dividents
dividends['sid'] = dividends['sid'].astype(int)
splits['sid'] = splits['sid'].astype(int)
adjustment_writer.write(splits = splits,
dividends = dividends)
return ingest | 38.242424 | 109 | 0.530639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 927 | 0.244849 |
46387a117da7cc25b8fd8335919094946b06904f | 343 | py | Python | model/torch_model.py | FernandoLpz/ONNX-PyTorch-TF-Caffe2 | 05aa07b982bd12d2c0d7ce05a94a726b7faaed91 | [
"MIT"
]
| 3 | 2020-10-28T03:46:48.000Z | 2021-07-22T06:45:33.000Z | model/torch_model.py | FernandoLpz/ONNX-PyTorch-TF-Caffe2 | 05aa07b982bd12d2c0d7ce05a94a726b7faaed91 | [
"MIT"
]
| null | null | null | model/torch_model.py | FernandoLpz/ONNX-PyTorch-TF-Caffe2 | 05aa07b982bd12d2c0d7ce05a94a726b7faaed91 | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
class TorchModel(nn.ModuleList):
def __init__(self):
super(TorchModel, self).__init__()
self.linear_1 = nn.Linear(2, 12)
self.linear_2 = nn.Linear(12, 1)
def forward(self, x):
out = self.linear_1(x)
out = torch.tanh(out)
out = self.linear_2(out)
out = torch.sigmoid(out)
return out | 21.4375 | 36 | 0.682216 | 307 | 0.895044 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
46389fb316c01f816e67750260b3ef8381f89fb1 | 11,529 | py | Python | nereid/contrib/pagination.py | advocatetax/nereid-1 | ee7ae345e3e7e46b3e13cd6e3b94c388d38a0efa | [
"BSD-3-Clause"
]
| null | null | null | nereid/contrib/pagination.py | advocatetax/nereid-1 | ee7ae345e3e7e46b3e13cd6e3b94c388d38a0efa | [
"BSD-3-Clause"
]
| null | null | null | nereid/contrib/pagination.py | advocatetax/nereid-1 | ee7ae345e3e7e46b3e13cd6e3b94c388d38a0efa | [
"BSD-3-Clause"
]
| 1 | 2021-06-04T13:02:29.000Z | 2021-06-04T13:02:29.000Z | # -*- coding: utf-8 -*-
# This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from math import ceil
from sql import Select, Column
from sql.functions import Function
from sql.aggregate import Count
from werkzeug.utils import cached_property
class BasePagination(object):
"""
General purpose paginator for doing pagination
With an empty dataset assert the attributes
>>> p = Pagination(1, 3, [])
>>> p.count
0
>>> p.pages
0
>>> p.begin_count
0
>>> p.end_count
0
Test with a range(1, 10)
>>> p = Pagination(1, 3, range(1, 10))
>>> p.count
9
>>> p.all_items()
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> p.pages
3
>>> p.begin_count
1
>>> p.end_count
3
"""
def __init__(self, page, per_page, data=None):
"""
:param per_page: Items per page
:param page: The page to be displayed
:param data: The data table
"""
self.per_page = per_page
self.page = page
self.data = data if data is not None else []
@property
def count(self):
"Returns the count of data"
return len(self.data)
def all_items(self):
"""Returns complete set of items"""
return self.data
def items(self):
"""Returns the list of items in current page
"""
return self.data[self.offset:self.offset + self.per_page]
def __iter__(self):
for item in list(self.items()):
yield item
def __len__(self):
return self.count
def serialize(self):
return {
"count": self.count,
"pages": self.pages,
"page": self.page,
"per_page": self.per_page,
"items": list(self.items()),
}
@property
def prev(self):
"""Returns a :class:`Pagination` object for the previous page."""
return Pagination(self.page - 1, self.per_page, self.data)
def __next__(self):
"""Returns a :class:`Pagination` object for the next page."""
return Pagination(self.page + 1, self.per_page, self.data)
#: Attributes below this may not require modifications in general cases
def iter_pages(
self, left_edge=2, left_current=2, right_current=2, right_edge=2
):
"""
Iterates over the page numbers in the pagination. The four
parameters control the thresholds how many numbers should be produced
from the sides. Skipped page numbers are represented as `None`.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">
{{ page }}
</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>…</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in range(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
offset = property(lambda self: (self.page - 1) * self.per_page)
prev_num = property(lambda self: self.page - 1)
has_prev = property(lambda self: self.page > 1)
next_num = property(lambda self: self.page + 1)
has_next = property(lambda self: self.page < self.pages)
pages = property(lambda self: int(ceil(self.count / float(self.per_page))))
begin_count = property(lambda self: min([
((self.page - 1) * self.per_page) + 1,
self.count]))
end_count = property(lambda self: min(
self.begin_count + self.per_page - 1, self.count))
class Pagination(BasePagination):
"""
General purpose paginator for doing pagination which can be used by
passing a search domain .Remember that this means the query will be built
and executed and passed on which could be slower than writing native SQL
queries. While this fits into most use cases, if you would like to use
a SQL query rather than a domain use :class:QueryPagination instead
"""
# The counting of all possible records can be really expensive if you
# have too many records and the selectivity of the query is low. For
# example - a query to display all products in a website would be quick
# in displaying the products but slow in building the navigation. So in
# cases where this could be frequent, the value of count may be cached and
# assigned to this variable
_count = None
def __init__(self, obj, domain, page, per_page, order=None):
"""
:param obj: The object itself. pass self within tryton object
:param domain: Domain for search in tryton
:param per_page: Items per page
:param page: The page to be displayed
"""
self.obj = obj
self.domain = domain
self.order = order
super(Pagination, self).__init__(page, per_page)
@cached_property
def count(self):
"""
Returns the count of entries
"""
if self.ids_domain():
return len(self.domain[0][2])
if self._count is not None:
return self._count
return self.obj.search(domain=self.domain, count=True)
def all_items(self):
"""Returns complete set of items"""
if self.ids_domain():
return self.obj.browse(self.domain[0][2])
return self.obj.search(self.domain)
def ids_domain(self):
"""
Returns True if the domain has only IDs and can skip SQL fetch
to directly browse the records. Else a False is returned
"""
return (len(self.domain) == 1) and \
(self.domain[0][0] == 'id') and \
(self.domain[0][1] == 'in') and \
(self.order is None)
def serialize(self, purpose=None):
rv = super(Pagination, self).serialize()
if hasattr(self.obj, 'serialize'):
rv['items'] = [item.serialize(purpose) for item in list(self.items())]
elif hasattr(self.obj, '_json'):
# older style _json methods
rv['items'] = [item._json() for item in list(self.items())]
else:
rv['items'] = [
{
'id': item.id,
'rec_name': item.rec_name,
} for item in list(self.items())
]
return rv
def items(self):
"""
Returns the list of browse records of items in the page
"""
if self.ids_domain():
ids = self.domain[0][2][self.offset:self.offset + self.per_page]
return self.obj.browse(ids)
else:
return self.obj.search(
self.domain, offset=self.offset, limit=self.per_page,
order=self.order
)
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
return self.obj.paginate(self.page - 1, self.per_page, error_out)
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
return self.obj.paginate(self.page + 1, self.per_page, error_out)
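# A minimal usage sketch, not part of the original module: it assumes a
# hypothetical Tryton model handle ``Party`` obtained from the pool; the
# model name, domain and order below are illustrative only.
def _example_domain_pagination(Party):
    """Paginate active parties, 20 records per page, newest first."""
    page = Pagination(
        Party, [('active', '=', True)], page=1, per_page=20,
        order=[('create_date', 'DESC')])
    for record in page.items():
        print(record.rec_name)
    return page.pages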
class Distinct(Function):
__slots__ = ()
_function = 'DISTINCT'
class QueryPagination(BasePagination):
"""A fast implementation of pagination which uses a SQL query for
generating the IDS and hence the pagination
.. versionchanged::3.2.0.5
The SQL Query has to be an instance of `sql.Select`.
"""
def __init__(self, obj, query, primary_table, page, per_page):
"""
:param query: Query to be used for search.
It must not include an OFFSET or LIMIT as they
would be automatically added to the query.
It must also not have any columns in the select.
:param primary_table: The ~`sql.Table` instance from which the records
have to be selected.
:param page: The page to be displayed
:param per_page: Items per page
"""
self.obj = obj
assert isinstance(query, Select), "Query must be python-sql"
self.query = query
self.primary_table = primary_table
super(QueryPagination, self).__init__(page, per_page)
@cached_property
def count(self):
"Return the count of the Items"
from trytond.transaction import Transaction
# XXX: Ideal case should make a copy of Select query
#
# https://code.google.com/p/python-sql/issues/detail?id=22
query = self.query
query.columns = (Count(Distinct(self.primary_table.id)), )
cursor = Transaction().connection.cursor()
# temporarily remove order_by
order_by = query.order_by
query.order_by = None
try:
cursor.execute(*query)
finally:
# XXX: This can be removed when SQL queries can be copied
# See comment above
query.order_by = order_by
res = cursor.fetchone()
if res:
return res[0]
# There can be a case when query return None and then count
# will be zero
return 0
def all_items(self):
"""Returns complete set of items"""
from trytond.transaction import Transaction
# XXX: Ideal case should make a copy of Select query
#
# https://code.google.com/p/python-sql/issues/detail?id=22
query = self.query
query.columns = (Distinct(self.primary_table.id), ) + tuple(
(o.expression for o in query.order_by if isinstance(
o.expression, Column
))
)
query.offset = None
query.limit = None
cursor = Transaction().connection.cursor()
cursor.execute(*query)
rv = [x[0] for x in cursor.fetchall()]
return self.obj.browse([_f for _f in rv if _f])
def items(self):
"""
Returns the list of browse records of items in the page
"""
from trytond.transaction import Transaction
# XXX: Ideal case should make a copy of Select query
#
# https://code.google.com/p/python-sql/issues/detail?id=22
query = self.query
query.columns = (Distinct(self.primary_table.id), ) + tuple(
(o.expression for o in query.order_by if isinstance(
o.expression, Column
))
)
query.offset = self.offset
query.limit = self.per_page
cursor = Transaction().connection.cursor()
cursor.execute(*query)
rv = [x[0] for x in cursor.fetchall()]
return self.obj.browse([_f for _f in rv if _f])
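# A minimal sketch, not part of the original module, of driving
# QueryPagination with a python-sql query. ``invoice_model`` (a Tryton model)
# and the ``invoice`` table are hypothetical; the query must be a
# ``sql.Select`` without OFFSET or LIMIT, as documented above.
def _example_query_pagination(invoice_model, invoice):
    from sql import Asc, Select
    query = invoice.select(
        where=(invoice.state == 'posted'),
        order_by=(Asc(invoice.id),))
    assert isinstance(query, Select)
    return QueryPagination(invoice_model, query, invoice, page=1, per_page=25)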
| 32.94 | 82 | 0.571515 | 11,180 | 0.96956 | 1,645 | 0.142659 | 1,664 | 0.144307 | 0 | 0 | 5,114 | 0.4435 |
4638b2719be97e9698ba7de333d5a9c1e9271178 | 947 | py | Python | app.py | Arpan-206/Youtube-Downloader-Flask | 8ac6233e238d99c4a13d4d00afa2574d156cb80e | [
"MIT"
]
| 3 | 2021-04-28T09:49:43.000Z | 2022-01-03T13:03:02.000Z | app.py | Arpan-206/Youtube-Downloader-Flask | 8ac6233e238d99c4a13d4d00afa2574d156cb80e | [
"MIT"
]
| null | null | null | app.py | Arpan-206/Youtube-Downloader-Flask | 8ac6233e238d99c4a13d4d00afa2574d156cb80e | [
"MIT"
]
| null | null | null | from flask import Flask, request, send_file, render_template, url_for
import pytube
import logging
import sys
import os
from hello import timed_delete
from threading import Timer
timed_delete()
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)
@app.route("/")
def youtube_downloader():
my_css = url_for('static', filename='cover.css')
return render_template('index.html', css_path= my_css)
@app.route("/download_video", methods=["GET","POST"])
def download_video():
"""
First pytube downloads the file locally in pythonanywhere:
/home/your_username/video_name.mp4
Then use Flask's send_file() to download the video
to the user's Downloads folder.
"""
local_download_path = pytube.YouTube("https://www.youtube.com/watch?v=b1JlYZQG3lI").streams.get_highest_resolution().download()
    # pytube returns the absolute path of the saved file; hand that path
    # straight to send_file (splitting it on "//" produced a list, which
    # send_file cannot serve)
    return send_file(local_download_path, as_attachment=True)
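# A hedged sketch, not in the original app: the same flow reworked to read
# the video URL from the submitted form instead of the hard-coded link above.
# The route name and the "video_url" field are assumptions and must match
# whatever index.html actually posts.
@app.route("/download_video_from_form", methods=["POST"])
def download_video_from_form():
    video_url = request.form.get("video_url", "")
    path = pytube.YouTube(video_url).streams.get_highest_resolution().download()
    return send_file(path, as_attachment=True)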
| 31.566667 | 131 | 0.744456 | 0 | 0 | 0 | 0 | 666 | 0.703273 | 0 | 0 | 319 | 0.336853 |
4638cae0768af5f19dde04ebbab5eceafeebdaf5 | 14,103 | py | Python | si_unit_pandas/base.py | domdfcoding/si_unit_pandas | 6912d775f88dcfb9112199a57c14b6ce6a979d0e | [
"BSD-3-Clause"
]
| null | null | null | si_unit_pandas/base.py | domdfcoding/si_unit_pandas | 6912d775f88dcfb9112199a57c14b6ce6a979d0e | [
"BSD-3-Clause"
]
| 19 | 2020-12-21T18:26:24.000Z | 2022-01-16T11:47:58.000Z | si_unit_pandas/base.py | domdfcoding/si_unit_pandas | 6912d775f88dcfb9112199a57c14b6ce6a979d0e | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
#
# base.py
"""
Base functionality.
"""
#
# Copyright (c) 2020 Dominic Davis-Foster <[email protected]>
#
# Based on cyberpandas
# https://github.com/ContinuumIO/cyberpandas
# Copyright (c) 2018, Anaconda, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# _isstringslice based on awkward-array
# https://github.com/scikit-hep/awkward-array
# Copyright (c) 2018-2019, Jim Pivarski
# Licensed under the BSD 3-Clause License
#
# stdlib
from abc import abstractmethod
from numbers import Real
from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload
# 3rd party
import numpy # type: ignore
from domdf_python_tools.doctools import prettify_docstrings
from pandas.core.arrays import ExtensionArray # type: ignore
from pandas.core.dtypes.base import ExtensionDtype # type: ignore
from pandas.core.dtypes.generic import ABCExtensionArray # type: ignore
from typing_extensions import Literal, Protocol
__all__ = ["NumPyBackedExtensionArrayMixin"]
class NumPyBackedExtensionArrayMixin(ExtensionArray):
"""
Mixin for pandas extension backed by a numpy array.
"""
_dtype: Type[ExtensionDtype]
@property
def dtype(self):
"""
The dtype for this extension array, :class:`~.CelsiusType`.
"""
return self._dtype
@classmethod
def _from_sequence(cls, scalars: Iterable, dtype=None, copy: bool = False):
"""
Construct a new ExtensionArray from a sequence of scalars.
:param scalars: Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
:param dtype: Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
:type dtype: dtype, optional
:param copy: If True, copy the underlying data.
"""
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values: numpy.ndarray, original: ExtensionArray):
"""
Reconstruct an ExtensionArray after factorization.
:param values: An integer ndarray with the factorized values.
:param original: The original ExtensionArray that factorize was called on.
.. seealso::
:meth:`pandas.pandas.api.extensions.ExtensionArray.factorize`
"""
return cls(values)
@property
def shape(self) -> Tuple[int]:
"""
Return a tuple of the array dimensions.
"""
return len(self.data),
def __len__(self) -> int:
"""
Returns the length of this array.
"""
return len(self.data)
def setitem(self, indexer, value):
"""
Set the 'value' inplace.
"""
# I think having a separate than __setitem__ is good
# since we have to return here, but __setitem__ doesn't.
self[indexer] = value
return self
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
return self._itemsize * len(self)
def _formatting_values(self):
return numpy.array(self._format_values(), dtype="object")
def copy(self, deep: bool = False) -> ABCExtensionArray:
"""
Return a copy of the array.
:param deep:
:return:
:rtype:
"""
return type(self)(self.data.copy())
@classmethod
def _concat_same_type(cls, to_concat: Sequence[ABCExtensionArray]) -> ABCExtensionArray:
"""
Concatenate multiple arrays.
:param to_concat: sequence of this type
"""
return cls(numpy.concatenate([array.data for array in to_concat]))
def tolist(self) -> List:
"""
Convert the array to a Python list.
"""
return self.data.tolist()
def argsort(
self,
ascending: bool = True,
kind: Union[Literal["quicksort"], Literal["mergesort"], Literal["heapsort"]] = "quicksort",
*args,
**kwargs,
) -> numpy.ndarray:
r"""
Return the indices that would sort this array.
:param ascending: Whether the indices should result in an ascending
or descending sort.
:param kind: {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
\*args and \*\*kwargs are passed through to :func:`numpy.argsort`.
:return: Array of indices that sort ``self``. If NaN values are contained,
NaN values are placed at the end.
.. seealso::
:class:`numpy.argsort`: Sorting implementation used internally.
"""
return self.data.argsort()
def unique(self) -> ExtensionArray: # noqa: D102
# https://github.com/pandas-dev/pandas/pull/19869
_, indices = numpy.unique(self.data, return_index=True)
data = self.data.take(numpy.sort(indices))
return self._from_ndarray(data)
_A = TypeVar("_A")
class BaseArray(numpy.lib.mixins.NDArrayOperatorsMixin, NumPyBackedExtensionArrayMixin):
ndim: int = 1
data: numpy.ndarray
@classmethod
def _from_ndarray(cls: _A, data: numpy.ndarray, copy: bool = False) -> _A:
"""
Zero-copy construction of a BaseArray from an ndarray.
:param data: This should have CelsiusType._record_type dtype
:param copy: Whether to copy the data.
:return:
"""
if copy:
data = data.copy()
new = cls([]) # type: ignore
new.data = data
return new
@property
def na_value(self):
"""
The missing value.
**Example:**
.. code-block::
>>> BaseArray([]).na_value
numpy.nan
"""
return self.dtype.na_value
def take(self, indices, allow_fill: bool = False, fill_value=None):
# Can't use pandas' take yet
# 1. axis
# 2. I don't know how to do the reshaping correctly.
indices = numpy.asarray(indices, dtype="int")
if allow_fill and fill_value is None:
fill_value = self.na_value
elif allow_fill and not isinstance(fill_value, tuple):
if not numpy.isnan(fill_value):
fill_value = int(fill_value)
if allow_fill:
mask = (indices == -1)
if not len(self):
if not (indices == -1).all():
msg = "Invalid take for empty array. Must be all -1."
raise IndexError(msg)
else:
# all NA take from and empty array
took = (
numpy.full(
(len(indices), 2),
fill_value,
dtype=">u8",
).reshape(-1).astype(self.dtype._record_type)
)
return self._from_ndarray(took)
if (indices < -1).any():
msg = "Invalid value in 'indicies'. Must be all >= -1 for 'allow_fill=True'"
raise ValueError(msg)
took = self.data.take(indices)
if allow_fill:
took[mask] = fill_value
return self._from_ndarray(took)
def __repr__(self) -> str:
formatted = self._format_values()
return f"{self.__class__.__name__}({formatted!r})"
def isna(self):
"""
Indicator for whether each element is missing.
"""
if numpy.isnan(self.na_value):
return numpy.isnan(self.data)
else:
return self.data == self.na_value
# From https://github.com/scikit-hep/awkward-array/blob/2bbdb68d7a4fff2eeaed81eb76195e59232e8c13/awkward/array/base.py#L611
def _isstringslice(self, where):
if isinstance(where, str):
return True
elif isinstance(where, bytes):
raise TypeError("column selection must be str, not bytes, in Python 3")
elif isinstance(where, tuple):
return False
        elif (
                # the numpy.str / numpy.object / numpy.bool aliases were
                # removed in newer NumPy releases; the builtins are equivalent
                isinstance(where, (numpy.ndarray, self.__class__))
                and issubclass(where.dtype.type, (str, numpy.str_))
                ):
            return True
        elif isinstance(where, (numpy.ndarray, self.__class__)) and issubclass(
                where.dtype.type, (object, numpy.object_)
                ) and not issubclass(where.dtype.type, (bool, numpy.bool_)):
return len(where) > 0 and all(isinstance(x, str) for x in where)
elif isinstance(where, (numpy.ndarray, self.__class__)):
return False
try:
assert len(where) > 0
assert all(isinstance(x, str) for x in where)
except (TypeError, AssertionError):
return False
else:
return True
def __delitem__(self, where):
if isinstance(where, str):
del self.data[where]
elif self._isstringslice(where):
for x in where:
del self.data[x]
else:
raise TypeError(f"invalid index for removing column from Table: {where}")
@property
@abstractmethod
def _parser(self):
raise NotImplementedError
def append(self, value) -> None:
"""
Append a value to this BaseArray.
:param value:
"""
self.data = numpy.append(self.data, self._parser(value).data)
def __setitem__(self, key, value):
value = self._parser(value).data
self.data[key] = value
class _SupportsIndex(Protocol):
def __index__(self) -> int:
...
_F = TypeVar("_F", bound="UserFloat")
@prettify_docstrings
class UserFloat(Real):
"""
Class that simulates a float.
:param value: Values to initialise the :class:`~domdf_python_tools.bases.UserFloat` with.
.. versionadded:: 1.6.0
"""
def __init__(self, value: Union[SupportsFloat, _SupportsIndex, str, bytes, bytearray] = 0.0):
self._value = (float(value), )
def as_integer_ratio(self) -> Tuple[int, int]:
return float(self).as_integer_ratio()
def hex(self) -> str: # noqa: A003 # pylint: disable=redefined-builtin
return float(self).hex()
def is_integer(self) -> bool:
return float(self).is_integer()
@classmethod
def fromhex(cls: Type[_F], __s: str) -> _F:
return cls(float.fromhex(__s))
def __add__(self: _F, other: float) -> _F:
return self.__class__(float(self).__add__(other))
def __sub__(self: _F, other: float) -> _F:
return self.__class__(float(self).__sub__(other))
def __mul__(self: _F, other: float) -> _F:
return self.__class__(float(self).__mul__(other))
def __floordiv__(self: _F, other: float) -> _F: # type: ignore
return self.__class__(float(self).__floordiv__(other))
def __truediv__(self: _F, other: float) -> _F:
return self.__class__(float(self).__truediv__(other))
def __mod__(self: _F, other: float) -> _F:
return self.__class__(float(self).__mod__(other))
def __divmod__(self: _F, other: float) -> Tuple[_F, _F]:
return tuple(self.__class__(x) for x in float(self).__divmod__(other)) # type: ignore
def __pow__(self: _F, other: float, mod=None) -> _F:
return self.__class__(float(self).__pow__(other, mod))
def __radd__(self: _F, other: float) -> _F:
return self.__class__(float(self).__radd__(other))
def __rsub__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rsub__(other))
def __rmul__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rmul__(other))
def __rfloordiv__(self: _F, other: float) -> _F: # type: ignore
return self.__class__(float(self).__rfloordiv__(other))
def __rtruediv__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rtruediv__(other))
def __rmod__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rmod__(other))
def __rdivmod__(self: _F, other: float) -> Tuple[_F, _F]:
return tuple(self.__class__(x) for x in float(self).__rdivmod__(other)) # type: ignore
def __rpow__(self: _F, other: float, mod=None) -> _F:
return self.__class__(float(self).__rpow__(other, mod))
def __getnewargs__(self) -> Tuple[float]:
return self._value
def __trunc__(self) -> int:
return float(self).__trunc__()
@overload
def __round__(self, ndigits: int) -> float:
...
@overload
def __round__(self, ndigits: None = ...) -> int:
...
def __round__(self, ndigits: Optional[int] = None) -> Union[int, float]:
return float(self).__round__(ndigits)
def __eq__(self, other: object) -> bool:
if isinstance(other, UserFloat):
return self._value == other._value
else:
return float(self).__eq__(other)
def __ne__(self, other: object) -> bool:
if isinstance(other, UserFloat):
return self._value != other._value
else:
return float(self).__ne__(other)
def __lt__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value < other._value
else:
return float(self).__lt__(other)
def __le__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value <= other._value
else:
return float(self).__le__(other)
def __gt__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value > other._value
else:
return float(self).__gt__(other)
def __ge__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value >= other._value
else:
return float(self).__ge__(other)
def __neg__(self: _F) -> _F:
return self.__class__(float(self).__neg__())
def __pos__(self: _F) -> _F:
return self.__class__(float(self).__pos__())
def __str__(self) -> str:
return str(float(self))
def __int__(self) -> int:
return int(float(self))
def __float__(self) -> float:
return self._value[0]
def __abs__(self: _F) -> _F:
return self.__class__(float(self).__abs__())
def __hash__(self) -> int:
return float(self).__hash__()
def __repr__(self) -> str:
return str(self)
def __ceil__(self):
raise NotImplementedError
def __floor__(self):
raise NotImplementedError
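# A short usage sketch, not part of the library: UserFloat subclasses keep
# their own type through arithmetic, which is the point of the wrapper.
# The ``Metres`` name is purely illustrative.
class Metres(UserFloat):
    """Hypothetical UserFloat subclass used only for demonstration."""


def _demo_userfloat():
    distance = Metres(1.5) + 2.0  # arithmetic returns another Metres
    assert isinstance(distance, Metres)
    assert float(distance) == 3.5
    return distance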
| 27.069098 | 124 | 0.704602 | 11,506 | 0.815855 | 0 | 0 | 6,555 | 0.464795 | 0 | 0 | 5,155 | 0.365525 |
46395d1d31524d94efb18967e41d8995a2059eda | 196 | py | Python | agents/admin.py | HerbertRamirez/inmo_web | 3c48911d5e3ee7a75580534664874ba5e8281e55 | [
"MIT"
]
| null | null | null | agents/admin.py | HerbertRamirez/inmo_web | 3c48911d5e3ee7a75580534664874ba5e8281e55 | [
"MIT"
]
| null | null | null | agents/admin.py | HerbertRamirez/inmo_web | 3c48911d5e3ee7a75580534664874ba5e8281e55 | [
"MIT"
]
| 3 | 2021-02-18T15:11:53.000Z | 2021-02-20T17:34:01.000Z | from django.contrib import admin
from .models import Agent
# Register your models here.
class AgentAdmin(admin.ModelAdmin):
readonly_fields = ('created','updated')
admin.site.register(Agent) | 24.5 | 43 | 0.77551 | 79 | 0.403061 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.234694 |
463af3b646e86f201f2bd7fe58504504be2cb376 | 4,203 | py | Python | modules/DEFA/MS_Office/compoundfiles/const.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
]
| 56 | 2019-02-07T06:21:45.000Z | 2022-03-21T08:19:24.000Z | DEFA/MS_Office/compoundfiles/const.py | sk-yaho/carpe | 077ef7ba1582b3de9f5c08d63431e744b77a9e09 | [
"Apache-2.0"
]
| 5 | 2020-05-25T17:29:00.000Z | 2021-12-13T20:49:08.000Z | DEFA/MS_Office/compoundfiles/const.py | sk-yaho/carpe | 077ef7ba1582b3de9f5c08d63431e744b77a9e09 | [
"Apache-2.0"
]
| 31 | 2019-03-13T10:23:49.000Z | 2021-11-04T12:14:58.000Z | #!/usr/bin/env python
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# A library for reading Microsoft's OLE Compound Document format
# Copyright (c) 2014 Dave Hughes <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
native_str = str
str = type('')
import struct as st
# Magic identifier at the start of the file
COMPOUND_MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'
FREE_SECTOR = 0xFFFFFFFF # denotes an unallocated (free) sector
END_OF_CHAIN = 0xFFFFFFFE # denotes the end of a stream chain
NORMAL_FAT_SECTOR = 0xFFFFFFFD # denotes a sector used for the regular FAT
MASTER_FAT_SECTOR = 0xFFFFFFFC # denotes a sector used for the master FAT
MAX_NORMAL_SECTOR = 0xFFFFFFFA # the maximum sector in a file
MAX_REG_SID = 0xFFFFFFFA # maximum directory entry ID
NO_STREAM = 0xFFFFFFFF # unallocated directory entry
DIR_INVALID = 0 # unknown/empty(?) storage type
DIR_STORAGE = 1 # element is a storage (dir) object
DIR_STREAM = 2 # element is a stream (file) object
DIR_LOCKBYTES = 3 # element is an ILockBytes object
DIR_PROPERTY = 4 # element is an IPropertyStorage object
DIR_ROOT = 5 # element is the root storage object
FILENAME_ENCODING = 'latin-1'
COMPOUND_HEADER = st.Struct(native_str(''.join((
native_str('<'), # little-endian format
native_str('8s'), # magic string
native_str('16s'), # file UUID (unused)
native_str('H'), # file header major version
native_str('H'), # file header minor version
native_str('H'), # byte order mark
native_str('H'), # sector size (actual size is 2**sector_size)
native_str('H'), # mini sector size (actual size is 2**short_sector_size)
native_str('6s'), # unused
native_str('L'), # directory chain sector count
native_str('L'), # normal-FAT sector count
native_str('L'), # ID of first sector of the normal-FAT
native_str('L'), # transaction signature (unused)
native_str('L'), # minimum size of a normal stream
native_str('L'), # ID of first sector of the mini-FAT
native_str('L'), # mini-FAT sector count
native_str('L'), # ID of first sector of the master-FAT
native_str('L'), # master-FAT sector count
))))
DIR_HEADER = st.Struct(native_str(''.join((
native_str('<'), # little-endian format
native_str('64s'), # NULL-terminated filename in UTF-16 little-endian encoding
native_str('H'), # length of filename in bytes (why?!)
native_str('B'), # dir-entry type
native_str('B'), # red (0) or black (1) entry
native_str('L'), # ID of left-sibling node
native_str('L'), # ID of right-sibling node
native_str('L'), # ID of children's root node
native_str('16s'), # dir-entry UUID (unused)
native_str('L'), # user flags (unused)
native_str('Q'), # creation timestamp
native_str('Q'), # modification timestamp
native_str('L'), # start sector of stream
native_str('L'), # low 32-bits of stream size
native_str('L'), # high 32-bits of stream size
))))
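# Example, not part of the original module: reading and sanity-checking the
# header of a compound document. The file name is hypothetical.
def _read_header(filename='example.doc'):
    with open(filename, 'rb') as stream:
        fields = COMPOUND_HEADER.unpack(stream.read(COMPOUND_HEADER.size))
    if fields[0] != COMPOUND_MAGIC:
        raise ValueError('not an OLE compound document')
    # the sector size is stored as a power of two (typically 9 -> 512 bytes)
    return 2 ** fields[5]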
| 42.454545 | 83 | 0.695218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,806 | 0.667618 |
463d1c29208fd7d810aca90bed20af06afc212fd | 3,272 | py | Python | Validation/valid_view_point_cloud.py | dtczhl/Slimmer | c93dac6a59828016484d8bef1c71e9ccceabab9c | [
"MIT"
]
| null | null | null | Validation/valid_view_point_cloud.py | dtczhl/Slimmer | c93dac6a59828016484d8bef1c71e9ccceabab9c | [
"MIT"
]
| null | null | null | Validation/valid_view_point_cloud.py | dtczhl/Slimmer | c93dac6a59828016484d8bef1c71e9ccceabab9c | [
"MIT"
]
| null | null | null | """
View the prediction for a point cloud.
Run valid_one_point_cloud first.
"""
import torch
import numpy as np
import sys
import os
import pptk
# ------ Configurations ------
# path to pth file
pth_file = "../tmp/scene0015_00_vh_clean_2.pth.Random.100"
show_gt = False  # show groundtruth or not; groundtruth is drawn first, i.e., behind
# --- end of configurations ---
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf',
'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink',
'bathtub', 'otherfurniture']
# CLASS_COLOR = [
# [138, 43, 226], [0, 128, 128], [0, 255, 0], [0, 0, 255], [255, 255, 0],
# [0, 255, 255], [255, 0, 255], [192, 192, 192], [128, 128, 128], [128, 0, 0],
# [128, 128, 0], [0, 128, 0], [128, 0, 128], [255, 0, 0], [0, 0, 128],
# [34, 139, 34], [64, 224, 208], [0, 0, 0], [75, 0, 130], [205, 133, 63]
# ]
SCANNET_COLOR_MAP = {
0: (0., 0., 0.),
1: (174., 199., 232.),
2: (152., 223., 138.),
3: (31., 119., 180.),
4: (255., 187., 120.),
5: (188., 189., 34.),
6: (140., 86., 75.),
7: (255., 152., 150.),
8: (214., 39., 40.),
9: (197., 176., 213.),
10: (148., 103., 189.),
11: (196., 156., 148.),
12: (23., 190., 207.),
14: (247., 182., 210.),
15: (66., 188., 102.),
16: (219., 219., 141.),
17: (140., 57., 197.),
18: (202., 185., 52.),
19: (51., 176., 203.),
20: (200., 54., 131.),
21: (92., 193., 61.),
22: (78., 71., 183.),
23: (172., 114., 82.),
24: (255., 127., 14.),
25: (91., 163., 138.),
26: (153., 98., 156.),
27: (140., 153., 101.),
28: (158., 218., 229.),
29: (100., 125., 154.),
30: (178., 127., 135.),
32: (146., 111., 194.),
33: (44., 160., 44.),
34: (112., 128., 144.),
35: (96., 207., 209.),
36: (227., 119., 194.),
37: (213., 92., 176.),
38: (94., 106., 211.),
39: (82., 84., 163.),
40: (100., 85., 144.),
}
VALID_CLASS_IDS = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39
]
CLASS_COLOR = []
for valid_id in VALID_CLASS_IDS:
CLASS_COLOR.append(SCANNET_COLOR_MAP[valid_id])
CLASS_COLOR = np.array(CLASS_COLOR) / 255.0
def show_predication_result(pth_file, show_gt):
data = torch.load(pth_file)
coords, colors, labels, pred = data
ignore_index = labels == -100
coords = coords[~ignore_index]
colors = colors[~ignore_index]
labels = labels[~ignore_index]
pred = pred[~ignore_index]
gt_color = [CLASS_COLOR[x] for x in labels.astype("int32")]
pred_color = [CLASS_COLOR[x] for x in pred.astype("int32")]
if show_gt:
v1 = pptk.viewer(coords, gt_color)
v1.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False)
v1.set(theta=1.8, lookat=[0, 0, 0], phi=0.52)
v2 = pptk.viewer(coords, pred_color)
v2.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False)
v2.set(theta=1.8, lookat=[0, 0, 0], phi=0.52)
if __name__ == "__main__":
show_predication_result(pth_file, show_gt)
| 30.579439 | 131 | 0.537897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 797 | 0.243582 |
463e9059d21ae44008d266e66201d33e8c741f68 | 1,251 | py | Python | core/migrations/0009_measurement.py | Potanist/Potanist | bd9d82fe9ca8f4157b34e5174793777d6729999c | [
"BSD-3-Clause"
]
| null | null | null | core/migrations/0009_measurement.py | Potanist/Potanist | bd9d82fe9ca8f4157b34e5174793777d6729999c | [
"BSD-3-Clause"
]
| null | null | null | core/migrations/0009_measurement.py | Potanist/Potanist | bd9d82fe9ca8f4157b34e5174793777d6729999c | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_grow_owner'),
]
operations = [
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('air_temperature', models.IntegerField(null=True, blank=True)),
('water_temperature', models.IntegerField(null=True, blank=True)),
('humidity', models.IntegerField(null=True, blank=True)),
('co2', models.IntegerField(null=True, blank=True)),
('ppm', models.IntegerField(null=True, blank=True)),
('tds', models.IntegerField(null=True, blank=True)),
('ec', models.IntegerField(null=True, blank=True)),
('ph', models.IntegerField(null=True, blank=True)),
('lumen', models.IntegerField(null=True, blank=True)),
('plant', models.ForeignKey(to='core.Plant')),
],
),
]
| 39.09375 | 114 | 0.576339 | 1,142 | 0.91287 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.138289 |
4642f66823f504498e1fb09dc0db99ad096fbcd2 | 7,257 | py | Python | mkt/purchase/models.py | muffinresearch/zamboni | 045a6f07c775b99672af6d9857d295ed02fe5dd9 | [
"BSD-3-Clause"
]
| null | null | null | mkt/purchase/models.py | muffinresearch/zamboni | 045a6f07c775b99672af6d9857d295ed02fe5dd9 | [
"BSD-3-Clause"
]
| null | null | null | mkt/purchase/models.py | muffinresearch/zamboni | 045a6f07c775b99672af6d9857d295ed02fe5dd9 | [
"BSD-3-Clause"
]
| null | null | null | import datetime
from django.conf import settings
from django.db import models
from django.utils import translation
import tower
from babel import Locale, numbers
from jingo import env
from jinja2.filters import do_dictsort
from tower import ugettext as _
import amo
from amo.fields import DecimalCharField
from amo.helpers import absolutify, urlparams
from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja
class ContributionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Contribution(amo.models.ModelBase):
addon = models.ForeignKey('webapps.Addon', blank=True, null=True)
# For in-app purchases this links to the product.
inapp_product = models.ForeignKey('inapp.InAppProduct',
blank=True, null=True)
amount = DecimalCharField(max_digits=9, decimal_places=2,
nullify_invalid=True, null=True)
currency = models.CharField(max_length=3,
choices=do_dictsort(amo.PAYPAL_CURRENCIES),
default=amo.CURRENCY_DEFAULT)
source = models.CharField(max_length=255, null=True)
source_locale = models.CharField(max_length=10, null=True)
# This is the external id that you can communicate to the world.
uuid = models.CharField(max_length=255, null=True, db_index=True)
comment = models.CharField(max_length=255)
# This is the internal transaction id between us and a provider,
# for example paypal or solitude.
transaction_id = models.CharField(max_length=255, null=True, db_index=True)
paykey = models.CharField(max_length=255, null=True)
# Marketplace specific.
# TODO(andym): figure out what to do when we delete the user.
user = models.ForeignKey('users.UserProfile', blank=True, null=True)
type = models.PositiveIntegerField(default=amo.CONTRIB_TYPE_DEFAULT,
choices=do_dictsort(amo.CONTRIB_TYPES))
price_tier = models.ForeignKey('prices.Price', blank=True, null=True,
on_delete=models.PROTECT)
# If this is a refund or a chargeback, which charge did it relate to.
related = models.ForeignKey('self', blank=True, null=True,
on_delete=models.PROTECT)
class Meta:
db_table = 'stats_contributions'
def __unicode__(self):
return u'App {app}: in-app: {inapp}: {amount}'.format(
app=self.addon, amount=self.amount, inapp=self.inapp_product)
@property
def date(self):
try:
return datetime.date(self.created.year,
self.created.month, self.created.day)
except AttributeError:
# created may be None
return None
def _switch_locale(self):
if self.source_locale:
lang = self.source_locale
else:
lang = self.addon.default_locale
tower.activate(lang)
return Locale(translation.to_locale(lang))
def _mail(self, template, subject, context):
template = env.get_template(template)
body = template.render(context)
send_mail(subject, body, settings.MARKETPLACE_EMAIL,
[self.user.email], fail_silently=True)
def record_failed_refund(self, e, user):
self.enqueue_refund(amo.REFUND_FAILED, user,
rejection_reason=str(e))
self._switch_locale()
self._mail('users/support/emails/refund-failed.txt',
# L10n: the addon name.
_(u'%s refund failed' % self.addon.name),
{'name': self.addon.name})
send_mail_jinja(
'Refund failed', 'purchase/email/refund-failed.txt',
{'name': self.user.email,
'error': str(e)},
settings.MARKETPLACE_EMAIL,
[str(self.addon.support_email)], fail_silently=True)
def mail_approved(self):
"""The developer has approved a refund."""
locale = self._switch_locale()
amt = numbers.format_currency(abs(self.amount), self.currency,
locale=locale)
self._mail('users/support/emails/refund-approved.txt',
# L10n: the adddon name.
_(u'%s refund approved' % self.addon.name),
{'name': self.addon.name, 'amount': amt})
def mail_declined(self):
"""The developer has declined a refund."""
self._switch_locale()
self._mail('users/support/emails/refund-declined.txt',
# L10n: the adddon name.
_(u'%s refund declined' % self.addon.name),
{'name': self.addon.name})
def enqueue_refund(self, status, user, refund_reason=None,
rejection_reason=None):
"""Keep track of a contribution's refund status."""
from mkt.prices.models import Refund
refund, c = Refund.objects.safer_get_or_create(contribution=self,
user=user)
refund.status = status
# Determine which timestamps to update.
timestamps = []
if status in (amo.REFUND_PENDING, amo.REFUND_APPROVED_INSTANT,
amo.REFUND_FAILED):
timestamps.append('requested')
if status in (amo.REFUND_APPROVED, amo.REFUND_APPROVED_INSTANT):
timestamps.append('approved')
elif status == amo.REFUND_DECLINED:
timestamps.append('declined')
for ts in timestamps:
setattr(refund, ts, datetime.datetime.now())
if refund_reason:
refund.refund_reason = refund_reason
if rejection_reason:
refund.rejection_reason = rejection_reason
refund.save()
return refund
def get_amount_locale(self, locale=None):
"""Localise the amount paid into the current locale."""
if not locale:
lang = translation.get_language()
locale = get_locale_from_lang(lang)
return numbers.format_currency(self.amount or 0,
self.currency or 'USD',
locale=locale)
def get_refund_url(self):
return urlparams(self.addon.get_dev_url('issue_refund'),
transaction_id=self.transaction_id)
def get_absolute_refund_url(self):
return absolutify(self.get_refund_url())
def get_refund_contribs(self):
"""Get related set of refund contributions."""
return Contribution.objects.filter(
related=self, type=amo.CONTRIB_REFUND).order_by('-modified')
def is_refunded(self):
"""
If related has been set, then this transaction has been refunded or
charged back. This is a bit expensive, so refrain from using on listing
pages.
"""
return (Contribution.objects.filter(related=self,
type__in=[amo.CONTRIB_REFUND,
amo.CONTRIB_CHARGEBACK])
.exists())
| 40.316667 | 79 | 0.606587 | 6,825 | 0.940471 | 0 | 0 | 254 | 0.035001 | 0 | 0 | 1,384 | 0.190712 |
4643409696cd3d49a508459df5a413ef73fb761e | 301 | py | Python | src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
]
| 19 | 2015-02-16T08:30:49.000Z | 2020-05-01T06:06:33.000Z | src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
]
| 5 | 2015-01-13T23:01:54.000Z | 2016-11-30T15:23:43.000Z | src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
]
| 19 | 2015-05-28T09:36:19.000Z | 2022-03-15T23:19:29.000Z | from kol.request.GenericRequest import GenericRequest
class CampgroundRestRequest(GenericRequest):
"Rests at the user's campground."
def __init__(self, session):
super(CampgroundRestRequest, self).__init__(session)
self.url = session.serverURL + 'campground.php?action=rest'
| 33.444444 | 67 | 0.750831 | 245 | 0.813953 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.202658 |
4643c3f57a3b1cef340d6b9803b645c83275e77f | 849 | py | Python | pdlearn/adaptor/methods.py | richlewis42/pandas-learn | 4330c642e4f62e8abc6dcd58ba33daf22519f41e | [
"MIT"
]
| 1 | 2015-12-16T04:03:19.000Z | 2015-12-16T04:03:19.000Z | pdlearn/adaptor/methods.py | lewisacidic/pandas-learn | 4330c642e4f62e8abc6dcd58ba33daf22519f41e | [
"MIT"
]
| 3 | 2015-12-10T02:05:13.000Z | 2015-12-16T04:04:16.000Z | pdlearn/adaptor/methods.py | lewisacidic/pandas-learn | 4330c642e4f62e8abc6dcd58ba33daf22519f41e | [
"MIT"
]
| null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of pandas-learn
# https://github.com/RichLewis42/pandas-learn
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
# Copyright (c) 2015, Rich Lewis <[email protected]>
"""
pdlearn.adaptor.methods
~~~~~~~~~~~~~~~~~~~~~~~
Module implementing methods for pdlearn classes.
"""
import pandas as pd
def feature_property(name):
"""
Create a method adapting a parent class' property to return a pandas frame.
"""
# pylint: disable=C0111
@property
def method(self):
# pylint: disable=W0212
with self._unyouthanize():
prop = getattr(self, name + '_')
if self.pandas_mode_:
return pd.Series(prop, index=self.feature_names_, name=name)
else:
return prop
return method
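# Hedged illustration, not part of the package: a minimal stand-in class with
# the attributes feature_property expects, showing how a wrapped estimator
# attribute comes back as a pandas Series. All names below are assumptions.
import contextlib


class _FakeAdaptor:
    """Toy object mimicking the adaptor machinery feature_property relies on."""

    coef = feature_property('coef')

    def __init__(self):
        self.coef_ = [0.5, -1.2]
        self.feature_names_ = ['age', 'height']
        self.pandas_mode_ = True

    @contextlib.contextmanager
    def _unyouthanize(self):
        # the real adaptor temporarily unwraps its pandas layer here
        yield


# _FakeAdaptor().coef -> pd.Series([0.5, -1.2], index=['age', 'height'], name='coef')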
| 22.945946 | 79 | 0.628975 | 0 | 0 | 0 | 0 | 284 | 0.334511 | 0 | 0 | 495 | 0.583039 |
4643e75f0f9e71df8892955ee0441be6f4a0dd8e | 396 | py | Python | BroCode/lessons/13-nested_loops.py | sofiaEkn/Python_Exercises | 2ffbec63552a29c72de50ef5e5a3307195e1b546 | [
"MIT"
]
| null | null | null | BroCode/lessons/13-nested_loops.py | sofiaEkn/Python_Exercises | 2ffbec63552a29c72de50ef5e5a3307195e1b546 | [
"MIT"
]
| null | null | null | BroCode/lessons/13-nested_loops.py | sofiaEkn/Python_Exercises | 2ffbec63552a29c72de50ef5e5a3307195e1b546 | [
"MIT"
]
| null | null | null | # nested loops = The "inner loop" will finish all of it's iterations before
# finishing one iteration of the "outer loop"
rows = int(input("How many rows?: "))
columns = int(input("How many columns?: "))
symbol = input("Enter a symbol to use: ")
#symbol = int(input("Enter a symbol to use: "))
for i in range(rows):
for j in range(columns):
print(symbol, end="")
print() | 33 | 75 | 0.643939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.628788 |
46441ef13bcf6070728bb55ba5a6a680d4610fc1 | 2,516 | py | Python | duct/sources/python/uwsgi.py | geostarling/duct | a862fc1948ed1ecce1e866cbffc7fcedcea81f55 | [
"MIT"
]
| 12 | 2017-01-06T11:59:59.000Z | 2021-03-16T17:57:46.000Z | duct/sources/python/uwsgi.py | geostarling/duct | a862fc1948ed1ecce1e866cbffc7fcedcea81f55 | [
"MIT"
]
| 20 | 2017-01-06T12:56:39.000Z | 2017-02-06T13:45:28.000Z | duct/sources/python/uwsgi.py | geostarling/duct | a862fc1948ed1ecce1e866cbffc7fcedcea81f55 | [
"MIT"
]
| 2 | 2017-02-09T14:02:13.000Z | 2018-10-16T13:18:17.000Z | """
.. module:: uwsgi
:platform: Any
:synopsis: Reads UWSGI stats
.. moduleauthor:: Colin Alston <[email protected]>
"""
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from zope.interface import implementer
from twisted.internet import defer, reactor
from twisted.internet.protocol import ClientCreator, Protocol
from duct.interfaces import IDuctSource
from duct.objects import Source
class JSONProtocol(Protocol):
"""
JSON line protocol
"""
delimiter = '\n'
def __init__(self):
self.ready = False
self.buf = StringIO()
self.d = defer.Deferred()
def dataReceived(self, data):
self.buf.write(data)
def connectionLost(self, *_a):
self.buf.seek(0)
self.d.callback(json.load(self.buf))
def disconnect(self):
"""Disconnect transport
"""
return self.transport.loseConnection()
@implementer(IDuctSource)
class Emperor(Source):
"""Connects to UWSGI Emperor stats and creates useful metrics
**Configuration arguments:**
:param host: Hostname (default localhost)
:type host: str.
:param port: Port
:type port: int.
"""
@defer.inlineCallbacks
def get(self):
host = self.config.get('host', 'localhost')
port = int(self.config.get('port', 6001))
proto = yield ClientCreator(
reactor, JSONProtocol).connectTCP(host, port)
stats = yield proto.d
nodes = stats.get('vassals', [])
events = []
active = 0
accepting = 0
respawns = 0
for node in nodes:
if node['accepting'] > 0:
active += 1
accepting += node['accepting']
if node['respawns'] > 0:
respawns += 1
events.extend([
self.createEvent('ok', 'accepting', node['accepting'],
prefix=node['id'] + '.accepting'),
self.createEvent('ok', 'respawns', node['respawns'],
prefix=node['id'] + '.respawns'),
])
events.extend([
self.createEvent('ok', 'active', active, prefix='total.active'),
self.createEvent('ok', 'accepting', accepting,
prefix='total.accepting'),
self.createEvent('ok', 'respawns', respawns,
prefix='total.respawns'),
])
defer.returnValue(events)
| 24.666667 | 76 | 0.565183 | 2,039 | 0.810413 | 1,272 | 0.505564 | 1,573 | 0.625199 | 0 | 0 | 645 | 0.256359 |
46454325357264b47dbceec722a3e08a4d41a6be | 22,167 | py | Python | xform/utils.py | alisonamerico/Django-XForm | ad2e96455307b57ef3c485a006db478fe4352a36 | [
"MIT"
]
| 3 | 2019-07-25T14:46:14.000Z | 2020-12-14T22:43:46.000Z | xform/utils.py | alisonamerico/Django-XForm | ad2e96455307b57ef3c485a006db478fe4352a36 | [
"MIT"
]
| 4 | 2019-09-04T17:39:04.000Z | 2021-11-05T23:14:58.000Z | xform/utils.py | alisonamerico/Django-XForm | ad2e96455307b57ef3c485a006db478fe4352a36 | [
"MIT"
]
| 1 | 2021-11-05T23:05:48.000Z | 2021-11-05T23:05:48.000Z | import datetime
import importlib
import json
import logging
import math
import mimetypes
import os
import re
import sys
import uuid
import requests
from urllib.parse import urljoin
from wsgiref.util import FileWrapper
from xml.dom import minidom, Node
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import ValidationError
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.http import HttpResponseNotFound, StreamingHttpResponse
from django.utils import timezone
from rest_framework import exceptions
from .tags import XFORM_ID_STRING, VERSION
PENDING = 0
SUCCESSFUL = 1
FAILED = 2
EXTERNAL_EXPORT_TYPES = ['xls']
EXPORT_EXT = {
'csv': 'csv',
'csvzip': 'csv_zip',
'kml': 'kml',
'savzip': 'sav_zip',
'uuid': 'external',
'xls': 'xls',
'xlsx': 'xls',
'zip': 'zip',
}
class XLSFormError(Exception):
pass
class DuplicateInstance(Exception):
def __str__(self):
return 'Duplicate Instance'
class InstanceInvalidUserError(Exception):
def __str__(self):
return 'Could not determine the user.'
class InstanceParseError(Exception):
def __str__(self):
return 'The instance could not be parsed.'
class InstanceEmptyError(InstanceParseError):
def __str__(self):
return 'Empty instance'
class NonUniqueFormIdError(Exception):
pass
class InstanceMultipleNodeError(Exception):
pass
class FormIsMergedDatasetError(Exception):
"""Exception class for merged datasets"""
def __str__(self):
return 'Submissions are not allowed on merged datasets.'
class FormInactiveError(Exception):
"""Exception class for inactive forms"""
def __str__(self):
return 'Form is inactive'
def generate_content_disposition_header(name, extension, show_date=True):
if name is None:
return 'attachment;'
if show_date:
name = "%s-%s" % (name, timezone.now().strftime("%Y-%m-%d-%H-%M-%S"))
return 'attachment; filename=%s.%s' % (name, extension)
def _get_all_attributes(node):
"""
Go through an XML document returning all the attributes we see.
"""
if hasattr(node, "hasAttributes") and node.hasAttributes():
for key in node.attributes.keys():
yield key, node.getAttribute(key)
for child in node.childNodes:
for pair in _get_all_attributes(child):
yield pair
def _flatten_dict_nest_repeats(d, prefix):
"""
Return a list of XPath, value pairs.
:param d: A dictionary
:param prefix: A list of prefixes
"""
for key, value in d.items():
new_prefix = prefix + [key]
if isinstance(value, dict):
for pair in _flatten_dict_nest_repeats(value, new_prefix):
yield pair
elif isinstance(value, list):
repeats = []
for i, item in enumerate(value):
item_prefix = list(new_prefix) # make a copy
if isinstance(item, dict):
repeat = {}
for path, value in _flatten_dict_nest_repeats(
item, item_prefix):
# TODO: this only considers the first level of repeats
repeat.update({u"/".join(path[1:]): value})
repeats.append(repeat)
else:
repeats.append({u"/".join(item_prefix[1:]): item})
yield (new_prefix, repeats)
else:
yield (new_prefix, value)
def _gather_parent_node_list(node):
node_names = []
# also check for grand-parent node to skip document element
if node.parentNode and node.parentNode.parentNode:
node_names.extend(_gather_parent_node_list(node.parentNode))
node_names.extend([node.nodeName])
return node_names
def xpath_from_xml_node(node):
node_names = _gather_parent_node_list(node)
return "/".join(node_names[1:])
def _xml_node_to_dict(node, repeats=[], encrypted=False):
if len(node.childNodes) == 0:
# there's no data for this leaf node
return None
elif len(node.childNodes) == 1 and \
node.childNodes[0].nodeType == node.TEXT_NODE:
# there is data for this leaf node
return {node.nodeName: node.childNodes[0].nodeValue}
else:
# this is an internal node
value = {}
for child in node.childNodes:
# handle CDATA text section
if child.nodeType == child.CDATA_SECTION_NODE:
return {child.parentNode.nodeName: child.nodeValue}
d = _xml_node_to_dict(child, repeats)
if d is None:
continue
child_name = child.nodeName
child_xpath = xpath_from_xml_node(child)
if list(d) != [child_name]:
raise AssertionError()
node_type = dict
# check if name is in list of repeats and make it a list if so
# All the photo attachments in an encrypted form use name media
if child_xpath in repeats or (encrypted and child_name == 'media'):
node_type = list
if node_type == dict:
if child_name not in value:
value[child_name] = d[child_name]
else:
# node is repeated, aggregate node values
node_value = value[child_name]
# 1. check if the node values is a list
if not isinstance(node_value, list):
# if not a list create
value[child_name] = [node_value]
# 2. parse the node
d = _xml_node_to_dict(child, repeats)
# 3. aggregate
value[child_name].append(d[child_name])
else:
if child_name not in value:
value[child_name] = [d[child_name]]
else:
value[child_name].append(d[child_name])
if value == {}:
return None
else:
return {node.nodeName: value}
def set_uuid(obj):
"""
Only give an object a new UUID if it does not have one.
"""
if not obj.uuid:
obj.uuid = uuid.uuid4().hex
def clean_and_parse_xml(xml_string):
clean_xml_str = xml_string.strip()
try:
clean_xml_str = clean_xml_str.decode("utf-8")
except Exception:
pass
clean_xml_str = re.sub(r">\s+<", u"><", clean_xml_str)
xml_obj = minidom.parseString(clean_xml_str)
return xml_obj
def get_meta_from_xml(xml_str, meta_name):
xml = clean_and_parse_xml(xml_str)
children = xml.childNodes
# children ideally contains a single element
# that is the parent of all survey elements
if children.length == 0:
raise ValueError("XML string must have a survey element.")
survey_node = children[0]
meta_tags = [n for n in survey_node.childNodes if
n.nodeType == Node.ELEMENT_NODE and
(n.tagName.lower() == "meta" or
n.tagName.lower() == "orx:meta")]
if len(meta_tags) == 0:
return None
# get the requested tag
meta_tag = meta_tags[0]
uuid_tags = [n for n in meta_tag.childNodes if
n.nodeType == Node.ELEMENT_NODE and
(n.tagName.lower() == meta_name.lower() or
n.tagName.lower() == u'orx:%s' % meta_name.lower())]
if len(uuid_tags) == 0:
return None
uuid_tag = uuid_tags[0]
return uuid_tag.firstChild.nodeValue.strip() if uuid_tag.firstChild\
else None
def flatten(l):
return [item for sublist in l for item in sublist]
def _get_fields_of_type(xform, types):
k = []
survey_elements = flatten(
[xform.get_survey_elements_of_type(t) for t in types])
for element in survey_elements:
name = element.get_abbreviated_xpath()
k.append(name)
return k
def get_numeric_fields(xform):
"""List of numeric field names for specified xform"""
return _get_fields_of_type(xform, ['decimal', 'integer'])
def get_uuid_from_xml(xml):
def _uuid_only(uuid, regex):
matches = regex.match(uuid)
if matches and len(matches.groups()) > 0:
return matches.groups()[0]
return None
uuid = get_meta_from_xml(xml, "instanceID")
regex = re.compile(r"uuid:(.*)")
if uuid:
return _uuid_only(uuid, regex)
# check in survey_node attributes
xml = clean_and_parse_xml(xml)
children = xml.childNodes
# children ideally contains a single element
# that is the parent of all survey elements
if children.length == 0:
raise ValueError("XML string must have a survey element.")
survey_node = children[0]
uuid = survey_node.getAttribute('instanceID')
if uuid != '':
return _uuid_only(uuid, regex)
return None
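# Small illustration, not part of the module: extracting the instance UUID
# from a minimal submission document. The form id is illustrative.
def _example_uuid_extraction():
    xml = (
        '<data id="household_survey">'
        '<meta><instanceID>uuid:1234abcd-0000-0000-0000-000000000000'
        '</instanceID></meta>'
        '</data>'
    )
    # returns '1234abcd-0000-0000-0000-000000000000'
    return get_uuid_from_xml(xml)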
def numeric_checker(string_value):
if string_value.isdigit():
return int(string_value)
else:
try:
value = float(string_value)
if math.isnan(value):
value = 0
return value
except ValueError:
pass
def get_values_matching_key(doc, key):
"""
Returns iterator of values in 'doc' with the matching 'key'.
"""
def _get_values(doc, key):
if doc is not None:
if key in doc:
yield doc[key]
for z in doc.items():
v = z[1]
if isinstance(v, dict):
for item in _get_values(v, key):
yield item
elif isinstance(v, list):
for i in v:
for j in _get_values(i, key):
yield j
return _get_values(doc, key)
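# Small illustration, not part of the module: pulls every value stored under
# the key 'photo' anywhere in a nested submission dict.
def _example_values_matching_key():
    doc = {
        'photo': 'front.jpg',
        'children': [
            {'photo': 'kid1.jpg'},
            {'name': 'no photo here'},
        ],
    }
    # -> ['front.jpg', 'kid1.jpg']
    return list(get_values_matching_key(doc, 'photo'))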
class XFormInstanceParser(object):
def __init__(self, xml_str, data_dictionary):
self.dd = data_dictionary
self.parse(xml_str)
def parse(self, xml_str):
self._xml_obj = clean_and_parse_xml(xml_str)
self._root_node = self._xml_obj.documentElement
repeats = [e.get_abbreviated_xpath()
for e in self.dd.get_survey_elements_of_type(u"repeat")]
self._dict = _xml_node_to_dict(self._root_node, repeats)
self._flat_dict = {}
if self._dict is None:
raise InstanceEmptyError
for path, value in _flatten_dict_nest_repeats(self._dict, []):
self._flat_dict[u"/".join(path[1:])] = value
self._set_attributes()
def get_root_node(self):
return self._root_node
def get_root_node_name(self):
return self._root_node.nodeName
def get(self, abbreviated_xpath):
return self.to_flat_dict()[abbreviated_xpath]
def to_dict(self):
return self._dict
def to_flat_dict(self):
return self._flat_dict
def get_attributes(self):
return self._attributes
def _set_attributes(self):
self._attributes = {}
all_attributes = list(_get_all_attributes(self._root_node))
for key, value in all_attributes:
# Since enketo forms may have the template attribute in
# multiple xml tags, overriding and log when this occurs
if key in self._attributes:
logger = logging.getLogger("console_logger")
logger.debug("Skipping duplicate attribute: %s"
" with value %s" % (key, value))
logger.debug(str(all_attributes))
else:
self._attributes[key] = value
def get_xform_id_string(self):
return self._attributes[u"id"]
def get_version(self):
return self._attributes.get(u"version")
def get_flat_dict_with_attributes(self):
result = self.to_flat_dict().copy()
result[XFORM_ID_STRING] = self.get_xform_id_string()
version = self.get_version()
if version:
result[VERSION] = self.get_version()
return result
def response_with_mimetype_and_name(mimetype,
name,
extension=None,
show_date=True,
file_path=None,
use_local_filesystem=False,
full_mime=False):
if extension is None:
extension = mimetype
if not full_mime:
mimetype = "application/%s" % mimetype
if file_path:
try:
if isinstance(file_path, InMemoryUploadedFile):
response = StreamingHttpResponse(
file_path, content_type=mimetype)
response['Content-Length'] = file_path.size
elif not use_local_filesystem:
default_storage = get_storage_class()()
wrapper = FileWrapper(default_storage.open(file_path))
response = StreamingHttpResponse(
wrapper, content_type=mimetype)
response['Content-Length'] = default_storage.size(file_path)
else:
wrapper = FileWrapper(open(file_path))
response = StreamingHttpResponse(
wrapper, content_type=mimetype)
response['Content-Length'] = os.path.getsize(file_path)
except IOError:
response = HttpResponseNotFound(
"The requested file could not be found.")
else:
response = HttpResponse(content_type=mimetype)
response['Content-Disposition'] = generate_content_disposition_header(
name, extension, show_date)
return response
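# Hedged example, not part of the module: building a CSV download response for
# a file kept on the default storage backend. The path and name are
# hypothetical.
def _example_csv_response():
    return response_with_mimetype_and_name(
        'csv', 'household_survey_export',
        file_path='exports/household_survey.csv',
        show_date=False)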
def _get_export_type(export_type):
if export_type in list(EXPORT_EXT):
export_type = EXPORT_EXT[export_type]
else:
raise exceptions.ParseError(
"'%(export_type)s' format not known or not implemented!" %
{'export_type': export_type})
return export_type
def get_file_extension(content_type):
return mimetypes.guess_extension(content_type)[1:]
def get_media_file_response(metadata, username=None):
"""
Returns a HTTP response for media files.
HttpResponse 200 if it represents a file on disk.
HttpResponseRedirect 302 incase the metadata represents a url.
HttpResponseNotFound 404 if the metadata file cannot be found.
"""
if metadata.data_type == 'media' and metadata.data_file:
file_path = metadata.data_file.name
filename, extension = os.path.splitext(file_path.split('/')[-1])
extension = extension.strip('.')
dfs = get_storage_class()()
if dfs.exists(file_path):
return response_with_mimetype_and_name(
metadata.data_file_type,
filename,
extension=extension,
show_date=False,
file_path=file_path,
full_mime=True)
elif metadata.data_type == 'url' and not metadata.data_file:
url = requests.Request(
'GET', metadata.data_value, params={
'username': username
}
).prepare().url
try:
data_file = metadata.get_file(url)
except Exception:
raise Http404
return response_with_mimetype_and_name(
mimetype=data_file.content_type,
name=data_file.name,
extension=get_file_extension(data_file.content_type),
show_date=False,
file_path=data_file,
use_local_filesystem=False,
full_mime=True
)
return HttpResponseNotFound()
def report_exception(*args, **kwargs):
# dummy
return
def publish_form(callback):
"""
Calls the callback function to publish a XLSForm and returns appropriate
message depending on exception throw during publishing of a XLSForm.
"""
try:
return callback()
# except (PyXFormError, XLSFormError) as e:
# return {'type': 'alert-error', 'text': str(e)}
except IntegrityError as e:
return {
'type': 'alert-error',
'text': 'Form with this id or SMS-keyword already exists.',
}
# except ProcessTimedOut as e:
# # catch timeout errors
# return {
# 'type': 'alert-error',
# 'text': 'Form validation timeout, please try again.',
# }
except (MemoryError, OSError) as e:
return {
'type': 'alert-error',
'text': (
'An error occurred while publishing the form. '
'Please try again.'
),
}
except (AttributeError, Exception, ValidationError) as e:
report_exception("Form publishing exception: {}".format(e), str(e),
sys.exc_info())
return {'type': 'alert-error', 'text': str(e)}
def _get_tag_or_element_type_xpath(xform, tag):
elems = xform.get_survey_elements_of_type(tag)
return elems[0].get_abbreviated_xpath() if elems else tag
def calculate_duration(start_time, end_time):
"""
This function calculates duration when given start and end times.
An empty string is returned if either of the time formats does
not match '_format' format else, the duration is returned
"""
_format = "%Y-%m-%dT%H:%M:%S"
try:
_start = datetime.datetime.strptime(start_time[:19], _format)
_end = datetime.datetime.strptime(end_time[:19], _format)
except (TypeError, ValueError):
return ''
duration = (_end - _start).total_seconds()
return duration
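# Small illustration, not part of the module: the duration comes back in
# seconds, and malformed or missing timestamps fall back to an empty string.
def _example_duration():
    assert calculate_duration('2019-05-01T10:00:00', '2019-05-01T10:05:30') == 330.0
    assert calculate_duration(None, '2019-05-01T10:05:30') == ''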
def inject_instanceid(xml_str, uuid):
if get_uuid_from_xml(xml_str) is None:
xml = clean_and_parse_xml(xml_str)
children = xml.childNodes
if children.length == 0:
raise ValueError("XML string must have a survey element.")
# check if we have a meta tag
survey_node = children.item(0)
meta_tags = [
n for n in survey_node.childNodes
if n.nodeType == Node.ELEMENT_NODE and n.tagName.lower() == "meta"
]
if len(meta_tags) == 0:
meta_tag = xml.createElement("meta")
xml.documentElement.appendChild(meta_tag)
else:
meta_tag = meta_tags[0]
# check if we have an instanceID tag
uuid_tags = [
n for n in meta_tag.childNodes
if n.nodeType == Node.ELEMENT_NODE and n.tagName == "instanceID"
]
if len(uuid_tags) == 0:
uuid_tag = xml.createElement("instanceID")
meta_tag.appendChild(uuid_tag)
else:
uuid_tag = uuid_tags[0]
# insert meta and instanceID
text_node = xml.createTextNode(u"uuid:%s" % uuid)
uuid_tag.appendChild(text_node)
return xml.toxml()
return xml_str
class EnketoError(Exception):
default_message = "There was a problem with your submissionor form. Please contact support."
def __init__(self, message=None):
if message is None:
self.message = self.default_message
else:
self.message = message
def __str__(self):
return "{}".format(self.message)
def handle_enketo_error(response):
"""Handle enketo error response."""
try:
data = json.loads(response.content)
except ValueError:
if response.status_code == 502:
raise EnketoError(
u"Sorry, we cannot load your form right now. Please try "
"again later.")
raise EnketoError()
else:
if 'message' in data:
raise EnketoError(data['message'])
raise EnketoError(response.text)
def enketo_url(
form_url, id_string, instance_xml=None,
instance_id=None,
return_url=None,
offline=False
):
if (not hasattr(settings, 'ENKETO_URL') or
not hasattr(settings, 'ENKETO_API_SURVEY_PATH') or
not hasattr(settings, 'ENKETO_API_TOKEN') or
settings.ENKETO_API_TOKEN == ''):
return False
values = {'form_id': id_string, 'server_url': form_url}
if instance_id and instance_xml:
url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_INSTANCE_PATH)
values.update({
'instance': instance_xml,
'instance_id': instance_id,
'return_url': return_url
})
else:
survey_path = settings.ENKETO_API_SURVEY_PATH
if offline:
survey_path += '/offline'
url = urljoin(settings.ENKETO_URL, survey_path)
response = requests.post(
url,
data=values,
auth=(settings.ENKETO_API_TOKEN, ''),
verify=getattr(settings, 'ENKETO_VERIFY_SSL', False))
if response.status_code in (200, 201):
try:
data = json.loads(response.content)
except ValueError:
pass
else:
url = (data.get('edit_url') or data.get('offline_url') or
data.get('url'))
if url:
return url
handle_enketo_error(response)
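# Usage sketch (assumes ENKETO_URL, ENKETO_API_SURVEY_PATH and ENKETO_API_TOKEN are
# configured in settings; the form-list URL below is illustrative):
#
#   url = enketo_url("https://example.com/bob/xform", "my_form_id")
#
# enketo_url() returns False when the Enketo settings are missing, returns the
# edit/offline/survey URL on success, and otherwise raises EnketoError via
# handle_enketo_error().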
def get_form_url(
request, protocol='http', preview=False, # xform_pk=None
):
"""
Return a form list url endpoint to be used to make a request to Enketo.
For example, it will return https://example.com and Enketo will know to
look for the form list at https://example.com/formList. If a username is
provided then Enketo will request the form list from
https://example.com/[username]/formList. Same applies for preview if
preview is True and also to a single form when xform_pk is provided.
"""
http_host = request.META.get('HTTP_HOST', 'dev.monitora.sisicmbio.icmbio.gov.br')
url = '%s://%s' % (protocol, http_host)
if preview:
url = '%s/preview' % url
return "{}/xform".format(url)
def get_from_module(module_name, function_name):
module = importlib.import_module(module_name)
return getattr(module, function_name)
| 30.873259 | 96 | 0.606668 | 3,441 | 0.155231 | 2,100 | 0.094735 | 0 | 0 | 0 | 0 | 4,438 | 0.200208 |
4645ac5dfc9f12700491f7aec71f5e520201b23f | 9,050 | py | Python | Phase5/testing_query.py | MrKLawrence/Course-Registration-Data-Analytics | b1a88ab9336a545965752d175d8c4796209e487c | [
"MIT"
]
| null | null | null | Phase5/testing_query.py | MrKLawrence/Course-Registration-Data-Analytics | b1a88ab9336a545965752d175d8c4796209e487c | [
"MIT"
]
| null | null | null | Phase5/testing_query.py | MrKLawrence/Course-Registration-Data-Analytics | b1a88ab9336a545965752d175d8c4796209e487c | [
"MIT"
]
| null | null | null | import datetime
from pymongo import MongoClient
import pymongo
import pprint
try:
db = MongoClient("mongodb://localhost:27017")["hkust"]
f=0.05
try:
print("Querying Documents...")
listOfCourseWithWaitingListSize = db.course.aggregate([
{ "$unwind": "$sections" },
# { "$project": { "newProduct": {"$multiply": [f, "$sections.enrol"]}, "satisfied": satisfied} },
# { "$project": { "compareResult": {"$gte": ["$sections.wait", "$newProduct"]}, "match_ts" : "$sections.recordTime"} },
{"$match": #filter timeslot
{"$and":[
# {"compareResult": "true"},
# {"satisfied" : "Yes"},
#{"sections.sectionId": {"$ne": null}},
#{"sections.sectionId": {"$exists": true}},
# {"sections.sectionId": {"$regex": '^L'}},
{"sections.recordTime": {"$gte": datetime.datetime.strptime("2018-01-26T14:00Z", "%Y-%m-%dT%H:%MZ")}},
{"sections.recordTime": {"$lte": datetime.datetime.strptime("2018-02-01T11:30Z", "%Y-%m-%dT%H:%MZ")}}
]
}
},
{ "$project":
{"code": 1,
"title": 1,
"credits": 1,
"sections":1,
# "description":1,
"satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]},
"lecSatisfied":{
"$cond":[{
"$and":[
{
"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]
},
{
"$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"]
}
]
},1,0]
}
},
},
{
"$sort": {"sections.sectionId": 1 }
},
{
"$group":{
"_id":{ "code": "$code", "recordTime":"$sections.recordTime"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$sections.recordTime"},
"sections":{
"$push": {
"sectionId":"$sections.sectionId",
"dateAndTime":"$sections.offerings.dateAndTime",
"quota":"$sections.quota",
"enrol":"$sections.enrol",
"avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } ,
"wait":"$sections.wait",
"satisfied":"$satisfied",
}
},
"lecSatisfiedCount":{"$sum":"$lecSatisfied"}
}
},
{ "$match": {"lecSatisfiedCount": {"$gt":0}}
},
{
"$sort": {"recordTime": 1 }
},
{
"$group":{
"_id":{ "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$recordTime"},
"sections":{"$last": "$sections"},
"lecSatisfiedCount":{"$last": "$lecSatisfiedCount"}
}
},
{
"$project":{
"_id":0,
"code": 1,
"title":1,
"credits": 1,
"recordTime":1,
"sections":1
}
}
]
)
# pprint.pprint(listOfCourseWithWaitingListSize)
recordNo = 0
for oneCourse in listOfCourseWithWaitingListSize:
recordNo = recordNo + 1
print("Record {:d}:".format(recordNo))
pprint.pprint(oneCourse)
# print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"]))
# for oneSection in oneCourse["sections"]:
# print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"])))
# print("description: {:s}".format(oneCourse["description"]))
#pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"]))
#print("Record {:d}: (course={:s})".format(recordNo, oneCourse))
except pymongo.errors.ConnectionFailure as error:
print("Document Querying Failed! Error Message: \"{}\"".format(error))
#return outputCourseDetails(courseCode, lectureSection, satisfied)
except pymongo.errors.ConnectionFailure as error:
print("Document Insertion Failed! Error Message: \"{}\"".format(error))
import numpy
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
#Model 1
def trainModel(trainingDataFilename):
# to set a seed of a random number generator used in the "optimization" tool in the neural network model
    numpy.random.seed(int(time.time()))  # seed() requires an integer
# Step 1: to load the data
# Step 1a: to read the dataset with "numpy" function
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
# Step 1b: to split the dataset into two datasets, namely the input attribute dataset (X) and the target attribute dataset (Y)
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
# Step 5: To evaluate the model
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
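# Usage sketch (the file name is hypothetical): the CSV is expected to contain four
# numeric feature columns followed by a single 0/1 target column.
#
#   model = trainModel("waitlist_training_data.csv")
#   model.predict(numpy.array([[120, 80, 0.05, 1]]))   # illustrative feature row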
# model 2:
def trainModel2(trainingDataFilename):
    numpy.random.seed(int(time.time()))  # seed() requires an integer
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 3:
def trainModel3(trainingDataFilename):
    numpy.random.seed(int(time.time()))  # seed() requires an integer
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(64, input_dim=4, activation='softmax'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 4:
def trainModel4(trainingDataFilename):
    numpy.random.seed(int(time.time()))  # seed() requires an integer
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='softmax'))
model.add(Dense(7, activation='softmax'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='logcosh', optimizer='rmsprop', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.3, epochs=300, batch_size=7)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 5:
def trainModel5(trainingDataFilename):
def trainModel5_beforeAddDrop(trainingDataFile_beforeAddDrop):
        numpy.random.seed(int(time.time()))  # seed() requires an integer
dataset = numpy.loadtxt(trainingDataFile_beforeAddDrop, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
def trainModel5_afterAddDrop(trainingDataFile_afterAddDrop):
        numpy.random.seed(int(time.time()))  # seed() requires an integer
dataset = numpy.loadtxt(trainingDataFile_afterAddDrop, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
| 35.629921 | 309 | 0.644309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,029 | 0.445193 |
4648df628268429b96e7a5ae13c51c1984b5c218 | 2,549 | py | Python | aws_iot/dashboard/migrations/0003_auto_20160427_1641.py | anduslim/aws_iot | 39bddc1af05313f5005b5147b9d469ad337d6f28 | [
"BSD-3-Clause"
]
| null | null | null | aws_iot/dashboard/migrations/0003_auto_20160427_1641.py | anduslim/aws_iot | 39bddc1af05313f5005b5147b9d469ad337d6f28 | [
"BSD-3-Clause"
]
| null | null | null | aws_iot/dashboard/migrations/0003_auto_20160427_1641.py | anduslim/aws_iot | 39bddc1af05313f5005b5147b9d469ad337d6f28 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0002_gatewaynode_sensorstickerreading'),
]
operations = [
migrations.CreateModel(
name='DerivedIntakeReading',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('modified_timestamp', models.DateTimeField(auto_now=True)),
('server_timestamp', models.DateTimeField(null=True, blank=True)),
('isOpen', models.NullBooleanField(verbose_name='Opened')),
],
options={
'verbose_name': 'Derived Intake Reading',
'verbose_name_plural': 'Derived Intake Reading',
},
),
migrations.RemoveField(
model_name='gatewaynode',
name='user2',
),
migrations.RemoveField(
model_name='medicationintake',
name='expected_intake',
),
migrations.RemoveField(
model_name='medicationintake',
name='user',
),
migrations.RemoveField(
model_name='sensornode',
name='medication_intake',
),
migrations.RemoveField(
model_name='sensorstickerreading',
name='gw_id',
),
migrations.RemoveField(
model_name='sensorstickerreading',
name='gw_timestamp',
),
migrations.AddField(
model_name='medicationintake',
name='expected_intake_timing',
field=models.TimeField(null=True, verbose_name='Expected Intake Time', blank=True),
),
migrations.AddField(
model_name='medicationintake',
name='med_desc',
field=models.CharField(max_length=32, null=True, blank=True),
),
migrations.AddField(
model_name='sensornode',
name='medication_intake_list',
field=models.ManyToManyField(to='dashboard.MedicationIntake', null=True, blank=True),
),
migrations.DeleteModel(
name='GatewayNode',
),
migrations.DeleteModel(
name='IntakeTime',
),
migrations.AddField(
model_name='derivedintakereading',
name='sensor_id',
field=models.ForeignKey(to='dashboard.SensorNode'),
),
]
| 32.679487 | 114 | 0.566497 | 2,440 | 0.957238 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.255394 |
464a4fad08a32bd81e6c1f0f95fd3057daa4e736 | 901 | py | Python | investing_algorithm_framework/core/models/__init__.py | coding-kitties/investing-algorithm-framework | 1156acf903345ec5e6787ee8767c68e24c4daffd | [
"Apache-2.0"
]
| 9 | 2020-09-14T13:46:32.000Z | 2022-02-01T15:40:12.000Z | investing_algorithm_framework/core/models/__init__.py | coding-kitties/investing-algorithm-framework | 1156acf903345ec5e6787ee8767c68e24c4daffd | [
"Apache-2.0"
]
| 44 | 2020-12-28T16:22:20.000Z | 2022-03-23T22:11:26.000Z | investing_algorithm_framework/core/models/__init__.py | coding-kitties/investing-algorithm-framework | 1156acf903345ec5e6787ee8767c68e24c4daffd | [
"Apache-2.0"
]
| 2 | 2020-12-25T06:14:39.000Z | 2022-01-19T19:00:20.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_all_tables():
db.create_all()
def initialize_db(app: Flask):
db.init_app(app)
db.app = app
from investing_algorithm_framework.core.models.order_status import OrderStatus
from investing_algorithm_framework.core.models.order_type import OrderType
from investing_algorithm_framework.core.models.order_side import OrderSide
from investing_algorithm_framework.core.models.time_unit import TimeUnit
from investing_algorithm_framework.core.models.order import Order
from investing_algorithm_framework.core.models.portfolio import Portfolio
from investing_algorithm_framework.core.models.position import Position
__all__ = [
"db",
"Portfolio",
"Position",
'Order',
"OrderType",
'OrderSide',
"TimeUnit",
"create_all_tables",
"initialize_db",
"OrderStatus"
]
| 25.742857 | 78 | 0.785794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.123196 |
464a949d4e46e87b22e002325b18acfc9b8c6a90 | 592 | py | Python | src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
]
| 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
]
| 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
]
| 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
Share,
ShareProperties,
File,
FileProperties,
Directory,
DirectoryProperties,
FileRange,
ContentSettings,
CopyProperties,
SharePermissions,
FilePermissions,
DeleteSnapshot,
)
| 28.190476 | 76 | 0.540541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.515203 |
464a989a9cb27c1d1baa77e94436a8092fe897da | 2,633 | py | Python | backend/db/patient.py | wooque/openpacs | 4524bc7fade0934a70a53bb311f302828cc56905 | [
"MIT"
]
| 1 | 2021-01-04T23:58:54.000Z | 2021-01-04T23:58:54.000Z | backend/db/patient.py | wooque/openpacs | 4524bc7fade0934a70a53bb311f302828cc56905 | [
"MIT"
]
| null | null | null | backend/db/patient.py | wooque/openpacs | 4524bc7fade0934a70a53bb311f302828cc56905 | [
"MIT"
]
| 1 | 2022-03-05T12:58:09.000Z | 2022-03-05T12:58:09.000Z | from db.table import Table
from db.study import Study
from db.series import Series
from pypika.pseudocolumns import PseudoColumn
class Patient(Table):
name = 'patients'
async def sync_db(self):
await self.exec("""
CREATE TABLE IF NOT EXISTS patients (
id SERIAL PRIMARY KEY,
patient_id TEXT UNIQUE NOT NULL,
name TEXT NOT NULL,
birth_date TEXT,
sex TEXT,
meta JSONB
);
""")
await self.exec("""
CREATE INDEX IF NOT EXISTS patients_patient_id ON patients(patient_id);
""")
async def insert_or_select(self, data):
q = self.select('*').where(self.table.patient_id == data['patient_id'])
p = await self.fetchone(q)
if p:
return p
q = self.insert().columns(
'patient_id', 'name', 'birth_date', 'sex',
).insert((
data['patient_id'], data['patient_name'],
data['patient_birth_date'], data['patient_sex'],
),).on_conflict('patient_id').do_update(
self.table.name, PseudoColumn('EXCLUDED.name'),
).returning('id')
patient_id = await self.fetchval(q)
return {'id': patient_id}
async def get_extra(self, patient_id):
from db.files import Files
q = self.select('*').where(self.table.id == patient_id)
patient = await self.fetchone(q)
patient = dict(patient)
StudyT = Study(self.conn)
q = StudyT.select('*').where(
StudyT.table.patient_id == patient_id
)
studies_data = await self.fetch(q)
studies_data = [dict(s) for s in studies_data]
studies = {}
for s in studies_data:
s['series'] = {}
studies[s['id']] = s
SeriesT = Series(self.conn)
q = SeriesT.select('*').where(
SeriesT.table.study_id.isin(list(studies.keys()))
)
series_data = await self.fetch(q)
series_data = [dict(s) for s in series_data]
for s in series_data:
s['files'] = []
studies[s['study_id']]['series'][s['id']] = s
FilesT = Files(self.conn)
q = FilesT.select('*').where(FilesT.table.study_id.isin(list(studies.keys())))
files = await self.fetch(q)
files = [dict(f) for f in files]
for f in files:
studies[f['study_id']]['series'][f['series_id']]['files'].append(f)
for s in studies.values():
s['series'] = list(s['series'].values())
patient['studies'] = list(studies.values())
return patient
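    # Shape sketch of the value returned by get_extra (field names beyond those
    # selected above are illustrative):
    #   {'id': ..., 'patient_id': ..., 'name': ...,
    #    'studies': [{'id': ..., 'series': [{'id': ..., 'files': [...]}, ...]}, ...]}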
| 31.722892 | 86 | 0.557159 | 2,501 | 0.949867 | 0 | 0 | 0 | 0 | 2,440 | 0.9267 | 621 | 0.235853 |
464bc6633efce25b9cc1abebeb497f50584d997d | 1,424 | py | Python | egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 5373fe4bd80857b53134db566cad48b8445cf3b9 | [
"Apache-2.0"
]
| null | null | null | egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 5373fe4bd80857b53134db566cad48b8445cf3b9 | [
"Apache-2.0"
]
| null | null | null | egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 5373fe4bd80857b53134db566cad48b8445cf3b9 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import sys
import json
def sec2str(seconds):
sec_int = int(round(seconds))
hh = sec_int / 3600
mm = (sec_int - hh * 3600) / 60
ss = sec_int - hh * 3600 - mm * 60
return "%d:%02d:%02d" % (hh, mm, ss)
if len(sys.argv) != 4:
    print "Usage:", __file__, "<segment> <text> <json>"
    print "  e.g.:", __file__, "data/dev/segments data/dev/text trans.json"
    sys.exit(1)
segment_filename = sys.argv[1]
text_filename = sys.argv[2]
output_filename = sys.argv[3]
start_time = {}
end_time = {}
utt2chn = {}
utt2id = {}
with open(segment_filename) as segmentfile:
for line in segmentfile:
fields = line.split()
utt = fields[0]
start_time[utt] = float(fields[2]);
end_time[utt] = float(fields[3]);
id, chn = fields[1].split("_", 1)
utt2chn[utt] = chn
utt2id[utt] = id
data = {}
with open(text_filename) as textfile:
for line in textfile:
utt, text = line.split(" ", 1)
chn = utt2chn[utt]
if chn not in data:
data[chn] = {
'EmpID1': utt2id[utt],
'transcript': []
}
start = sec2str(start_time[utt])
end = sec2str(end_time[utt])
utt_info = {
'start': start,
'end': end,
'usable': True,
'speaker': 'OFFICER',
'utterance': text.strip()
}
data[chn]['transcript'].append(utt_info)
with open(output_filename, 'w') as outfile:
json.dump(data, outfile)
| 21.907692 | 72 | 0.589888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.147472 |
464c2239f3ce7da17665c8d65313e5b7bfe44898 | 762 | py | Python | codeEval/hard/levenshtein_distance.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
]
| 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | codeEval/hard/levenshtein_distance.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
]
| 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | codeEval/hard/levenshtein_distance.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
]
| 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | import sys
from string import ascii_lowercase as alphabet
def generate_neighbours(ws, s):
ls, l = set(), len(s)
for i in xrange(l + 1):
ls.add(s[:i] + s[i + 1 :])
for e in alphabet:
ls.add(s[:i] + e + s[i:])
if i < l and e != s[i]:
ls.add(s[:i] + e + s[i + 1 :])
return ls.intersection(ws)
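# Worked example (illustrative word set): with ws = {"hello", "hallo", "hell"} the
# single-edit neighbourhood of "hello" (deletions, insertions, substitutions, plus
# the word itself) intersected with ws is {"hello", "hallo", "hell"}.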
def generate_network(ws, s):
gen, r = generate_neighbours(ws, s), set(s)
while len(gen) > 0:
s = gen.pop()
if s not in r:
r.add(s)
gen.update(generate_neighbours(ws, s))
return len(r.intersection(ws))
test_cases = open(sys.argv[1], "r")
words = set([test.strip() for test in test_cases])
test_cases.close()
print generate_network(words, "hello")
| 25.4 | 50 | 0.552493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013123 |
464c4a39f52a6f1f848e5df28842de523721d469 | 385 | py | Python | models/RelSaleSizeProject.py | the-Minister-0001/cardano-nft-admin | 870ae5303808056af702d7e162a249567505d4c6 | [
"MIT"
]
| 1 | 2021-07-28T20:17:25.000Z | 2021-07-28T20:17:25.000Z | models/RelSaleSizeProject.py | the-Minister-0001/cardano-nft-admin | 870ae5303808056af702d7e162a249567505d4c6 | [
"MIT"
]
| null | null | null | models/RelSaleSizeProject.py | the-Minister-0001/cardano-nft-admin | 870ae5303808056af702d7e162a249567505d4c6 | [
"MIT"
]
| null | null | null | from sqlalchemy import Column, Integer
from sqlalchemy import ForeignKey
from sqlalchemy.orm import declarative_base
from .base import Base
class RelSaleSizeProject(Base):
__tablename__ = 'rel_salesizes_projects'
id = Column(Integer, primary_key=True)
project_id = Column(Integer, ForeignKey('projects.id'))
salesize_id = Column(Integer, ForeignKey('salesizes.id'))
| 29.615385 | 61 | 0.779221 | 241 | 0.625974 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.132468 |
464ce40b514f813772eac4adaecb88e87e70d261 | 1,415 | py | Python | sudoku/recursive_solver.py | mkomaiha/NERS570-Sudoku | 2448de19dd8ae97292b74937d397846d10176a8b | [
"MIT"
]
| null | null | null | sudoku/recursive_solver.py | mkomaiha/NERS570-Sudoku | 2448de19dd8ae97292b74937d397846d10176a8b | [
"MIT"
]
| null | null | null | sudoku/recursive_solver.py | mkomaiha/NERS570-Sudoku | 2448de19dd8ae97292b74937d397846d10176a8b | [
"MIT"
]
| null | null | null | from sudoku.constants import SIZE, BOX_SIZE
from sudoku import Sudoku
class RS(Sudoku):
def __init__(self, grade=0, id=None):
super().__init__(grade, id)
def possible(self, r, c, n):
for i in range(0, SIZE):
if self.solved[r, i] == n:
return False
for i in range(0, SIZE):
if self.solved[i, c] == n:
return False
c0 = (c//BOX_SIZE)*BOX_SIZE
r0 = (r//BOX_SIZE)*BOX_SIZE
for i in range(0, BOX_SIZE):
for j in range(0, BOX_SIZE):
if self.solved[r0+i, c0+j] == n:
return False
return True
def r_solve(self, printflag=False):
for r in range(SIZE):
for c in range(SIZE):
if self.solved[r, c] == 0:
for n in range(1, 10):
if self.possible(r, c, n):
self.solved[r, c] = n
# Prevent from reseting the board
if (self.r_solve(printflag)):
return True
self.solved[r, c] = 0
return False
if printflag == True:
print('recursive results:')
print(self.solved)
return True
def solve(self, printflag=False):
self.r_solve(printflag)
return self.solved
| 31.444444 | 61 | 0.467845 | 1,342 | 0.94841 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.037456 |
464ea27cbe788bd3f30824ac8262b6f8546e28e0 | 40 | py | Python | scraper.py | souravkaranjai/python-webscraper | b4a76846d80e724059eb7cb9abcd5ec13125258a | [
"MIT"
]
| null | null | null | scraper.py | souravkaranjai/python-webscraper | b4a76846d80e724059eb7cb9abcd5ec13125258a | [
"MIT"
]
| null | null | null | scraper.py | souravkaranjai/python-webscraper | b4a76846d80e724059eb7cb9abcd5ec13125258a | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
print('Hello world') | 13.333333 | 20 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.775 |
464f62901d50ef275959e48f29f0c159e6183c2e | 571 | py | Python | src/algoritmia/problems/binpacking/firstfitbinpacker.py | DavidLlorens/algoritmia | 40ca0a89ea6de9b633fa5f697f0a28cae70816a2 | [
"MIT"
]
| 6 | 2018-09-15T15:09:10.000Z | 2022-02-27T01:23:11.000Z | src/algoritmia/problems/binpacking/firstfitbinpacker.py | JeromeIllgner/algoritmia | 406afe7206f2411557859bf03480c16db7dcce0d | [
"MIT"
]
| null | null | null | src/algoritmia/problems/binpacking/firstfitbinpacker.py | JeromeIllgner/algoritmia | 406afe7206f2411557859bf03480c16db7dcce0d | [
"MIT"
]
| 5 | 2018-07-10T20:19:55.000Z | 2021-03-31T03:32:22.000Z | from algoritmia.problems.binpacking.nextfitbinpacker import NextFitBinPacker
class FirstFitBinPacker(NextFitBinPacker):#[full
def pack(self, w: "IList<Real>", C: "Real") -> "IList<int>":
x = [None] * len(w)
free = []
for i in range(len(w)):
for j in range(len(free)):
if free[j] >= w[i]:
x[i] = j
free[j] -= w[i]
break
if x[i] == None:
x[i] = len(free)
free.append(C-w[i])
return x#]full | 35.6875 | 77 | 0.450088 | 491 | 0.859895 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.077058 |
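# Worked example (assuming the inherited constructor needs no arguments): packing
# weights [4, 3, 5, 2] into bins of capacity 6 assigns them to bins [0, 1, 2, 0];
# the final item of weight 2 reuses the space left in bin 0 instead of opening a
# new bin as next-fit would.
#
#   FirstFitBinPacker().pack([4, 3, 5, 2], 6)  # -> [0, 1, 2, 0]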
465010f857bece90ac2bd5becc8c6c4644e18f35 | 54 | py | Python | facebook/matrixWordSearch.py | rando3/leetcode-python | b13bb35fb3cdc9813c62944547d260be2f9cab02 | [
"MIT"
]
| null | null | null | facebook/matrixWordSearch.py | rando3/leetcode-python | b13bb35fb3cdc9813c62944547d260be2f9cab02 | [
"MIT"
]
| null | null | null | facebook/matrixWordSearch.py | rando3/leetcode-python | b13bb35fb3cdc9813c62944547d260be2f9cab02 | [
"MIT"
]
| null | null | null | https://leetcode.com/problems/word-search/description/ | 54 | 54 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
46522be46094275849b7f3b3da9bf2d51591f8c8 | 10,754 | py | Python | contrib/make_hdf.py | scopatz/PyTables | 05a74def785688abd802224a5ba44393a701ebc7 | [
"BSD-3-Clause"
]
| 9 | 2021-09-28T05:20:22.000Z | 2022-03-16T11:09:06.000Z | contrib/make_hdf.py | scopatz/PyTables | 05a74def785688abd802224a5ba44393a701ebc7 | [
"BSD-3-Clause"
]
| null | null | null | contrib/make_hdf.py | scopatz/PyTables | 05a74def785688abd802224a5ba44393a701ebc7 | [
"BSD-3-Clause"
]
| 9 | 2018-09-14T02:42:36.000Z | 2021-07-12T02:37:45.000Z | #!/usr/bin/env python
from __future__ import generators
import tables, cPickle, time
#################################################################################
def is_scalar(item):
try:
iter(item)
#could be a string
try:
item[:0]+'' #check for string
return 'str'
except:
return 0
except:
return 'notstr'
def is_dict(item):
try:
item.iteritems()
return 1
except:
return 0
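# Quick reference for the two helpers above (illustrative calls, Python 2 semantics):
#   is_scalar("abc")     -> 'str'      (strings)
#   is_scalar(3)         -> 'notstr'   (non-iterable scalars)
#   is_scalar([1, 2, 3]) -> 0          (iterables other than strings)
#   is_dict({'a': 1})    -> 1          is_dict([1, 2]) -> 0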
def make_col(row_type, row_name, row_item, str_len):
    '''for strings it will always make at least 80 chars or twice the max char size'''
set_len=80
if str_len:
if 2*str_len>set_len:
set_len=2*str_len
row_type[row_name]=tables.Col("CharType", set_len)
else:
type_matrix={
int: tables.Col("Int32", 1),
float: tables.Col("Float32", 4), #Col("Int16", 1)
}
row_type[row_name]=type_matrix[type(row_item)]
def make_row(data):
row_type={}
scalar_type=is_scalar(data)
if scalar_type:
if scalar_type=='str':
make_col(row_type, 'scalar', data, len(data))
else:
make_col(row_type, 'scalar', data, 0)
else: #it is a list-like
the_type=is_scalar(data[0])
if the_type=='str':
#get max length
the_max=0
for i in data:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col', data[0], the_max)
elif the_type:
make_col(row_type, 'col', data[0], 0)
else: #list within the list, make many columns
make_col(row_type, 'col_depth', 0, 0)
count=0
for col in data:
the_type=is_scalar(col[0])
if the_type=='str':
#get max length
the_max=0
for i in data:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col_'+str(count), col[0], the_max)
elif the_type:
make_col(row_type, 'col_'+str(count), col[0], 0)
else:
raise ValueError('too many nested levels of lists')
count+=1
return row_type
def add_table(fileh, group_obj, data, table_name):
#figure out if it is a list of lists or a single list
#get types of columns
row_type=make_row(data)
table1=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
row=table1.row
if is_scalar(data):
row['scalar']=data
row.append()
else:
if is_scalar(data[0]):
for i in data:
row['col']=i
row.append()
else:
count=0
for col in data:
row['col_depth']=len(col)
for the_row in col:
if is_scalar(the_row):
row['col_'+str(count)]=the_row
row.append()
else:
raise ValueError('too many levels of lists')
count+=1
table1.flush()
def add_cache(fileh, cache):
group_name='pytables_cache_v0';table_name='cache0'
root=fileh.root
group_obj=fileh.createGroup(root, group_name)
cache_str=cPickle.dumps(cache, 0)
cache_str=cache_str.replace('\n', chr(1))
cache_pieces=[]
while cache_str:
cache_part=cache_str[:8000];cache_str=cache_str[8000:]
if cache_part:
cache_pieces.append(cache_part)
row_type={}
row_type['col_0']=tables.Col("CharType", 8000)
#
table_cache=fileh.createTable(group_obj, table_name, row_type, 'H', compress =1)
for piece in cache_pieces:
print len(piece)
table_cache.row['col_0']=piece
table_cache.row.append()
table_cache.flush()
def save2(hdf_file, data):
fileh=tables.openFile(hdf_file, mode='w', title='logon history')
root=fileh.root;cache_root=cache={}
root_path=root._v_pathname;root=0
stack = [ (root_path, data, cache) ]
table_num=0
count=0
while stack:
(group_obj_path, data, cache)=stack.pop()
#data='wilma':{'mother':[22,23,24]}}
#grp_name wilma
for grp_name in data:
#print 'fileh=',fileh
count+=1
cache[grp_name]={}
new_group_obj=fileh.createGroup(group_obj_path, grp_name)
#print 'path=',new_group_obj._v_pathname
new_path=new_group_obj._v_pathname
#if dict, you have a bunch of groups
if is_dict(data[grp_name]):#{'mother':[22,23,24]}
stack.append((new_path, data[grp_name], cache[grp_name]))
#you have a table
else:
#data[grp_name]=[110,130,140],[1,2,3]
add_table(fileh, new_path, data[grp_name], 'tbl_'+str(table_num))
table_num+=1
#fileh=tables.openFile(hdf_file,mode='a',title='logon history')
add_cache(fileh, cache_root)
fileh.close()
########################
class Hdf_dict(dict):
def __init__(self,hdf_file,hdf_dict={},stack=[]):
self.hdf_file=hdf_file
self.stack=stack
if stack:
self.hdf_dict=hdf_dict
else:
self.hdf_dict=self.get_cache()
self.cur_dict=self.hdf_dict
def get_cache(self):
fileh=tables.openFile(self.hdf_file, rootUEP='pytables_cache_v0')
table=fileh.root.cache0
total=[]
print 'reading'
begin=time.time()
for i in table.iterrows():
total.append(i['col_0'])
total=''.join(total)
total=total.replace(chr(1), '\n')
print 'loaded cache len=', len(total), time.time()-begin
begin=time.time()
a=cPickle.loads(total)
print 'cache', time.time()-begin
return a
def has_key(self, k):
return k in self.cur_dict
def keys(self):
return self.cur_dict.keys()
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def items(self):
return list(self.iteritems())
def values(self):
return list(self.itervalues())
###########################################
def __len__(self):
return len(self.cur_dict)
def __getitem__(self, k):
if k in self.cur_dict:
#now check if k has any data
if self.cur_dict[k]:
new_stack=self.stack[:]
new_stack.append(k)
return Hdf_dict(self.hdf_file, hdf_dict=self.cur_dict[k], stack=new_stack)
else:
new_stack=self.stack[:]
new_stack.append(k)
fileh=tables.openFile(self.hdf_file, rootUEP='/'.join(new_stack))
#cur_data=getattr(self.cur_group,k) #/wilma (Group) '' =getattr(/ (Group) 'logon history',wilma)
for table in fileh.root:
#return [ i['col_1'] for i in table.iterrows() ] #[9110,91]
#perhaps they stored a single item
try:
for item in table['scalar']:
return item
except:
#otherwise they stored a list of data
try:
return [ item for item in table['col']]
except:
cur_column=[]
total_columns=[]
col_num=0
cur_row=0
num_rows=0
for row in table:
if not num_rows:
num_rows=row['col_depth']
if cur_row==num_rows:
cur_row=num_rows=0
col_num+=1
total_columns.append(cur_column)
cur_column=[]
cur_column.append( row['col_'+str(col_num)])
cur_row+=1
total_columns.append(cur_column)
return total_columns
else:
raise KeyError(k)
def iterkeys(self):
        for key in self.cur_dict.iterkeys():
yield key
def __iter__(self):
return self.iterkeys()
def itervalues(self):
for k in self.iterkeys():
v=self.__getitem__(k)
yield v
def iteritems(self):
# yield children
for k in self.iterkeys():
v=self.__getitem__(k)
yield (k, v)
def __repr__(self):
return '{Hdf dict}'
def __str__(self):
return self.__repr__()
#####
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
            self.__setitem__(key, default)
return default
def update(self, d):
for k, v in d.iteritems():
self.__setitem__(k, v)
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError("Hdf Dict is empty")
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __hash__(self):
raise TypeError("Hdf dict bjects are unhashable")
if __name__=='__main__':
def write_small(file=''):
data1={
'fred':['a', 'b', 'c'],
'barney':[[9110, 9130, 9140], [91, 92, 93]],
'wilma':{'mother':{'pebbles':[22, 23, 24],'bambam':[67, 68, 69]}}
}
print 'saving'
save2(file, data1)
print 'saved'
def read_small(file=''):
#a=make_hdf.Hdf_dict(file)
a=Hdf_dict(file)
print a['wilma']
b=a['wilma']
for i in b:
print i
print a.keys()
print 'has fred', bool('fred' in a)
print 'length a', len(a)
print 'get', a.get('fred'), a.get('not here')
print 'wilma keys', a['wilma'].keys()
print 'barney', a['barney']
print 'get items'
print a.items()
for i in a.iteritems():
print 'item', i
for i in a.itervalues():
print i
a=raw_input('enter y to write out test file to test.hdf')
if a.strip()=='y':
print 'writing'
write_small('test.hdf')
print 'reading'
read_small('test.hdf')
| 30.292958 | 112 | 0.505951 | 4,525 | 0.420774 | 339 | 0.031523 | 0 | 0 | 0 | 0 | 1,772 | 0.164776 |
465234088d4677a447f6bdb4657e058e1a45f5b8 | 2,234 | py | Python | jinja2content.py | firemark/new-site | b7d54320f8e1cfae489108f87f64761ce2510676 | [
"MIT"
]
| null | null | null | jinja2content.py | firemark/new-site | b7d54320f8e1cfae489108f87f64761ce2510676 | [
"MIT"
]
| null | null | null | jinja2content.py | firemark/new-site | b7d54320f8e1cfae489108f87f64761ce2510676 | [
"MIT"
]
| null | null | null | """
jinja2content.py
----------------
DONT EDIT THIS FILE
Pelican plugin that processes Markdown files as jinja templates.
"""
from jinja2 import Environment, FileSystemLoader, ChoiceLoader
import os
from pelican import signals
from pelican.readers import MarkdownReader, HTMLReader, RstReader
from pelican.utils import pelican_open
from tempfile import NamedTemporaryFile
class JinjaContentMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# will look first in 'JINJA2CONTENT_TEMPLATES', by default the
# content root path, then in the theme's templates
local_dirs = self.settings.get('JINJA2CONTENT_TEMPLATES', ['.'])
local_dirs = [os.path.join(self.settings['PATH'], folder)
for folder in local_dirs]
theme_dir = os.path.join(self.settings['THEME'], 'templates')
loaders = [FileSystemLoader(_dir) for _dir
in local_dirs + [theme_dir]]
if 'JINJA_ENVIRONMENT' in self.settings: # pelican 3.7
jinja_environment = self.settings['JINJA_ENVIRONMENT']
else:
jinja_environment = {
'trim_blocks': True,
'lstrip_blocks': True,
'extensions': self.settings['JINJA_EXTENSIONS']
}
self.env = Environment(
loader=ChoiceLoader(loaders),
**jinja_environment)
def read(self, source_path):
with pelican_open(source_path) as text:
text = self.env.from_string(text).render()
with NamedTemporaryFile(delete=False) as f:
f.write(text.encode())
f.close()
content, metadata = super().read(f.name)
os.unlink(f.name)
return content, metadata
class JinjaMarkdownReader(JinjaContentMixin, MarkdownReader):
pass
class JinjaRstReader(JinjaContentMixin, RstReader):
pass
class JinjaHTMLReader(JinjaContentMixin, HTMLReader):
pass
def add_reader(readers):
for Reader in [JinjaMarkdownReader, JinjaRstReader, JinjaHTMLReader]:
for ext in Reader.file_extensions:
readers.reader_classes[ext] = Reader
def register():
signals.readers_init.connect(add_reader)
| 31.027778 | 73 | 0.653984 | 1,594 | 0.713518 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.179946 |
465353204681d4b1df6397f2e8098a707504bf13 | 4,125 | py | Python | task3/code/video_process.py | haohaoqian/STD | 38eeb39474a46e8e66bdf4da6a0e28167e2174a2 | [
"MIT"
]
| 1 | 2021-11-30T13:07:41.000Z | 2021-11-30T13:07:41.000Z | task3/code/video_process.py | haohaoqian/STD | 38eeb39474a46e8e66bdf4da6a0e28167e2174a2 | [
"MIT"
]
| null | null | null | task3/code/video_process.py | haohaoqian/STD | 38eeb39474a46e8e66bdf4da6a0e28167e2174a2 | [
"MIT"
]
| null | null | null | import json
from tqdm import tqdm
from utils import *
from alexnet import AlexNet
def classify(net, folder_name, resize=(224, 224)):
transform = []
if resize:
transform.append(torchvision.transforms.Resize(resize))
transform.append(torchvision.transforms.ToTensor())
    transform.append(torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))  # normalize
transform = torchvision.transforms.Compose(transform)
results = []
img_dir = folder_name + '/rgb/'
img_names = list(filter(lambda x: x.endswith(".jpg"), os.listdir(img_dir)))
for img_name in img_names:
image = Image.open(img_dir + img_name)
image = transform(image)
results.append(net.predict(torch.unsqueeze(image, dim=0)))
results = torch.cat(results, dim=0)
return torch.mean(results, dim=0).cpu().numpy()
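# Usage sketch (paths and weight file mirror the commented-out __main__ block at the
# bottom of this file and are illustrative): the returned vector holds the 10
# per-class scores averaged over the RGB frames of one video folder.
#
#   net = AlexNet()
#   net.load_state_dict(torch.load('./alexnet.pt'))
#   probs = classify(net, './dataset/task2/test/0/video_0006')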
def dump_test(file_root, save_name):
json_data = {}
# root = './dataset/task2/test/'
# save_name = './dataset/task2.json'
root = file_root
for i in tqdm(range(10)):
sub_root = root + str(i) + '/'
folders = list(filter(lambda x: not x.endswith(".pkl"), os.listdir(sub_root)))
for folder in folders:
folder_path = sub_root + folder
images, is_moved = video_loader(folder_path)
json_data[folder_path] = collide_detection(images, is_moved)
with open(save_name, "w") as f:
json.dump(json_data, f)
def dump_train(file_root, save_name, blocks=True):
json_data = {}
# root = './dataset/train/'
# save_name = './dataset/train.json'
root = file_root
for sub_root in os.listdir(root):
print('\n collecting %s' % sub_root)
sub_root = root + sub_root + '/'
folders = list(filter(lambda x: not x.endswith(".pkl"), os.listdir(sub_root)))
for folder in tqdm(folders):
folder_path = sub_root + folder
images, is_moved = video_loader(folder_path)
if blocks:
json_data[folder_path] = collide_detection_blocks(images, is_moved)
else:
json_data[folder_path] = collide_detection(images, is_moved)
with open(save_name, "w") as f:
json.dump(json_data, f)
def dump_file():  # marks the collision position for each video; not needed for classification
dump_train('./dataset/train/', './dataset/train_blocks_0.2.json')
dump_test('./dataset/task2/test/', "./dataset/task2_blocks_0.2.json")
dump_test('./dataset/task3/test/', "./dataset/task3_blocks_0.2.json")
def get_video_feature(net, folder_name, resize=(224, 224)):
"""
    :param folder_name: path from the current working directory to the 'video_0000' folder
    :param resize: defaults to (224, 224)
    :return: a 14-dimensional feature vector; the first 10 dimensions are the classification scores for
['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
    and the last 4 dimensions give the collision position [top, bottom, left, right]
"""
class_feature = classify(net, folder_name, resize)
images, is_moved = video_loader(folder_name)
move_feature = collide_detection_blocks(images, is_moved)
#feature = np.concatenate([class_feature, move_feature])
return class_feature, move_feature
#if __name__ == '__main__':
#net = AlexNet()
#net.load_state_dict(torch.load('./alexnet.pt'))
# idx_to_class = ['061_foam_brick', 'green_basketball', 'salt_cylinder', 'shiny_toy_gun', 'stanley_screwdriver',
# 'strawberry', 'toothpaste_box', 'toy_elephant', 'whiteboard_spray', 'yellow_block']
# classes = classify(net, './dataset/task2/test/0/video_0006')
#import json
#import os
#label = dict()
#path='./dataset/train'
#for folder in os.listdir(path):
#for sample in os.listdir(os.path.join(path, folder)):
#images, is_moved = video_loader(os.path.join(path, folder, sample))
#move_feature = collide_detection_blocks(images, is_moved)
#label[folder + '/' + sample] = move_feature
#with open('./dataset/train.json', 'w') as f:
#json.dump(label,f) | 39.663462 | 117 | 0.634909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,683 | 0.394792 |
4654149e1951b836beb90c90a51fb7d22a7e21c8 | 200 | py | Python | Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 08d8e21f2c7185d2c47846f67cbfba70c706d689 | [
"MIT"
]
| 1 | 2019-05-14T22:31:25.000Z | 2019-05-14T22:31:25.000Z | Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 08d8e21f2c7185d2c47846f67cbfba70c706d689 | [
"MIT"
]
| null | null | null | Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 08d8e21f2c7185d2c47846f67cbfba70c706d689 | [
"MIT"
]
| null | null | null | import serial
with serial.Serial("/dev/ttyUSB0", 115200) as ser:
while 1:
for i in range(5):
n = ser.read()[0]
print("{:x}".format(n))
print("--------")
| 18.181818 | 50 | 0.47 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.15 |
46548694a0ea521a51fee1aa569811ccc528f211 | 578 | py | Python | packages/pyre/parsing/Parser.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
]
| 25 | 2018-04-23T01:45:39.000Z | 2021-12-10T06:01:23.000Z | packages/pyre/parsing/Parser.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
]
| 53 | 2018-05-31T04:55:00.000Z | 2021-10-07T21:41:32.000Z | packages/pyre/parsing/Parser.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
]
| 12 | 2018-04-23T22:50:40.000Z | 2022-02-20T17:27:23.000Z | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
class Parser:
"""
The base class for parsers
"""
# types
from .exceptions import ParsingError, SyntaxError, TokenizationError
# meta methods
def __init__(self, **kwds):
# chain up
super().__init__(**kwds)
# build my scanner
self.scanner = self.lexer()
# all done
return
# implementation details
lexer = None # my scanner factory
scanner = None # my scanner instance
# end of file
| 16.514286 | 72 | 0.595156 | 459 | 0.791379 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.474138 |
4654a308feea7ad07bf6af611b62104666bd4e8d | 5,087 | py | Python | shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
]
| 1 | 2021-03-18T07:31:36.000Z | 2021-03-18T07:31:36.000Z | shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
]
| null | null | null | shfl/data_distribution/data_distribution_non_iid.py | SSSuperTIan/Sherpa.ai-Federated-Learning-Framework | a30d73a018526f1033ee0ec57489c4c6e2f15b0a | [
"Apache-2.0"
]
| null | null | null | import numpy as np
import random
import tensorflow as tf
from shfl.data_base.data_base import shuffle_rows
from shfl.data_distribution.data_distribution_sampling import SamplingDataDistribution
class NonIidDataDistribution(SamplingDataDistribution):
"""
Implementation of a non-independent and identically distributed data distribution using \
[Data Distribution](../data_distribution/#datadistribution-class)
    In this data distribution we simulate the scenario in which clients hold non-identical
    distributions, since each client only knows a subset of the total classes of the problem.
This distribution only works with classification problems.
"""
@staticmethod
def choose_labels(num_nodes, total_labels):
"""
        Method that randomly chooses the labels used by each client in the non-iid scenario.
# Arguments:
num_nodes: Number of nodes
total_labels: Number of labels
# Returns:
labels_to_use
"""
random_labels = []
for i in range(0, num_nodes):
num_labels = random.randint(2, total_labels)
labels_to_use = []
for j in range(num_labels):
label = random.randint(0, total_labels - 1)
if label not in labels_to_use:
labels_to_use.append(label)
else:
while label in labels_to_use:
label = random.randint(0, total_labels - 1)
labels_to_use.append(label)
random_labels.append(labels_to_use)
return random_labels
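    # Illustrative call: NonIidDataDistribution.choose_labels(3, 10) might return
    # [[2, 7], [0, 4, 9, 1], [5, 3, 8]] -- each client receives a random subset of
    # between 2 and `total_labels` distinct label indices.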
def make_data_federated(self, data, labels, percent, num_nodes=1, weights=None, sampling="with_replacement"):
"""
        Method that federates the data and labels arguments in a non-iid scenario.
# Arguments:
data: Data to federate
labels: Labels to federate
num_nodes: Number of nodes to create
percent: Percent of the data (between 0 and 100) to be distributed (default is 100)
weights: Array of weights for weighted distribution (default is None)
            sampling: sampling methodology, either "with_replacement" or without replacement (default is "with_replacement")
# Returns:
federated_data: A list containing the data for each client
federated_label: A list containing the labels for each client
"""
if weights is None:
weights = np.full(num_nodes, 1/num_nodes)
# Check label's format
if labels.ndim == 1:
one_hot = False
labels = tf.keras.utils.to_categorical(labels)
else:
one_hot = True
# Shuffle data
data, labels = shuffle_rows(data, labels)
# Select percent
data = data[0:int(percent * len(data) / 100)]
labels = labels[0:int(percent * len(labels) / 100)]
num_data = len(data)
# We generate random classes for each client
total_labels = np.unique(labels.argmax(axis=-1))
random_classes = self.choose_labels(num_nodes, len(total_labels))
federated_data = []
federated_label = []
if sampling == "with_replacement":
for i in range(0, num_nodes):
labels_to_use = random_classes[i]
idx = np.array([True if i in labels_to_use else False for i in labels.argmax(axis=-1)])
data_aux = data[idx]
labels_aux = labels[idx]
# Shuffle data
data_aux, labels_aux = shuffle_rows(data_aux, labels_aux)
percent_per_client = min(int(weights[i]*num_data), len(data_aux))
federated_data.append(np.array(data_aux[0:percent_per_client, ]))
federated_label.append(np.array(labels_aux[0:percent_per_client, ]))
else:
if sum(weights) > 1:
weights = np.array([float(i) / sum(weights) for i in weights])
for i in range(0, num_nodes):
labels_to_use = random_classes[i]
idx = np.array([True if i in labels_to_use else False for i in labels.argmax(axis=-1)])
data_aux = data[idx]
rest_data = data[~idx]
labels_aux = labels[idx]
rest_labels = labels[~idx]
data_aux, labels_aux = shuffle_rows(data_aux, labels_aux)
percent_per_client = min(int(weights[i] * num_data), len(data_aux))
federated_data.append(np.array(data_aux[0:percent_per_client, ]))
rest_data = np.append(rest_data, data_aux[percent_per_client:, ], axis=0)
federated_label.append(np.array(labels_aux[0:percent_per_client, ]))
rest_labels = np.append(rest_labels, labels_aux[percent_per_client:, ], axis=0)
data = rest_data
labels = rest_labels
if not one_hot:
federated_label = np.array([np.argmax(node, 1) for node in federated_label])
return federated_data, federated_label
| 36.597122 | 113 | 0.612149 | 4,890 | 0.961085 | 0 | 0 | 939 | 0.184552 | 0 | 0 | 1,492 | 0.293239 |
4656ff804d82461f52d5fb2b608b15a88f9feeb7 | 1,298 | py | Python | example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
]
| 2 | 2021-09-07T13:28:34.000Z | 2021-12-13T06:17:10.000Z | example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
]
| null | null | null | example/0_Basic_usage_of_the_library/python_pymongo/2_select.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
]
| null | null | null | # -*- encoding: utf-8 -*-
'''
@Time : 2021-06-08
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc    : Query examples
'''
# here put the import lib
from pymongo import MongoClient
from bson import ObjectId
connection: MongoClient = MongoClient('mongodb://localhost:27017')
collection = connection['local']['startup_log']
# Query methods
# The collection object's methods that start with "find" are what we need;
# in general use find to fetch multiple results and find_one to fetch a single result
# collection.find
# collection.find_one
# In fact, the parameters a query needs are defined by MongoDB queries themselves, not by pymongo
# Basically, any command that works in the mongo shell has a corresponding pymongo method
# filter
# Describes which documents are wanted, similar to the way WHERE restricts results in an SQL statement
# Supports logical conditions, type checks and other operations
_filter = {'pid': 4444}  # records whose pid value is 4444
result = collection.find_one(_filter)
print(result)
# projection
# Sets which keys the returned records will contain
# If some keys are set to 1, only the specified keys are returned
# If some keys are set to 0, every key except those set to 0 is returned
# If no projection is given, the result carries the _id key by default
projection = {'_pid': 1, 'hostname': 1}
result = collection.find_one(_filter, projection)
print(result)
collection.find_one({'_id': ObjectId('EvilMass-1619315049192')})  # mind the type when querying by _id (a real ObjectId is a 24-character hex string)
# skip
# Skips the given number of query results
result = collection.find(_filter, projection, skip=1)
print(list(result))
# limit
# Limits the number of returned results
result = collection.find(_filter, projection, limit=2)
print(list(result))
# collection.count_documents
# Counts the number of matching documents
result = collection.count_documents({'_pid': 4444})
print(result)
| 22 | 80 | 0.743451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,218 | 0.681208 |
4657a986bf5b4eb76c1f27d0639731c4204bb162 | 4,428 | py | Python | install-b9s.py | ihaveamac/hardmod-b9s-installer | 4b26cfff76bb00430aacbe80679623e3cc5bb46d | [
"MIT"
]
| 13 | 2017-05-20T03:54:55.000Z | 2021-10-09T22:10:09.000Z | install-b9s.py | ihaveamac/hardmod-b9s-installer | 4b26cfff76bb00430aacbe80679623e3cc5bb46d | [
"MIT"
]
| 2 | 2017-06-09T06:40:20.000Z | 2017-09-17T14:29:28.000Z | install-b9s.py | ihaveamac/hardmod-b9s-installer | 4b26cfff76bb00430aacbe80679623e3cc5bb46d | [
"MIT"
]
| 3 | 2017-12-24T19:10:09.000Z | 2020-12-04T09:01:53.000Z | #!/usr/bin/env python3
import hashlib
import os
import shutil
import subprocess
import sys
import time
def doexit(msg, errcode=0):
print(msg)
input('Press Enter to continue...')
sys.exit(errcode)
if not os.path.isfile('NAND.bin'):
doexit('NAND.bin not found.', errcode=1)
if os.path.isfile('firm0firm1.bak'):
doexit('firm0firm1.bak was found.\n'
'In order to prevent writing a good backup with a bad one, the '
'install has stopped. Please move or delete the old file if you '
'are sure you want to continue. If you would like to restore, use '
'`restore-firm0firm1`.',
errcode=1)
if os.path.isfile('NAND-patched.bin'):
doexit('NAND-patched.bin was found.\n'
'Please move or delete the patched NAND before patching another.',
errcode=1)
if not os.path.isfile('current.firm'):
doexit('current.firm not found.', errcode=1)
if not os.path.isfile('boot9strap.firm'):
doexit('boot9strap.firm not found.', errcode=1)
if not os.path.isfile('boot9strap.firm.sha'):
doexit('boot9strap.firm.sha not found.', errcode=1)
print('Verifying boot9strap.firm.')
with open('boot9strap.firm.sha', 'rb') as f:
b9s_hash = f.read(0x20)
with open('boot9strap.firm', 'rb') as f:
if hashlib.sha256(f.read(0x400000)).digest() != b9s_hash:
doexit('boot9strap.firm hash check failed.', errcode=1)
print('boot9strap.firm hash check passed.')
readsize = 0x100000  # must evenly divide 0x3AF00000 and 0x4D800000
shutil.rmtree('work', ignore_errors=True)
os.makedirs('work', exist_ok=True)
def runcommand(cmdargs):
proc = subprocess.Popen(cmdargs, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
procoutput = proc.communicate()[0]
# return(procoutput)
if proc.returncode != 0:
print('{} had an error.'.format(cmdargs[0]))
print('Full command: {}'.format(' '.join(cmdargs)))
print('Output:')
print(procoutput)
overall_time = time.time()
print('Trying to open NAND.bin...')
with open('NAND.bin', 'rb+') as nand:
print('Backing up FIRM0FIRM1 to firm0firm1.bin...')
nand.seek(0xB130000)
start_time = time.time()
with open('firm0firm1.bak', 'wb') as f:
for curr in range(0x800000 // readsize):
f.write(nand.read(readsize))
print('Reading {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize,
(((curr + 1) * readsize) / 0x800000) * 100), end='\r')
print('\nReading finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('Creating FIRMs to xor from boot9strap.firm.')
start_time = time.time()
with open('current.firm', 'rb') as f:
with open('work/current_pad.bin', 'wb') as b9s:
b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
with open('boot9strap.firm', 'rb') as f:
with open('work/boot9strap_pad.bin', 'wb') as b9s:
b9s.write(f.read(0x400000).ljust(0x400000, b'\0') * 2)
print('Creation finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('XORing FIRM0FIRM1 with current.firm.')
start_time = time.time()
runcommand(['tools/lazyxor-' + sys.platform, 'firm0firm1.bak',
'work/current_pad.bin', 'work/xored.bin'])
print('XORing finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('XORing FIRM0FIRM1 with boot9strap.firm.')
start_time = time.time()
runcommand(['tools/lazyxor-' + sys.platform, 'work/xored.bin',
'work/boot9strap_pad.bin', 'work/final.bin'])
print('XORing finished in {:>.2f} seconds.\n'.format(
time.time() - start_time))
print('Writing final FIRMs to NAND.bin.')
with open('work/final.bin', 'rb') as f:
firm_final = f.read(0x800000)
nand.seek(0xB130000)
start_time = time.time()
for curr in range(0x800000 // readsize):
print('Writing {:06X} ({:>5.1f}%)'.format((curr + 1) * readsize,
(((curr + 1) * readsize) / 0x800000) * 100), end='\r')
nand.write(bytes(firm_final[curr * readsize:(curr + 1) * readsize]))
print('\nWriting finished in {:>.2f} seconds.'.format(
time.time() - start_time))
os.rename('NAND.bin', 'NAND-patched.bin')
doexit('boot9strap install process finished in {:>.2f} seconds.'.format(
time.time() - overall_time))
| 35.709677 | 78 | 0.626016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,752 | 0.395664 |
46586eef963ebf4f9bbb1677b780f95787d3e23e | 6,329 | py | Python | pygazebo/connection.py | robobe/pygazebo | 0ab53034c9cc5bf23bae46bf0e9c984349bdfa5a | [
"Apache-2.0"
]
| null | null | null | pygazebo/connection.py | robobe/pygazebo | 0ab53034c9cc5bf23bae46bf0e9c984349bdfa5a | [
"Apache-2.0"
]
| null | null | null | pygazebo/connection.py | robobe/pygazebo | 0ab53034c9cc5bf23bae46bf0e9c984349bdfa5a | [
"Apache-2.0"
]
| 1 | 2020-12-09T17:27:34.000Z | 2020-12-09T17:27:34.000Z | import concurrent
import time
import math
import sys
import asyncio
import logging
from . import msg
from .parse_error import ParseError
from . import DEBUG_LEVEL
logger = logging.getLogger(__name__)
logger.setLevel(DEBUG_LEVEL)
async def _wait_closed(stream):
assert(sys.version_info.major >= 3)
if sys.version_info.minor >= 7:
await stream.wait_closed()
class DisconnectError(Exception):
def __init__(self,
connection_name: str,
server_addr: tuple,
local_addr: tuple,
discarded_bytes: int):
"""
:param connection_name: Name of the connection
:param server_addr: remote address of the connection (address, port)
:type server_addr: tuple[str, int]
:param local_addr: local address of the connection (address, port)
:type local_addr: tuple[str, int]
:param discarded_bytes: number of bytes not read from the socket
"""
self._connection_name = connection_name
self._server_addr = server_addr
self._local_addr = local_addr
self._discarded_bytes = discarded_bytes
@staticmethod
def _to_addr(addr):
return f'{addr[0]}:{addr[1]}'
def __str__(self):
return f'DisconnectError' \
f'({self._connection_name}: {self._to_addr(self._local_addr)} -> {self._to_addr(self._server_addr)})' + \
(f' bytes not collected: {self._discarded_bytes}' if self._discarded_bytes is not None and self._discarded_bytes > 0 else '')
class Server(object):
def __init__(self, name: str):
self._name = name
self._server = None
self._listen_host = None
self._listen_port = None
self._running_server = None
async def serve(self, handler):
"""
Start TCP server
        :param handler: coroutine invoked once for each new connection
        :type handler: async callable taking (reader, writer) and returning None
:return:
"""
self._server = await asyncio.start_server(handler, host='0.0.0.0')
self._listen_host, self._listen_port = self._server.sockets[0].getsockname()
        logger.info(f"Listening on {self._listen_host}:{self._listen_port}")
self._running_server = asyncio.ensure_future(self._server_loop())
return self._listen_host, self._listen_port
async def _server_loop(self):
if sys.version_info.minor >= 7:
async with self._server:
await self._server.serve_forever()
else:
await self._server.wait_closed()
async def close(self):
self._server.close()
await _wait_closed(self._server)
try:
await self._running_server
except concurrent.futures.CancelledError:
pass
@property
def listen_host(self):
assert self._server is not None
return self._listen_host
@property
def listen_port(self):
assert self._server is not None
return self._listen_port
class Connection(object):
"""Manages a Gazebo protocol connection.
"""
def __init__(self, name):
self.name = name
self._address = None
self._port = None
self._reader = None
self._writer = None
self._closed = True
async def connect(self, address, port):
logger.debug('Connection.connect')
self._address = address
self._port = port
reader, writer = await asyncio.open_connection(address, port)
self.accept_connection(reader, writer)
def accept_connection(self, reader, writer):
self._reader = reader
self._writer = writer
self._closed = False
async def close(self):
if self._closed:
logger.debug("Trying to close an already closed connection")
return
self._closed = True
self._writer.write_eof()
await self._writer.drain()
self._writer.close()
await _wait_closed(self._writer)
async def write_packet(self, name: str, message, timeout):
assert not self._closed
packet = msg.packet_pb2.Packet()
cur_time = time.time()
packet.stamp.sec = int(cur_time)
packet.stamp.nsec = int(math.fmod(cur_time, 1) * 1e9)
packet.type = name.encode()
packet.serialized_data = message.SerializeToString()
await self._write(packet.SerializeToString(), timeout)
async def write(self, message, timeout=None):
data = message.SerializeToString()
await self._write(data, timeout)
async def _write(self, data, timeout):
header = ('%08X' % len(data)).encode()
self._writer.write(header + data)
await asyncio.wait_for(self._writer.drain(), timeout=timeout)
async def read_raw(self):
"""
Read incoming packet without parsing it
:return: byte array of the packet
"""
header = None
try:
assert not self._closed
header = await self._reader.readexactly(8)
if len(header) < 8:
raise ParseError('malformed header: ' + str(header))
try:
size = int(header, 16)
except ValueError:
raise ParseError('invalid header: ' + str(header))
else:
data = await self._reader.readexactly(size)
return data
except (ConnectionResetError, asyncio.streams.IncompleteReadError) as e:
if self._closed:
return None
else:
local_addr, local_port = self._writer.transport.get_extra_info('sockname')
discarded_bytes = len(e.partial) if isinstance(e, asyncio.streams.IncompleteReadError) else None
                if header is not None and discarded_bytes is not None:
                    discarded_bytes += 8
raise DisconnectError(
connection_name=self.name,
server_addr=(self._address, self._port),
                    local_addr=(local_addr, local_port),
discarded_bytes=discarded_bytes
) from e
async def read_packet(self):
data = await self.read_raw()
if not self._closed:
packet = msg.packet_pb2.Packet.FromString(data)
return packet
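# A minimal usage sketch for the Connection class above, assuming a Gazebo master is
# reachable on localhost:11345; the address, port and packet handling below are
# illustrative only and are not defined by this module.
async def _example_connection_usage():
    conn = Connection('example')
    await conn.connect('localhost', 11345)
    # write_packet() frames a protobuf message with the 8-hex-digit length header used
    # by _write(); read_packet() parses the reply into a msg.packet_pb2.Packet.
    packet = await conn.read_packet()
    await conn.close()
    return packet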
| 32.623711 | 137 | 0.613367 | 5,945 | 0.939327 | 0 | 0 | 293 | 0.046295 | 4,005 | 0.632801 | 1,095 | 0.173013 |
4658a352b7ba7209186ef3d47f169f46b8660613 | 2,182 | py | Python | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
]
| null | null | null | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
]
| null | null | null | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
]
| null | null | null | from common_clustering import CommonClustering
# clustering_features = CommonClustering(r'C:\Users\ivangarrera\Desktop\T2_cleaned.csv')
clustering_features = CommonClustering(r'D:\Ing. Informatica\Cuarto\Machine Learning\T2_cleaned_gyroscope.csv')
attr = list(clustering_features.data_set)[0][:list(clustering_features.data_set)[0].find('_')]
clustering_features.attr = attr
clustering_features.PrincipalComponentAnalysis(num_components=2)
# Get the number of clusters that provides the best results
ideal_number_of_clusters = clustering_features.getBestNumberOfClusters()
# Plot silhouettes array
clustering_features.PlotSilhouettes()
# Print k-means with the best number of clusters that have been found
labels = clustering_features.KMeansWithIdeal(ideal_number_of_clusters)
# Interpret k-means groups
clustering_features.data_set['labels'] = labels
data_set_labels_mean = clustering_features.data_set.groupby(['labels']).mean()
# Plot 3D graph to interpret k-means groups
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data_set_labels_mean.values[:,0],
data_set_labels_mean.values[:,1],
data_set_labels_mean.values[:,2])
plt.savefig(r'../../reports/figures/centroids3D_{}.png'.format(attr))
plt.show()
# Agglomerative clustering algorithm using nearest neighbors matrix
clustering_features.AgglomerativeClusteringWithNearestNeighbors()
# DBSCAN Clustering algorithm
labels = clustering_features.DBSCANClustering()
# Interpret outliers
clustering_features.data_set['labels'] = labels
data_set_outliers = clustering_features.data_set.loc[(clustering_features.data_set['labels'] == -1)]
# Show outliers in a 3D graph with all points in the dataset
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(clustering_features.data_set.values[:,0],
clustering_features.data_set.values[:,1],
clustering_features.data_set.values[:,2])
ax.scatter(data_set_outliers.values[:,0],
data_set_outliers.values[:,1],
data_set_outliers.values[:,2], c='red', s=50)
plt.savefig(r'../../reports/figures/outliers3D_{}.png'.format(attr))
plt.show()
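# For reference, the cluster-count search behind getBestNumberOfClusters() can be
# approximated with scikit-learn's silhouette score; this is only an illustrative
# sketch (the real logic lives in CommonClustering) and assumes scikit-learn is installed.
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
def best_k_by_silhouette(data, k_range=range(2, 11)):
    # Fit k-means for each candidate k and keep the k with the highest mean silhouette.
    scores = {k: silhouette_score(data, KMeans(n_clusters=k, random_state=0).fit_predict(data))
              for k in k_range}
    return max(scores, key=scores.get)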
| 36.983051 | 110 | 0.779102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.314103 |
465a22b29f2bfae5b6e8e5e3348394868002ce0f | 1,483 | py | Python | app.py | Geo-Gabriel/REST-Api-Hotels | e065f4725507e11a11480118f326e79aded533d5 | [
"MIT"
]
| null | null | null | app.py | Geo-Gabriel/REST-Api-Hotels | e065f4725507e11a11480118f326e79aded533d5 | [
"MIT"
]
| null | null | null | app.py | Geo-Gabriel/REST-Api-Hotels | e065f4725507e11a11480118f326e79aded533d5 | [
"MIT"
]
| null | null | null | from blacklist import BLACKLIST
from flask import Flask, jsonify
from flask_restful import Api
from resources.hotel import Hoteis, Hotel
from resources.user import User, UserLogin, UserLogout, UserRegister, Users
from resources.site import Site, Sites
from flask_jwt_extended import JWTManager
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = 'Jbs8aGbbAyt7iMa878Pnsj'
app.config['JWT_BLACKLIST_ENABLED'] = True
api = Api(app)
jwt = JWTManager(app)
@app.before_first_request
def create_db():
db.create_all()
@jwt.token_in_blacklist_loader
def verify_block_list(token):
return token['jti'] in BLACKLIST
@jwt.revoked_token_loader
def revoked_access_token():
    return jsonify({'message': "You have been logged out."}), 401  # Unauthorized
# Hotels resource
api.add_resource(Hoteis, '/hoteis')
api.add_resource(Hotel, '/hoteis/<string:hotel_id>')
# Users resource
api.add_resource(Users, '/users')
api.add_resource(User, '/users/<string:user_id>')
# User register resource
api.add_resource(UserRegister, '/register')
# Login resource
api.add_resource(UserLogin, '/login')
# Logout resource
api.add_resource(UserLogout, '/logout')
# Sites resource
api.add_resource(Sites, '/sites')
api.add_resource(Site, '/sites/<string:site_url>')
if __name__ == '__main__':
from database.sql_alchemy import db
db.init_app(app)
app.run(debug=True)
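# How the blacklist above is typically fed (illustrative sketch only - the real handler
# lives in resources.user.UserLogout, registered at '/logout'): the logout endpoint reads
# the current token's jti and adds it to BLACKLIST, after which verify_block_list()
# rejects that token on later requests.
#
# from flask_jwt_extended import jwt_required, get_raw_jwt
# class ExampleLogout(Resource):
#     @jwt_required
#     def post(self):
#         BLACKLIST.add(get_raw_jwt()['jti'])
#         return {'message': 'Access token revoked.'}, 200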
| 26.017544 | 80 | 0.766689 | 0 | 0 | 0 | 0 | 293 | 0.197572 | 0 | 0 | 444 | 0.299393 |
465a6321a407b2ead52a83060d6a413f0b6c0e5a | 316 | py | Python | scdlbot/__init__.py | samillinier/habesha-skin-pack | 74d1c84b207c8d598f124a91dce11fb09109c772 | [
"MIT"
]
| null | null | null | scdlbot/__init__.py | samillinier/habesha-skin-pack | 74d1c84b207c8d598f124a91dce11fb09109c772 | [
"MIT"
]
| null | null | null | scdlbot/__init__.py | samillinier/habesha-skin-pack | 74d1c84b207c8d598f124a91dce11fb09109c772 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for Music Downloader Telegram Bot."""
# version as tuple for simple comparisons
VERSION = (0, 9, 16)
__author__ = """George Pchelkin"""
__email__ = '[email protected]'
# string created from tuple to avoid inconsistency
__version__ = ".".join([str(x) for x in VERSION])
| 26.333333 | 58 | 0.696203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.677215 |
465afd4dc993d8e9672e85b05bdf2dd73ac767b6 | 4,355 | py | Python | uPython/lopyhelper.py | somervda/ourLora | 8ee21a3eefd13464ad5063174a7a7cab57229e0d | [
"MIT"
]
| null | null | null | uPython/lopyhelper.py | somervda/ourLora | 8ee21a3eefd13464ad5063174a7a7cab57229e0d | [
"MIT"
]
| 3 | 2022-02-14T19:32:32.000Z | 2022-02-24T18:22:28.000Z | uPython/lopyhelper.py | somervda/ourLora | 8ee21a3eefd13464ad5063174a7a7cab57229e0d | [
"MIT"
]
| null | null | null | import struct
import pycom
import time
from network import LoRa
def blink(seconds, rgb):
pycom.rgbled(rgb)
time.sleep(seconds)
pycom.rgbled(0x000000) # off
def setUSFrequencyPlan(lora):
""" Sets the frequency plan that matches the TTN gateway in the USA """
# remove all US915 channels
for channel in range(0, 72):
lora.remove_channel(channel)
# set all channels to the same frequency (must be before sending the OTAA join request)
ttn_start_frequency = 903900000
ttn_step_frequency = 200000
ttn_ch8_frequency = 904600000
# Set up first 8 US915 TTN uplink channels
for channel in range(0, 9):
if (channel == 8):
channel_frequency = ttn_ch8_frequency
# DR3 = SF8/500kHz
channel_dr_min = 4
channel_dr_max = 4
else:
channel_frequency = ttn_start_frequency + \
(channel * ttn_step_frequency)
# DR0 = SF10/125kHz
channel_dr_min = 0
# DR3 = SF7/125kHz
channel_dr_max = 3
lora.add_channel(channel, frequency=channel_frequency,
dr_min=channel_dr_min, dr_max=channel_dr_max)
print("Added channel", channel, channel_frequency,
channel_dr_min, channel_dr_max)
def join(app_eui, app_key, useADR):
""" Join the Lorawan network using OTAA. new lora session is returned """
# Set the power to 20db for US915
# You can also set the default dr value but I found that was problematic
# You need to turn on adr (auto data rate) at this point if it is to be used
# only use adr for static devices (Not moving)
# see https://lora-developers.semtech.com/library/tech-papers-and-guides/understanding-adr/
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915,
adr=useADR, tx_power=20)
setUSFrequencyPlan(lora)
print('Joining', end='')
lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
# wait until the module has joined the network
while not lora.has_joined():
time.sleep(2.5)
blink(.5, 0xff8f00) # dark orange
print('.', end='')
print('')
print('Joined')
blink(2, 0x006400) # dark green
return lora
def send(lora, socket, port, payload, useADR):
""" send data to the lorawan gateway on selected port """
blink(.5, 0x00008b) # dark blue
socket.setblocking(True)
socket.bind(port)
print("Sending data:", payload.pack(), " Size:", payload.calcsize())
socket.send(payload.pack())
# Give send a extra second to be returned before switching
# the socket blocking mode (May not need this)
time.sleep(1)
socket.setblocking(False)
lora.nvram_save()
class gps_payload:
""" Class for managing the GPS payload data that is transmitted to the lorawan service
update the class properties and struct definition for the particular use case """
longitude = 0
latitude = 0
pack_format = "ff"
def __init__(self, longitude, latitude):
self.longitude = longitude # Float
self.latitude = latitude # Float
# see format options here https://docs.python.org/2/library/struct.html#format-characters
# Noter: use single precision float f for GPS Lng/Lat to get locations down to a meter
def pack(self):
return struct.pack(self.pack_format, self.longitude, self.latitude)
def calcsize(self):
return struct.calcsize(self.pack_format)
class sensor_payload:
""" Class for managing the sensor payload data that is transmitted to the lorawan service
update the class properties and struct definition for the particular use case """
celsius = 0
humidity = 0
waterlevel = 0
voltage = 0
pack_format = "bBBB"
def __init__(self, celsius, humidity, waterlevel, voltage):
self.celsius = celsius # In +/- celsius
self.humidity = humidity # In percentage
self.waterlevel = waterlevel # in centimeters
self.voltage = voltage # In tenths of a volt
# see format options here https://docs.python.org/2/library/struct.html#format-characters
def pack(self):
return struct.pack(self.pack_format, self.celsius, self.humidity, self.waterlevel, self.voltage)
def calcsize(self):
return struct.calcsize(self.pack_format)
| 35.406504 | 105 | 0.665901 | 1,603 | 0.368083 | 0 | 0 | 0 | 0 | 0 | 0 | 1,689 | 0.38783 |
465b3dc1f585b6b0356f07d08239e727188800e8 | 2,980 | py | Python | configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | chenxinfeng4/mmdetection | a99a1aaa5e4a7614f2f89f2350e1b917b2a8ca7e | [
"Apache-2.0"
]
| 6 | 2021-12-18T07:23:35.000Z | 2022-02-26T04:38:26.000Z | configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | wondervictor/mmdetection | c72bc707e661d61cf09aca0a53ad21812ef874d0 | [
"Apache-2.0"
]
| null | null | null | configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py | wondervictor/mmdetection | c72bc707e661d61cf09aca0a53ad21812ef874d0 | [
"Apache-2.0"
]
| 1 | 2021-12-12T13:35:22.000Z | 2021-12-12T13:35:22.000Z | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=img_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=img_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_size,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.04,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: This variable is for automatically scaling LR,
# USER SHOULD NOT CHANGE THIS VALUE.
default_batch_size = 32 # (8 GPUs) x (4 samples per GPU)
| 31.702128 | 147 | 0.645638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.272148 |
465d72bc64541d0735329d0bb15e70e1a6c30e99 | 2,265 | py | Python | tests/test_model.py | Sebastiencreoff/mongo_tool | 048171ba2c172c0e8962a5408edbaec26cfdf4ca | [
"MIT"
]
| null | null | null | tests/test_model.py | Sebastiencreoff/mongo_tool | 048171ba2c172c0e8962a5408edbaec26cfdf4ca | [
"MIT"
]
| null | null | null | tests/test_model.py | Sebastiencreoff/mongo_tool | 048171ba2c172c0e8962a5408edbaec26cfdf4ca | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import datetime
import mock
import mom
class ExampleClass(mom.Model):
JSON_SCHEMA = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Test class for JSON',
'type': 'object',
'properties': {
'value_datetime': {'type': ['datetime', 'null']},
'value_int': {'type': ['number', 'null']},
'value_str': {'type': ['string', 'null']}}
}
EXCLUDED_KEYS = set('to_dict')
def __init__(self, data=None, value_int=None):
self.value_datetime = None
self.value_int = value_int
self.value_str = None
super().__init__(data=data)
def to_dict(self):
result = super().to_dict()
result.update({
'value_datetime': self.value_datetime,
'value_int': self.value_int,
'value_str': self.value_str})
return result
@mom.Model.with_update
def updates(self, value_datetime, value_str):
print('save_buy function')
self.value_datetime = value_datetime
self.value_str = value_str
def test_init():
mom.Model.session = mock.MagicMock()
# Test without data
obj = ExampleClass()
assert mom.Model.session.add.call_count == 1
assert mom.Model.session.update.call_count == 0
assert not obj.read_only
assert obj.id()
# Test with data
mom.Model.session.reset_mock()
obj2 = ExampleClass(data=obj.to_dict())
assert mom.Model.session.add.call_count == 0
assert mom.Model.session.update.call_count == 0
assert obj2.read_only
assert obj2.id() == obj.id()
def test_single_attr():
mom.Model.session = mock.MagicMock()
obj = ExampleClass()
mom.Model.session.reset_mock()
# Update one parameter.
obj.value_datetime = datetime.datetime.now()
assert mom.Model.session.add.call_count == 0
assert mom.Model.session.update.call_count == 1
def test_method():
mom.Model.session = mock.MagicMock()
obj = ExampleClass()
mom.Model.session.reset_mock()
# Update parameters with function.
obj.updates(value_datetime=datetime.datetime.now(), value_str='value')
assert mom.Model.session.add.call_count == 0
assert mom.Model.session.update.call_count == 1
| 25.449438 | 74 | 0.63532 | 1,029 | 0.454305 | 0 | 0 | 187 | 0.082561 | 0 | 0 | 381 | 0.168212 |
46604f1fc90b7c0ea8ac8ed35c4082c5ab9f172f | 247 | py | Python | src/koeda/utils/stopwords.py | toriving/KoEDA | 5dfbb0e88ede13da2e5e72ac94fe7cb12c0b7cd1 | [
"MIT"
]
| 48 | 2021-04-23T16:13:41.000Z | 2022-03-24T09:03:26.000Z | src/koeda/utils/stopwords.py | toriving/KoEDA | 5dfbb0e88ede13da2e5e72ac94fe7cb12c0b7cd1 | [
"MIT"
]
| 6 | 2020-11-19T13:56:29.000Z | 2021-09-26T12:13:23.000Z | src/koeda/utils/stopwords.py | toriving/KoEDA | 5dfbb0e88ede13da2e5e72ac94fe7cb12c0b7cd1 | [
"MIT"
]
| 3 | 2021-09-13T07:14:29.000Z | 2021-12-29T09:52:36.000Z | import os
import json
STOPWORDS_JSON_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json"
)
with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f:
STOPWORD = json.load(f)["stopwords"]
| 24.7 | 83 | 0.720648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.182186 |
4660b825bf1a5e031627c3620c78b68944deb5c7 | 652 | py | Python | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
]
| 3 | 2015-09-10T22:23:55.000Z | 2019-04-04T18:47:33.000Z | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
]
| null | null | null | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
]
| 1 | 2019-08-04T14:10:12.000Z | 2019-08-04T14:10:12.000Z | from __future__ import absolute_import, division, print_function
import pytest
from .. import message as msg
def test_invalid_subset_msg():
with pytest.raises(TypeError) as exc:
msg.SubsetMessage(None)
assert exc.value.args[0].startswith('Sender must be a subset')
def test_invalid_data_msg():
with pytest.raises(TypeError) as exc:
msg.DataMessage(None)
assert exc.value.args[0].startswith('Sender must be a data')
def test_invalid_data_collection_msg():
with pytest.raises(TypeError) as exc:
msg.DataCollectionMessage(None)
assert exc.value.args[0].startswith('Sender must be a DataCollection')
| 27.166667 | 74 | 0.739264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.124233 |
4660df17d48e40efbff3c55617fa7393819b5977 | 1,358 | py | Python | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
]
| 1 | 2017-07-30T17:35:10.000Z | 2017-07-30T17:35:10.000Z | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
]
| null | null | null | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
]
| 1 | 2015-03-01T08:52:14.000Z | 2015-03-01T08:52:14.000Z | # -*- coding: utf-8 -*-
import os
import util
from fabric.api import *
from fabric.state import output
from fabric.colors import *
from base import BaseTask
from helper.print_helper import task_puts
class CollectConfig(BaseTask):
"""
collect configuration
"""
name = "collect"
def run_task(self, *args, **kwargs):
host_config = env.inventory.get_variables(env.host)
hostname = host_config['ssh_host']
if not util.tcping(hostname, 22, 1):
task_puts("host {0} does not exist. skip...".format(hostname))
return
config = self.get_config(hostname, host_config['ssh_user'], host_config['ssh_pass'], host_config['exec_pass'], host_config['type'])
self.write_config(env.host, config)
# print config
def get_config(self, hostname, ssh_user, ssh_pass, exec_pass, os_type):
script_name = "dump-config-cisco-{0}.sh".format(os_type)
config = local(os.path.dirname(os.path.abspath(__file__)) + "/../bin/{0} {1} {2} {3}".format(script_name, ssh_user, hostname, ssh_pass), capture = True)
return config
def write_config(self, hostname, config):
output_dir = os.path.dirname(os.path.abspath(__file__)) + "/../tmp/config"
local("mkdir -p {0}".format(output_dir))
file = open("{0}/{1}.txt".format(output_dir, hostname), 'w')
file.write(str(config))
file.close()
collect = CollectConfig()
| 33.121951 | 156 | 0.690722 | 1,129 | 0.83137 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.189249 |
4661333ffeca10b7026c68a47b44fc3be83ff093 | 2,334 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | LWhite027/PaddleBox | b14bcdf285dd8829e11ab12cc815ac1b1ab62694 | [
"Apache-2.0"
]
| 10 | 2021-05-12T07:20:32.000Z | 2022-03-04T08:21:56.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
]
| 1 | 2020-09-10T09:05:52.000Z | 2020-09-10T09:06:22.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
]
| 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data1 = fluid.data(
name="data1", shape=[8, 32, 128], dtype="float32")
data2 = fluid.data(
name="data2", shape=[8, 32, 128], dtype="float32")
trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
flatt1 = fluid.layers.flatten(trans1)
flatt2 = fluid.layers.flatten(trans2)
concat_out = fluid.layers.concat([flatt1, flatt2])
# There is no parameters for above structure.
# Hence, append a batch_norm to avoid failure caused by load_combined.
out = fluid.layers.batch_norm(concat_out, is_test=True)
self.feeds = {
"data1": np.random.random([8, 32, 128]).astype("float32"),
"data2": np.random.random([8, 32, 128]).astype("float32")
}
self.enable_trt = True
self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
1 << 20, 8, 3, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
def test_check_output(self):
# There is no cpu pass for transpose_flatten_concat_fuse
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu)
if __name__ == "__main__":
unittest.main()
| 40.947368 | 83 | 0.673522 | 1,479 | 0.633676 | 0 | 0 | 0 | 0 | 0 | 0 | 844 | 0.361611 |
46627a350df5177a8548cf67ff0c02d30e501fb7 | 2,183 | py | Python | src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 543f55bd88c048da88f343ff4b1b15b3f84ec20f | [
"MIT"
]
| null | null | null | src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 543f55bd88c048da88f343ff4b1b15b3f84ec20f | [
"MIT"
]
| null | null | null | src/stage_02_base_model_creation.py | TUCchkul/Dog-Cat-Classification-with-MLflow | 543f55bd88c048da88f343ff4b1b15b3f84ec20f | [
"MIT"
]
| null | null | null | import argparse
import os
import shutil
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
import random
from src.utils.model import log_model_summary
import tensorflow as tf
STAGE= "Base Model Creation"
logging.basicConfig(
filename=os.path.join("logs",'running_logs.log'),
level=logging.INFO,
format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
filemode="a")
def main(config_path):
config=read_yaml(config_path)
params=config["params"]
logging.info("Layer Defined")
LAYERS=[
tf.keras.layers.Input(shape=tuple(params["img_shape"])),
tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2,2)),
tf.keras.layers.Conv2D(32,(3,3), activation="relu"),
tf.keras.layers.MaxPool2D(pool_size=(2,2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(8, activation="relu"),
tf.keras.layers.Dense(2, activation="softmax")
]
classifier=tf.keras.Sequential(LAYERS)
logging.info(f"Base Model Summary:\n{log_model_summary(classifier)}")
classifier.compile(optimizer=tf.keras.optimizers.Adam(params["lr"]),
loss=params["loss"],
metrics=params["metrics"]
)
path_to_model_dir=os.path.join(config["data"]["local_dir"],
config["data"]["model_dir"]
)
create_directories([path_to_model_dir])
path_to_model=os.path.join(path_to_model_dir,
config["data"]["init_model_file"])
classifier.save(path_to_model)
logging.info(f"model is save at : {path_to_model}")
if __name__=="__main__":
args=argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/config.yaml")
parsed_args=args.parse_args()
try:
logging.info("\n*********************")
logging.info(f">>>>>>>stage {STAGE} started <<<<<<<")
main(config_path=parsed_args.config)
logging.info(f">>>>>>>> stage {STAGE} completed! <<<<<<<<\n")
except Exception as e:
logging.exception(e)
raise e
| 33.584615 | 81 | 0.633532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 488 | 0.223546 |
46628dc5cc0651a6487fc8978ac7257bc4e97455 | 3,774 | py | Python | test_calculator.py | Kidatoy/Advanced-Calculator | ebd3e436894116ec35b08872ffecdf762b079670 | [
"MIT"
]
| null | null | null | test_calculator.py | Kidatoy/Advanced-Calculator | ebd3e436894116ec35b08872ffecdf762b079670 | [
"MIT"
]
| null | null | null | test_calculator.py | Kidatoy/Advanced-Calculator | ebd3e436894116ec35b08872ffecdf762b079670 | [
"MIT"
]
| null | null | null | import unittest # https://docs.python.org/3/library/unittest.html
from modules.calculator import Calculator as Calc
class TestCalculator(unittest.TestCase):
"""
Test Driven Development Unittest File
Module: Calculator
Updated: 12/16/2019
Author: Kida Toy
"""
def test_addition(self):
"""
Evaluate addition corner cases
"""
self.assertEqual(2, Calc().eval('1+1'))
self.assertEqual(2, Calc().eval('1.0+1.0'))
self.assertEqual(0, Calc().eval('-1+1'))
self.assertEqual(-2, Calc().eval('-1+-1'))
def test_subtraction(self):
"""
Evaluate subtraction corner cases
"""
self.assertEqual(0, Calc().eval('1-1'))
self.assertEqual(-2, Calc().eval('-1-1'))
self.assertEqual(0, Calc().eval('-1--1'))
def test_multiplication(self):
"""
Evaluate multiplication corner cases
"""
self.assertEqual(0, Calc().eval('1*0'))
self.assertEqual(0, Calc().eval('0*-1'))
self.assertEqual(1, Calc().eval('1*1'))
self.assertEqual(-1, Calc().eval('-1*1'))
self.assertEqual(1, Calc().eval('-1*-1'))
self.assertEqual(1, Calc().eval('.25*4'))
def test_division(self):
"""
Test division corner cases
Note: division by zero is handled in test_exceptions
"""
self.assertEqual(1, Calc().eval('1/1'))
self.assertEqual(.25, Calc().eval('1/4'))
self.assertEqual(-1, Calc().eval('-1/1'))
self.assertEqual(1, Calc().eval('-1/-1'))
self.assertEqual(0, Calc().eval('0/-1'))
def test_exponents(self):
"""
Test exponent corner cases
"""
self.assertEqual(1, Calc().eval('2^0'))
self.assertEqual(2, Calc().eval('2^1'))
self.assertEqual(4, Calc().eval('2^2'))
self.assertEqual(.5, Calc().eval('2^-1'))
self.assertEqual(4, Calc().eval('-2^2'))
def test_parentheses(self):
"""
Test parentheses corner cases
"""
self.assertEqual(5.0, Calc().eval('(4.0)+1'))
self.assertEqual(3.0, Calc().eval('(4+1)-2'))
self.assertEqual(5.0, Calc().eval('(5+-5)+5'))
self.assertEqual(-5.0, Calc().eval('(-10+3)+2'))
self.assertEqual(-26.0, Calc().eval('10-(3*2)^2'))
def test_pi(self):
"""
Test pi corner cases
"""
self.assertEqual(4.1415926535, Calc().eval('(pi)+1'))
self.assertEqual(1.1415926535, Calc().eval('(pi)-2'))
self.assertEqual(3.1415926535, Calc().eval('(pi+-5)+5'))
self.assertEqual(1.8584073465, Calc().eval('(-pi+3)+2'))
self.assertEqual(-29.478417602100684, Calc().eval('10-(pi*2)^2'))
self.assertEqual(1.57079632675, Calc().eval('pi/2'))
def test_e(self):
"""
Test e corner cases
"""
self.assertEqual(3.7182818284, Calc().eval('(e)+1'))
self.assertEqual(0.7182818283999999, Calc().eval('(e)-2'))
self.assertEqual(2.7182818284, Calc().eval('(e+-5)+5'))
self.assertEqual(2.2817181716, Calc().eval('(-e+3)+2'))
self.assertEqual(-19.556224394438587, Calc().eval('10-(e*2)^2'))
self.assertEqual(1.3591409142, Calc().eval('e/2'))
def test_phi(self):
"""
Test phi corner cases
"""
self.assertEqual(2.6180339886999997, Calc().eval('(phi)+1'))
self.assertEqual(-0.3819660113000001, Calc().eval('(phi)-2'))
self.assertEqual(1.6180339886999997, Calc().eval('(phi+-5)+5'))
self.assertEqual(3.3819660113000003, Calc().eval('(-phi+3)+2'))
self.assertEqual(-0.47213595435372646, Calc().eval('10-(phi*2)^2'))
self.assertEqual(0.80901699435, Calc().eval('phi/2'))
| 36.640777 | 75 | 0.560943 | 3,655 | 0.968468 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.27716 |
4662eb3534b543f9d1857e55e3d0e8669cf078e7 | 9,315 | py | Python | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
]
| null | null | null | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
]
| 4 | 2020-04-17T06:53:05.000Z | 2020-12-01T02:37:34.000Z | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
#
# main.py
#
# Command-line utility for interacting with PSU Controller in PDDF mode in SONiC
#
try:
import sys
import os
import click
from tabulate import tabulate
from utilities_common.util_base import UtilHelper
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
VERSION = '2.0'
SYSLOG_IDENTIFIER = "psuutil"
PLATFORM_SPECIFIC_MODULE_NAME = "psuutil"
PLATFORM_SPECIFIC_CLASS_NAME = "PsuUtil"
# Global platform-specific psuutil class instance
platform_psuutil = None
platform_chassis = None
# Wrapper APIs so that this util is suited to both 1.0 and 2.0 platform APIs
def _wrapper_get_num_psus():
if platform_chassis is not None:
try:
return platform_chassis.get_num_psus()
except NotImplementedError:
pass
return platform_psuutil.get_num_psus()
def _wrapper_get_psu_name(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_name()
except NotImplementedError:
pass
return "PSU {}".format(idx)
def _wrapper_get_psu_presence(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_presence()
except NotImplementedError:
pass
return platform_psuutil.get_psu_presence(idx)
def _wrapper_get_psu_status(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_status()
except NotImplementedError:
pass
return platform_psuutil.get_psu_status(idx)
def _wrapper_get_psu_model(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_model()
except NotImplementedError:
pass
return platform_psuutil.get_model(idx)
def _wrapper_get_psu_mfr_id(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_mfr_id()
except NotImplementedError:
pass
return platform_psuutil.get_mfr_id(idx)
def _wrapper_get_psu_serial(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_serial()
except NotImplementedError:
pass
return platform_psuutil.get_serial(idx)
def _wrapper_get_psu_direction(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1)._fan_list[0].get_direction()
except NotImplementedError:
pass
return platform_psuutil.get_direction(idx)
def _wrapper_get_output_voltage(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_voltage()
except NotImplementedError:
pass
return platform_psuutil.get_output_voltage(idx)
def _wrapper_get_output_current(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_current()
except NotImplementedError:
pass
return platform_psuutil.get_output_current(idx)
def _wrapper_get_output_power(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1).get_power()
except NotImplementedError:
pass
return platform_psuutil.get_output_power(idx)
def _wrapper_get_fan_rpm(idx, fan_idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx-1)._fan_list[fan_idx-1].get_speed_rpm()
except NotImplementedError:
pass
return platform_psuutil.get_fan_rpm(idx, fan_idx)
def _wrapper_dump_sysfs(idx):
if platform_chassis is not None:
try:
return platform_chassis.get_psu(idx).dump_sysfs()
except NotImplementedError:
pass
return platform_psuutil.dump_sysfs()
# ==================== CLI commands and groups ====================
# This is our main entrypoint - the main 'psuutil' command
@click.group()
def cli():
"""psuutil - Command line utility for providing PSU status"""
global platform_psuutil
global platform_chassis
if os.geteuid() != 0:
click.echo("Root privileges are required for this operation")
sys.exit(1)
# Load the helper class
helper = UtilHelper()
if not helper.check_pddf_mode():
click.echo("PDDF mode should be supported and enabled for this platform for this operation")
sys.exit(1)
# Load new platform api class
try:
import sonic_platform.platform
platform_chassis = sonic_platform.platform.Platform().get_chassis()
except Exception as e:
click.echo("Failed to load chassis due to {}".format(str(e)))
# Load platform-specific psuutil class if 2.0 implementation is not present
if platform_chassis is None:
try:
platform_psuutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
except Exception as e:
click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
sys.exit(2)
# 'version' subcommand
@cli.command()
def version():
"""Display version info"""
click.echo("PDDF psuutil version {0}".format(VERSION))
# 'numpsus' subcommand
@cli.command()
def numpsus():
"""Display number of supported PSUs on device"""
click.echo(_wrapper_get_num_psus())
# 'status' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def status(index):
"""Display PSU status"""
supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
psu_ids = []
if (index < 0):
psu_ids = supported_psu
else:
psu_ids = [index]
header = ['PSU', 'Status']
status_table = []
for psu in psu_ids:
msg = ""
psu_name = _wrapper_get_psu_name(psu)
if psu not in supported_psu:
click.echo("Error! The {} is not available on the platform.\n" \
"Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
continue
presence = _wrapper_get_psu_presence(psu)
if presence:
oper_status = _wrapper_get_psu_status(psu)
msg = 'OK' if oper_status else "NOT OK"
else:
msg = 'NOT PRESENT'
status_table.append([psu_name, msg])
if status_table:
click.echo(tabulate(status_table, header, tablefmt="simple"))
# 'mfrinfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def mfrinfo(index):
"""Display PSU manufacturer info"""
supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
psu_ids = []
if (index < 0):
psu_ids = supported_psu
else:
psu_ids = [index]
for psu in psu_ids:
psu_name = _wrapper_get_psu_name(psu)
if psu not in supported_psu:
click.echo("Error! The {} is not available on the platform.\n" \
"Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
continue
status = _wrapper_get_psu_status(psu)
if not status:
click.echo("{} is Not OK\n".format(psu_name))
continue
model_name = _wrapper_get_psu_model(psu)
mfr_id = _wrapper_get_psu_mfr_id(psu)
serial_num = _wrapper_get_psu_serial(psu)
airflow_dir = _wrapper_get_psu_direction(psu)
click.echo("{} is OK\nManufacture Id: {}\n" \
"Model: {}\nSerial Number: {}\n" \
"Fan Direction: {}\n".format(psu_name, mfr_id, model_name, serial_num, airflow_dir.capitalize()))
# 'seninfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def seninfo(index):
"""Display PSU sensor info"""
supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
psu_ids = []
if (index < 0):
psu_ids = supported_psu
else:
psu_ids = [index]
for psu in psu_ids:
psu_name = _wrapper_get_psu_name(psu)
if psu not in supported_psu:
click.echo("Error! The {} is not available on the platform.\n" \
"Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
continue
oper_status = _wrapper_get_psu_status(psu)
if not oper_status:
click.echo("{} is Not OK\n".format(psu_name))
continue
v_out = _wrapper_get_output_voltage(psu) * 1000
i_out = _wrapper_get_output_current(psu) * 1000
p_out = _wrapper_get_output_power(psu) * 1000
fan1_rpm = _wrapper_get_fan_rpm(psu, 1)
click.echo("{} is OK\nOutput Voltage: {} mv\n" \
"Output Current: {} ma\nOutput Power: {} mw\n" \
"Fan1 Speed: {} rpm\n".format(psu_name, v_out, i_out, p_out, fan1_rpm))
@cli.group()
def debug():
"""pddf_psuutil debug commands"""
pass
@debug.command()
def dump_sysfs():
"""Dump all PSU related SysFS paths"""
for psu in range(_wrapper_get_num_psus()):
status = _wrapper_dump_sysfs(psu)
if status:
for i in status:
click.echo(i)
if __name__ == '__main__':
cli()
| 31.05 | 117 | 0.646914 | 0 | 0 | 0 | 0 | 5,098 | 0.547289 | 0 | 0 | 1,795 | 0.1927 |
466394d9212459110bd5519845967eacdfeb9888 | 758 | py | Python | gifbox/core/serializers.py | timmygee/gifbox | ccdd88ad9424c8e2c519415cde619af3a61daf66 | [
"MIT"
]
| null | null | null | gifbox/core/serializers.py | timmygee/gifbox | ccdd88ad9424c8e2c519415cde619af3a61daf66 | [
"MIT"
]
| null | null | null | gifbox/core/serializers.py | timmygee/gifbox | ccdd88ad9424c8e2c519415cde619af3a61daf66 | [
"MIT"
]
| null | null | null | from rest_framework import serializers
from versatileimagefield.serializers import VersatileImageFieldSerializer
from .models import Image, AnimatedGif
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = Image
fields = ('image',)
image = VersatileImageFieldSerializer(
sizes=[
('full_size', 'url'),
('thumbnail', 'thumbnail__200x200'),
]
)
class AnimatedGifSerializer(serializers.ModelSerializer):
class Meta:
model = AnimatedGif
fields = ('id', 'image', 'created', 'period')
image = VersatileImageFieldSerializer(
sizes=[
('full_size_url', 'url'),
('thumbnail_url', 'thumbnail__200x200'),
]
)
| 23.6875 | 73 | 0.631926 | 598 | 0.788918 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.180739 |
466460f359120dda2e7fd00e3c8bae00cdec4a39 | 4,930 | py | Python | ginga/canvas/coordmap.py | saimn/ginga | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | [
"BSD-3-Clause"
]
| null | null | null | ginga/canvas/coordmap.py | saimn/ginga | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | [
"BSD-3-Clause"
]
| null | null | null | ginga/canvas/coordmap.py | saimn/ginga | 9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455 | [
"BSD-3-Clause"
]
| null | null | null | #
# coordmap.py -- coordinate mappings.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import trcalc
from ginga.util import wcs
from ginga.util.six.moves import map
__all__ = ['CanvasMapper', 'DataMapper', 'OffsetMapper', 'WCSMapper']
class CanvasMapper(object):
"""A coordinate mapper that maps to the viewer's canvas in
canvas coordinates.
"""
def __init__(self, viewer):
# record the viewer just in case
self.viewer = viewer
def to_canvas(self, canvas_x, canvas_y):
return (canvas_x, canvas_y)
def to_data(self, canvas_x, canvas_y):
return self.viewer.get_data_xy(canvas_x, canvas_y)
def data_to(self, data_x, data_y):
return self.viewer.get_canvas_xy(data_x, data_y)
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO? Not sure if it is needed with this mapper type
return x, y
class CartesianMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in Cartesian coordinates that do not scale (unlike DataMapper).
"""
def __init__(self, viewer):
self.viewer = viewer
def to_canvas(self, crt_x, crt_y):
return self.viewer.offset_to_window(crt_x, crt_y)
def to_data(self, crt_x, crt_y):
return self.viewer.offset_to_data(crt_x, crt_y)
def data_to(self, data_x, data_y):
return self.viewer.data_to_offset(data_x, data_y)
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)
class DataMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in data coordinates.
"""
def __init__(self, viewer):
self.viewer = viewer
def to_canvas(self, data_x, data_y):
return self.viewer.canvascoords(data_x, data_y)
def to_data(self, data_x, data_y):
return data_x, data_y
def data_to(self, data_x, data_y):
return data_x, data_y
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return x + xoff, y + yoff
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
return trcalc.rotate_pt(x, y, theta, xoff=xoff, yoff=yoff)
class OffsetMapper(object):
"""A coordinate mapper that maps to the viewer's canvas
in data coordinates that are offsets relative to some other
reference object.
"""
def __init__(self, viewer, refobj):
# TODO: provide a keyword arg to specify which point in the obj
self.viewer = viewer
self.refobj = refobj
def calc_offsets(self, points):
ref_x, ref_y = self.refobj.get_reference_pt()
#return map(lambda x, y: x - ref_x, y - ref_y, points)
def _cvt(pt):
x, y = pt
return x - ref_x, y - ref_y
return map(_cvt, points)
def to_canvas(self, delta_x, delta_y):
data_x, data_y = self.to_data(delta_x, delta_y)
return self.viewer.canvascoords(data_x, data_y)
def to_data(self, delta_x, delta_y):
ref_x, ref_y = self.refobj.get_reference_pt()
data_x, data_y = self.refobj.crdmap.to_data(ref_x, ref_y)
return data_x + delta_x, data_y + delta_y
## def data_to(self, data_x, data_y):
## ref_x, ref_y = self.refobj.get_reference_pt()
## return data_x - ref_data_x, data_y - ref_data_y
def offset_pt(self, pt, xoff, yoff):
# A no-op because this object's points are always considered
# relative to the reference object
return pt
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO? Not sure if it is needed with this mapper type
return x, y
class WCSMapper(DataMapper):
"""A coordinate mapper that maps to the viewer's canvas
in WCS coordinates.
"""
def to_canvas(self, lon, lat):
data_x, data_y = self.to_data(lon, lat)
return super(WCSMapper, self).to_canvas(data_x, data_y)
def to_data(self, lon, lat):
image = self.viewer.get_image()
data_x, data_y = image.radectopix(lon, lat)
return data_x, data_y
def data_to(self, data_x, data_y):
image = self.viewer.get_image()
lon, lat = image.pixtoradec(data_x, data_y)
return lon, lat
def offset_pt(self, pt, xoff, yoff):
x, y = pt
return wcs.add_offset_radec(x, y, xoff, yoff)
def rotate_pt(self, x, y, theta, xoff=0, yoff=0):
# TODO: optomize by rotating in WCS space
xoff, yoff = self.to_data(xoff, yoff)
x, y = super(WCSMapper, self).rotate_pt(x, y, theta,
xoff=xoff, yoff=yoff)
x, y = self.data_to(x, y)
return x, y
#END
| 30.245399 | 71 | 0.631034 | 4,596 | 0.932252 | 0 | 0 | 0 | 0 | 0 | 0 | 1,283 | 0.260243 |
46656e54aaab662adced0d0bfa04fce707df2e88 | 6,658 | py | Python | train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
]
| null | null | null | train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
]
| null | null | null | train.py | zpc-666/Paddle-Stochastic-Depth-ResNet110 | bb8b5b90052feef39fafd2a790f08b80b45fbe41 | [
"Apache-2.0"
]
| 1 | 2021-08-07T14:56:44.000Z | 2021-08-07T14:56:44.000Z | # coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import importlib
from visualdl import LogWriter
import numpy as np
import pickle
from models import utils
from config import parser_args
def train_model(args):
if args.dataset=='cifar10':
root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')
print(args)
model = importlib.import_module('models.__init__').__dict__[args.net](
None, drop_path_rate=args.drop_path_rate, use_drop_path=args.use_drop_path, use_official_implement=args.use_official_implement)
train_loader, val_loader, test_loader = importlib.import_module(
'dataset.' + args.dataset).__dict__['load_data'](root, args.train_batch_size,
args.test_batch_size, has_val_dataset=args.has_val_dataset)
writer = LogWriter(logdir=args.save_dir)
criterion = nn.CrossEntropyLoss()
if args.optimizer == 'sgd':
lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate, milestones=args.milestones, gamma=args.gamma)
optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
learning_rate=lr_scheduler,
momentum=args.momentum,
weight_decay=args.weight_decay,
use_nesterov=args.nesterov)
elif args.optimizer == 'adam':
optimizer = paddle.optimizer.AdamW(parameters=model.parameters(),
learning_rate=args.learning_rate,
weight_decay=args.weight_decay)
else:
raise ValueError("optimizer must be sgd or adam.")
best_acc = 0
for i in range(args.epochs):
utils.train_per_epoch(train_loader, model, criterion, optimizer, i, writer)
top1_acc, top5_acc = utils.validate(val_loader, model, criterion)
if args.optimizer == 'sgd':
lr_scheduler.step()
if best_acc < top1_acc:
paddle.save(model.state_dict(),
args.save_dir + '/model_best.pdparams')
best_acc = top1_acc
if not args.save_best:
if (i + 1) % args.save_interval == 0 and i != 0:
paddle.save(model.state_dict(),
args.save_dir + '/model.pdparams')
writer.add_scalar('val-acc', top1_acc, i)
writer.add_scalar('val-top5-acc', top5_acc, i)
writer.add_scalar('lr', optimizer.get_lr(), i)
print('best acc: {:.2f}'.format(best_acc))
model.set_state_dict(paddle.load(args.save_dir + '/model_best.pdparams'))
top1_acc, top5_acc = utils.validate(test_loader, model, criterion)
with open(os.path.join(args.save_dir, 'test_acc.txt'), 'w') as f:
f.write('test_acc:'+str(top1_acc))
def train_hl_api(args):
if args.dataset=='cifar10':
root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')
print(args)
model = importlib.import_module('models.__init__').__dict__[args.net](
None, drop_path_rate=args.drop_path_rate, use_drop_path=args.use_drop_path, use_official_implement=args.use_official_implement)
train_loader, val_loader, test_loader = importlib.import_module(
'dataset.' + args.dataset).__dict__['load_data'](root, args.train_batch_size,
args.test_batch_size, has_val_dataset=args.has_val_dataset)
criterion = nn.CrossEntropyLoss()
if args.optimizer == 'sgd':
        # The high-level API calls lr_scheduler.step() every iteration, so the milestone
        # intervals are scaled by len(train_loader) here to keep the same epoch schedule.
lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate, milestones=[m*len(train_loader) for m in args.milestones], gamma=args.gamma)
optimizer = paddle.optimizer.Momentum(parameters=model.parameters(),
learning_rate=lr_scheduler,
momentum=args.momentum,
weight_decay=args.weight_decay,
use_nesterov=args.nesterov)
elif args.optimizer == 'adam':
optimizer = paddle.optimizer.AdamW(parameters=model.parameters(),
learning_rate=args.learning_rate,
weight_decay=args.weight_decay)
else:
raise ValueError("optimizer must be sgd or adam.")
model = paddle.Model(model)
model.prepare(optimizer=optimizer, #指定优化器
loss=criterion, #指定损失函数
metrics=paddle.metric.Accuracy()) #指定评估方法
#用于visualdl可视化
visualdl = paddle.callbacks.VisualDL(log_dir=args.save_dir)
#早停机制,这里使用只是为了在训练过程中保存验证集上的最佳模型,最后用于测试集验证
early_stop = paddle.callbacks.EarlyStopping('acc', mode='max', patience=args.epochs, verbose=1,
min_delta=0, baseline=None, save_best_model=True)
model.fit(train_data=train_loader, #训练数据集
eval_data=val_loader, #验证数据集
epochs=args.epochs, #迭代轮次
save_dir=args.save_dir, #把模型参数、优化器参数保存至自定义的文件夹
save_freq=args.save_interval, #设定每隔多少个epoch保存模型参数及优化器参数
verbose=1,
log_freq=20,
eval_freq=args.eval_freq,
callbacks=[visualdl, early_stop])
#用验证集上最好模型在测试集上验证精度
model.load(os.path.join(args.save_dir, 'best_model.pdparams'))
result = model.evaluate(eval_data=test_loader, verbose=1)
print('test acc:', result['acc'], 'test error:', 1-result['acc'])
if __name__ == '__main__':
args = parser_args()
utils.seed_paddle(args.seed)
if not args.high_level_api:
train_model(args)
else:
train_hl_api(args) | 47.899281 | 169 | 0.615801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,613 | 0.23142 |
466669f55fd94ab7691c895cf787adfe7eec635c | 1,204 | py | Python | dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
]
| 1 | 2020-03-11T12:19:13.000Z | 2020-03-11T12:19:13.000Z | dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
]
| null | null | null | dataloader/EDSR/video.py | pidan1231239/SR-Stereo2 | 8e7ef8f33a10c9d857cc5383c02d126ee6ab8a29 | [
"MIT"
]
| null | null | null | import os
from . import common
import cv2
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Video(data.Dataset):
def __init__(self, args, name='Video', train=False, benchmark=False):
self.args = args
self.name = name
self.scale = args.scale
self.idx_scale = 0
self.train = False
self.do_eval = False
self.benchmark = benchmark
self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
self.vidcap = cv2.VideoCapture(args.dir_demo)
self.n_frames = 0
self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
def __getitem__(self, idx):
success, lr = self.vidcap.read()
if success:
self.n_frames += 1
lr, = common.set_channel(lr, n_channels=self.args.n_colors)
lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
else:
vidcap.release()
return None
def __len__(self):
return self.total_frames
def set_scale(self, idx_scale):
self.idx_scale = idx_scale
| 26.755556 | 77 | 0.623754 | 1,077 | 0.894518 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.01495 |
46674d12a75c726caab7f069ff51c1295884c1f4 | 67 | py | Python | backend/views/__init__.py | chriamue/flask-unchained-react-spa | 610e099f3ece508f4c8a62d3704e4cc49f869194 | [
"MIT"
]
| 5 | 2018-10-15T15:33:32.000Z | 2021-01-13T23:03:48.000Z | backend/views/__init__.py | chriamue/flask-unchained-react-spa | 610e099f3ece508f4c8a62d3704e4cc49f869194 | [
"MIT"
]
| 18 | 2019-12-10T22:11:27.000Z | 2021-12-13T20:42:58.000Z | backend/views/__init__.py | chriamue/flask-unchained-react-spa | 610e099f3ece508f4c8a62d3704e4cc49f869194 | [
"MIT"
]
| 4 | 2018-10-15T15:59:25.000Z | 2020-04-11T17:48:35.000Z | from .contact_submission_resource import ContactSubmissionResource
| 33.5 | 66 | 0.925373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4667ccf5ef5c78e64a0eadc56411d4151f24e864 | 6,395 | py | Python | blink_handler.py | oyiptong/chromium-dashboard | c94632a779da0c8ab51c2e029fdcceffad6ab4c1 | [
"Apache-2.0"
]
| null | null | null | blink_handler.py | oyiptong/chromium-dashboard | c94632a779da0c8ab51c2e029fdcceffad6ab4c1 | [
"Apache-2.0"
]
| null | null | null | blink_handler.py | oyiptong/chromium-dashboard | c94632a779da0c8ab51c2e029fdcceffad6ab4c1 | [
"Apache-2.0"
]
| 1 | 2020-09-29T19:23:59.000Z | 2020-09-29T19:23:59.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Eric Bidelman)'
import collections
import json
import logging
import os
import webapp2
import yaml
# Appengine imports.
from google.appengine.api import memcache
import common
import models
import settings
import util
from schedule import construct_chrome_channels_details
class PopulateSubscribersHandler(common.ContentHandler):
def __populate_subscribers(self):
"""Seeds the database with the team in devrel_team.yaml and adds the team
member to the specified blink components in that file. Should only be ran
if the FeatureOwner database entries have been cleared"""
f = file('%s/data/devrel_team.yaml' % settings.ROOT_DIR, 'r')
for profile in yaml.load_all(f):
blink_components = profile.get('blink_components', [])
blink_components = [models.BlinkComponent.get_by_name(name).key() for name in blink_components]
blink_components = filter(None, blink_components) # Filter out None values
user = models.FeatureOwner(
name=unicode(profile['name']),
email=unicode(profile['email']),
twitter=profile.get('twitter', None),
blink_components=blink_components,
primary_blink_components=blink_components,
watching_all_features=False,
)
user.put()
f.close()
@common.require_edit_permission
def get(self):
if settings.PROD:
return self.response.out.write('Handler not allowed in production.')
models.BlinkComponent.update_db()
self.__populate_subscribers()
return self.redirect('/admin/blink')
class BlinkHandler(common.ContentHandler):
def __update_subscribers_list(self, add=True, user_id=None, blink_component=None, primary=False):
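    # Adds or removes the given FeatureOwner as a subscriber of the named blink
    # component (or as an owner when primary=True). Returns False only when a
    # required argument is missing; an unknown user id is treated as a no-op.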
if not user_id or not blink_component:
return False
user = models.FeatureOwner.get_by_id(long(user_id))
if not user:
return True
if primary:
if add:
user.add_as_component_owner(blink_component)
else:
user.remove_as_component_owner(blink_component)
else:
if add:
user.add_to_component_subscribers(blink_component)
else:
user.remove_from_component_subscribers(blink_component)
return True
@common.require_edit_permission
@common.strip_trailing_slash
def get(self, path):
# key = '%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX)
# data = memcache.get(key)
# if data is None:
components = models.BlinkComponent.all().order('name').fetch(None)
subscribers = models.FeatureOwner.all().order('name').fetch(None)
# Format for django template
subscribers = [x.format_for_template() for x in subscribers]
for c in components:
c.primaries = [o.name for o in c.owners]
# wf_component_content = models.BlinkComponent.fetch_wf_content_for_components()
# for c in components:
# c.wf_urls = wf_component_content.get(c.name) or []
data = {
'subscribers': subscribers,
'components': components[1:] # ditch generic "Blink" component
}
# memcache.set(key, data)
self.render(data, template_path=os.path.join('admin/blink.html'))
# Remove user from component subscribers.
def put(self, path):
params = json.loads(self.request.body)
self.__update_subscribers_list(False, user_id=params.get('userId'),
blink_component=params.get('componentName'),
primary=params.get('primary'))
self.response.set_status(200, message='User removed from subscribers')
return self.response.write(json.dumps({'done': True}))
# Add user to component subscribers.
def post(self, path):
params = json.loads(self.request.body)
self.__update_subscribers_list(True, user_id=params.get('userId'),
blink_component=params.get('componentName'),
primary=params.get('primary'))
# memcache.flush_all()
# memcache.delete('%s|blinkcomponentowners' % (settings.MEMCACHE_KEY_PREFIX))
self.response.set_status(200, message='User added to subscribers')
return self.response.write(json.dumps(params))
class SubscribersHandler(common.ContentHandler):
@common.require_edit_permission
# @common.strip_trailing_slash
def get(self, path):
users = models.FeatureOwner.all().order('name').fetch(None)
feature_list = models.Feature.get_chronological()
milestone = self.request.get('milestone') or None
if milestone:
milestone = int(milestone)
feature_list = filter(lambda f: (f['shipped_milestone'] or f['shipped_android_milestone']) == milestone, feature_list)
list_features_per_owner = 'showFeatures' in self.request.GET
for user in users:
# user.subscribed_components = [models.BlinkComponent.get(key) for key in user.blink_components]
user.owned_components = [models.BlinkComponent.get(key) for key in user.primary_blink_components]
for component in user.owned_components:
component.features = []
if list_features_per_owner:
component.features = filter(lambda f: component.name in f['blink_components'], feature_list)
details = construct_chrome_channels_details()
data = {
'subscribers': users,
'channels': collections.OrderedDict([
('stable', details['stable']),
('beta', details['beta']),
('dev', details['dev']),
('canary', details['canary']),
]),
'selected_milestone': int(milestone) if milestone else None
}
self.render(data, template_path=os.path.join('admin/subscribers.html'))
app = webapp2.WSGIApplication([
('/admin/blink/populate_subscribers', PopulateSubscribersHandler),
('/admin/subscribers(.*)', SubscribersHandler),
('(.*)', BlinkHandler),
], debug=settings.DEBUG)
| 34.945355 | 124 | 0.70086 | 5,266 | 0.823456 | 0 | 0 | 2,569 | 0.40172 | 0 | 0 | 2,125 | 0.332291 |
46687db58d5ce22cf64d16f65406c0bb8f14b56a | 2,756 | py | Python | OBlog/blueprint/pages/main.py | OhYee/OBlog | a9d7e4fda5651cf9c5afd4c128c4df4442794e97 | ["BSD-3-Clause"] | 23 | 2018-02-23T12:56:43.000Z | 2021-12-20T13:21:47.000Z | OBlog/blueprint/pages/main.py | OhYee/OBlog | a9d7e4fda5651cf9c5afd4c128c4df4442794e97 | ["BSD-3-Clause"] | 17 | 2018-02-23T12:52:39.000Z | 2018-12-04T05:50:58.000Z | OBlog/blueprint/pages/main.py | OhYee/OBlog | a9d7e4fda5651cf9c5afd4c128c4df4442794e97 | ["BSD-3-Clause"] | 2 | 2018-06-16T20:52:23.000Z | 2021-04-08T15:29:44.000Z |
from OBlog import database as db
from flask import g, current_app
import re
def getPages():
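    # Memoised on flask.g so the pages table is only queried once per request.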
if not hasattr(g, "getPages"):
res = db.query_db('select * from pages;')
res.sort(key=lambda x: int(x["idx"]))
g.getPages = res
return g.getPages
def getPagesDict():
if not hasattr(g, "getPagesDict"):
pages = getPages()
res = dict((page['url'], page) for page in pages)
g.getPagesDict = res
return g.getPagesDict
def addPages(postRequest):
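    # Return codes: 0 = created, 1 = a page with this url already exists,
    # 2 = idx is not a numeric string.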
current_app.logger.debug(postRequest)
if db.exist_db('pages', {'url': postRequest['url']}):
        # already exists
return 1
if not (re.match(r'^[0-9]+$', postRequest["idx"])):
return 2
keyList = ['url', 'title', 'idx']
postRequest = dict((key, postRequest[key] if key in postRequest else "")for key in keyList)
postRequest['show'] = 'true'
db.insert_db('pages', postRequest)
return 0
def updatePage(postRequest):
current_app.logger.debug(postRequest)
oldurl = postRequest['oldurl']
url = postRequest['url']
if url != oldurl and db.exist_db('pages', {'url': url}):
        # duplicate url
return 1
if not (re.match(r'^[0-9]+$', postRequest["idx"])):
return 2
keyList = ['url', 'title', 'idx', 'show']
postRequest = dict((key, postRequest[key] if key in postRequest else "")for key in keyList)
db.update_db("pages", postRequest, {'url': oldurl})
return 0
def deletePage(postRequest):
current_app.logger.debug(postRequest)
url = postRequest['url']
if not db.exist_db('pages', {'url': url}):
        # does not exist
return 1
db.delete_db("pages", {'url': url})
return 0
import os
def absPath(path):
from OBlog import app
path = os.path.join(app.config['ROOTPATH'],
"OBlog/templates/pages", path)
return path
def fileExist(path):
    return os.path.exists(path)
def getPageTemplate(path):
path = absPath(path)
if not fileExist(path):
return (1, "")
content = ""
with open(path, 'r', encoding='utf-8') as f:
content = f.read()
return (0, content)
def getPageTemplateList():
return listFiles(absPath('.'))
def listFiles(path):
return [file
for file in os.listdir(path)
if os.path.isfile(os.path.join(path, file))]
def setPageTemplate(path, content):
path = absPath(path)
with open(path, 'w', encoding='utf-8') as f:
f.write(content)
return 0
def delPageTemplate(path):
path = absPath(path)
if not fileExist(path):
return 1
os.remove(path)
return 0
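# Rough usage sketch (illustrative only; not part of the original module):
#   addPages({'url': 'about', 'title': 'About', 'idx': '1'})    # -> 0 on success
#   updatePage({'oldurl': 'about', 'url': 'about-us', 'title': 'About', 'idx': '2', 'show': 'true'})
#   deletePage({'url': 'about-us'})                              # -> 0, or 1 if the url is unknown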
| 22.590164 | 96 | 0.576197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.120043 |
4669e171fec58193272f58bd7b305ba7d5f7aed0 | 78,232 | py | Python | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | ["MIT"] | 83 | 2017-03-15T12:43:25.000Z | 2022-03-31T12:38:44.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | ["MIT"] | 18 | 2017-03-20T14:12:58.000Z | 2021-07-28T09:11:55.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | ["MIT"] | 25 | 2017-04-01T01:40:02.000Z | 2022-02-20T11:08:12.000Z |
#!/usr/bin/env python
# $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $
"""Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
Meta-Data
================================================================================
Author: Tavis Rudd <[email protected]>
Version: $Revision: 1.148 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2006/06/22 00:18:22 $
"""
__author__ = "Tavis Rudd <[email protected]>"
__revision__ = "$Revision: 1.148 $"[11:-2]
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import __builtin__
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception): pass
DEFAULT_COMPILER_SETTINGS = {
## controlling the handling of Cheetah $placeholders
'useNameMapper': True, # Unified dotted notation and the searchList
'useSearchList': True, # if false, assume the first
# portion of the $variable (before the first dot) is a global,
# builtin, or local var that doesn't need
# looking up in the searchlist BUT use
# namemapper on the rest of the lookup
'allowSearchListAsMethArg': True,
'useAutocalling': True, # detect and call callable()'s, requires NameMapper
'useStackFrames': True, # use NameMapper.valueFromFrameOrSearchList
# rather than NameMapper.valueFromSearchList
'useErrorCatcher':False,
'alwaysFilterNone':True, # filter out None, before the filter is called
'useFilters':True, # use str instead if =False
'includeRawExprInFilterArgs':True,
#'lookForTransactionAttr':False,
'autoAssignDummyTransactionToSelf':False,
'useKWsDictArgForPassingTrans':True,
## controlling the aesthetic appearance / behaviour of generated code
'commentOffset': 1,
# should shorter str constant chunks be printed using repr rather than ''' quotes
'reprShortStrConstants': True,
'reprNewlineThreshold':3,
'outputRowColComments':True,
# should #block's be wrapped in a comment in the template's output
'includeBlockMarkers': False,
'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'),
'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'),
'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine',
'setup__str__method': False,
'mainMethodName':'respond',
'mainMethodNameForSubclasses':'writeBody',
'indentationStep': ' '*4,
'initialMethIndentLevel': 2,
'monitorSrcFile':False,
'outputMethodsBeforeAttributes': True,
## customizing the #extends directive
'autoImportForExtendsDirective':True,
'handlerForExtendsDirective':None, # baseClassName = handler(compiler, baseClassName)
# a callback hook for customizing the
# #extends directive. It can manipulate
# the compiler's state if needed.
# also see allowExpressionsInExtendsDirective
# input filtering/restriction
# use lower case keys here!!
'disabledDirectives':[], # list of directive keys, without the start token
'enabledDirectives':[], # list of directive keys, without the start token
'disabledDirectiveHooks':[], # callable(parser, directiveKey)
'preparseDirectiveHooks':[], # callable(parser, directiveKey)
'postparseDirectiveHooks':[], # callable(parser, directiveKey)
'preparsePlaceholderHooks':[], # callable(parser)
'postparsePlaceholderHooks':[], # callable(parser)
# the above hooks don't need to return anything
'expressionFilterHooks':[], # callable(parser, expr, exprType, rawExpr=None, startPos=None)
# exprType is the name of the directive, 'psp', or 'placeholder'. all
# lowercase. The filters *must* return the expr or raise an exception.
# They can modify the expr if needed.
'templateMetaclass':None, # strictly optional. Only works with new-style baseclasses
'i18NFunctionName':'self.i18n',
## These are used in the parser, but I've put them here for the time being to
## facilitate separating the parser and compiler:
'cheetahVarStartToken':'$',
'commentStartToken':'##',
'multiLineCommentStartToken':'#*',
'multiLineCommentEndToken':'*#',
'gobbleWhitespaceAroundMultiLineComments':True,
'directiveStartToken':'#',
'directiveEndToken':'#',
'allowWhitespaceAfterDirectiveStartToken':False,
'PSPStartToken':'<%',
'PSPEndToken':'%>',
'EOLSlurpToken':'#',
'gettextTokens': ["_", "N_", "ngettext"],
'allowExpressionsInExtendsDirective': False, # the default restricts it to
# accepting dotted names
'allowEmptySingleLineMethods': False,
'allowNestedDefScopes': True,
'allowPlaceholderFilterArgs': True,
## See Parser.initDirectives() for the use of the next 3
#'directiveNamesAndParsers':{}
#'endDirectiveNamesAndHandlers':{}
#'macroDirectives':{}
}
class GenUtils:
"""An abstract baseclass for the Compiler classes that provides methods that
perform generic utility functions or generate pieces of output code from
information passed in by the Parser baseclass. These methods don't do any
parsing themselves.
"""
def genTimeInterval(self, timeString):
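        # Converts a cache-interval string with an 's', 'm', 'h', 'd' or 'w' suffix
        # into seconds; a bare number with no suffix is treated as minutes.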
##@@ TR: need to add some error handling here
if timeString[-1] == 's':
interval = float(timeString[:-1])
elif timeString[-1] == 'm':
interval = float(timeString[:-1])*60
elif timeString[-1] == 'h':
interval = float(timeString[:-1])*60*60
elif timeString[-1] == 'd':
interval = float(timeString[:-1])*60*60*24
elif timeString[-1] == 'w':
interval = float(timeString[:-1])*60*60*24*7
else: # default to minutes
interval = float(timeString)*60
return interval
def genCacheInfo(self, cacheTokenParts):
"""Decipher a placeholder cachetoken
"""
cacheInfo = {}
if cacheTokenParts['REFRESH_CACHE']:
cacheInfo['type'] = REFRESH_CACHE
cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
elif cacheTokenParts['STATIC_CACHE']:
cacheInfo['type'] = STATIC_CACHE
return cacheInfo # is empty if no cache
def genCacheInfoFromArgList(self, argList):
cacheInfo = {'type':REFRESH_CACHE}
for key, val in argList:
if val[0] in '"\'':
val = val[1:-1]
if key == 'timer':
key = 'interval'
val = self.genTimeInterval(val)
cacheInfo[key] = val
return cacheInfo
def genCheetahVar(self, nameChunks, plain=False):
if nameChunks[0][0] in self.setting('gettextTokens'):
self.addGetTextVar(nameChunks)
if self.setting('useNameMapper') and not plain:
return self.genNameMapperVar(nameChunks)
else:
return self.genPlainVar(nameChunks)
def addGetTextVar(self, nameChunks):
"""Output something that gettext can recognize.
This is a harmless side effect necessary to make gettext work when it
is scanning compiled templates for strings marked for translation.
@@TR: another marginally more efficient approach would be to put the
output in a dummy method that is never called.
"""
# @@TR: this should be in the compiler not here
self.addChunk("if False:")
self.indent()
self.addChunk(self.genPlainVar(nameChunks[:]))
self.dedent()
def genPlainVar(self, nameChunks):
"""Generate Python code for a Cheetah $var without using NameMapper
(Unified Dotted Notation with the SearchList).
"""
nameChunks.reverse()
chunk = nameChunks.pop()
pythonCode = chunk[0] + chunk[2]
while nameChunks:
chunk = nameChunks.pop()
pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
return pythonCode
def genNameMapperVar(self, nameChunks):
"""Generate valid Python code for a Cheetah $var, using NameMapper
(Unified Dotted Notation with the SearchList).
nameChunks = list of var subcomponents represented as tuples
[ (name,useAC,remainderOfExpr),
]
where:
name = the dotted name base
useAC = where NameMapper should use autocalling on namemapperPart
remainderOfExpr = any arglist, index, or slice
If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
is False, otherwise it defaults to True. It is overridden by the global
setting 'useAutocalling' if this setting is False.
EXAMPLE
------------------------------------------------------------------------
if the raw Cheetah Var is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'), # A
('d',False,'()'), # B
('x.y.z',True,''), # C
]
When this method is fed the list above it returns
VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
which can be represented as
VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
where:
VFN = NameMapper.valueForName
VFFSL = NameMapper.valueFromFrameOrSearchList
VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
SL = self.searchList()
useAC = self.setting('useAutocalling') # True in this example
A = ('a.b.c',True,'[1]')
B = ('d',False,'()')
C = ('x.y.z',True,'')
C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
'd',False)(),
'x.y.z',True)
= VFN(B`, name='x.y.z', executeCallables=True)
B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
Note, if the compiler setting useStackFrames=False (default is true)
then
A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
This option allows Cheetah to be used with Psyco, which doesn't support
stack frame introspection.
"""
defaultUseAC = self.setting('useAutocalling')
useSearchList = self.setting('useSearchList')
nameChunks.reverse()
name, useAC, remainder = nameChunks.pop()
if not useSearchList:
firstDotIdx = name.find('.')
if firstDotIdx != -1 and firstDotIdx < len(name):
beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
pythonCode = ('VFN(' + beforeFirstDot +
',"' + afterDot +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = name+remainder
elif self.setting('useStackFrames'):
pythonCode = ('VFFSL(SL,'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
##
while nameChunks:
name, useAC, remainder = nameChunks.pop()
pythonCode = ('VFN(' + pythonCode +
',"' + name +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
def __init__(self, methodName, classCompiler,
initialMethodComment=None,
decorator=None):
self._settingsManager = classCompiler
self._classCompiler = classCompiler
self._moduleCompiler = classCompiler._moduleCompiler
self._methodName = methodName
self._initialMethodComment = initialMethodComment
self._setupState()
self._decorator = decorator
def setting(self, key):
return self._settingsManager.setting(key)
def _setupState(self):
self._indent = self.setting('indentationStep')
self._indentLev = self.setting('initialMethIndentLevel')
self._pendingStrConstChunks = []
self._methodSignature = None
self._methodDef = None
self._docStringLines = []
self._methodBodyChunks = []
self._cacheRegionsStack = []
self._callRegionsStack = []
self._captureRegionsStack = []
self._filterRegionsStack = []
self._isErrorCatcherOn = False
self._hasReturnStatement = False
self._isGenerator = False
def cleanupState(self):
"""Called by the containing class compiler instance
"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
## methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev +=1
def dedent(self):
if self._indentLev:
self._indentLev -=1
else:
raise Error('Attempt to dedent when the indentLev is 0')
## methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody() )
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join( self._methodBodyChunks )
def docString(self):
if not self._docStringLines:
return ''
ind = self._indent*2
docStr = (ind + '"""\n' + ind +
('\n' + ind).join([ln.replace('"""',"'''") for ln in self._docStringLines]) +
'\n' + ind + '"""\n')
return docStr
## methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%','%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
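        # Emits code that routes the expression through the current output filter.
        # With the 'alwaysFilterNone' setting the generated code assigns to _v first
        # and skips the write entirely when the value is None.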
if filterArgs is None:
filterArgs = ''
if self.setting('includeRawExprInFilterArgs') and rawExpr:
filterArgs += ', rawExpr=%s'%repr(rawExpr)
if self.setting('alwaysFilterNone'):
if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
self.addChunk("_v = %s # %r"%(chunk, rawExpr))
if lineCol:
self.appendToPrevChunk(' on line %s, col %s'%lineCol)
else:
self.addChunk("_v = %s"%chunk)
if self.setting('useFilters'):
self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
else:
self.addChunk("if _v is not None: write(str(_v))")
else:
if self.setting('useFilters'):
self.addChunk("write(_filter(%s%s))"%(chunk,filterArgs))
else:
self.addChunk("write(str(%s))"%chunk)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
def _unescapeCheetahVars(self, theString):
"""Unescape any escaped Cheetah \$vars in the string.
"""
token = self.setting('cheetahVarStartToken')
return theString.replace('\\' + token, token)
def _unescapeDirectives(self, theString):
"""Unescape any escaped Cheetah \$vars in the string.
"""
token = self.setting('directiveStartToken')
return theString.replace('\\' + token, token)
def commitStrConst(self):
"""Add the code for outputting the pending strConst without chopping off
any whitespace from it.
"""
if self._pendingStrConstChunks:
strConst = self._unescapeCheetahVars(''.join(self._pendingStrConstChunks))
strConst = self._unescapeDirectives(strConst)
self._pendingStrConstChunks = []
if not strConst:
return
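            # Short constants are emitted via repr(); longer multi-line runs are
            # wrapped in triple-quoted strings with quotes and backslashes escaped.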
if self.setting('reprShortStrConstants') and \
strConst.count('\n') < self.setting('reprNewlineThreshold'):
self.addWriteChunk( repr(strConst).replace('\\012','\\n'))
else:
strConst = strConst.replace('\\','\\\\').replace("'''","'\'\'\'")
if strConst[0] == "'":
strConst = '\\' + strConst
if strConst[-1] == "'":
strConst = strConst[:-1] + '\\' + strConst[-1]
self.addWriteChunk("'''" + strConst + "'''" )
def handleWSBeforeDirective(self):
"""Truncate the pending strCont to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def isErrorCatcherOn(self):
return self._isErrorCatcherOn
def turnErrorCatcherOn(self):
self._isErrorCatcherOn = True
def turnErrorCatcherOff(self):
self._isErrorCatcherOn = False
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm)
def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
cacheTokenParts, lineCol,
silentMode=False):
cacheInfo = self.genCacheInfo(cacheTokenParts)
if cacheInfo:
cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
if self.isErrorCatcherOn():
methodName = self._classCompiler.addErrorCatcherCall(
expr, rawCode=rawPlaceholder, lineCol=lineCol)
expr = 'self.' + methodName + '(localsDict=locals())'
if silentMode:
self.addChunk('try:')
self.indent()
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
self.dedent()
self.addChunk('except NotFound: pass')
else:
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
if self.setting('outputRowColComments'):
self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
if cacheInfo:
self.endCacheRegion()
def addSilent(self, expr):
self.addChunk( expr )
def addEcho(self, expr, rawExpr=None):
self.addFilteredChunk(expr, rawExpr=rawExpr)
def addSet(self, expr, exprComponents, setStyle):
if setStyle is SET_GLOBAL:
(LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
exprComponents.OP,
exprComponents.RVALUE)
# we need to split the LVALUE to deal with globalSetVars
splitPos1 = LVALUE.find('.')
splitPos2 = LVALUE.find('[')
if splitPos1 > 0 and splitPos2==-1:
splitPos = splitPos1
elif splitPos1 > 0 and splitPos1 < max(splitPos2,0):
splitPos = splitPos1
else:
splitPos = splitPos2
if splitPos >0:
primary = LVALUE[:splitPos]
secondary = LVALUE[splitPos:]
else:
primary = LVALUE
secondary = ''
LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
if setStyle is SET_MODULE:
self._moduleCompiler.addModuleGlobal(expr)
else:
self.addChunk(expr)
def addInclude(self, sourceExpr, includeFrom, isRaw):
self.addChunk('self._handleCheetahInclude(' + sourceExpr +
', trans=trans, ' +
'includeFrom="' + includeFrom + '", raw=' +
repr(isRaw) + ')')
def addWhile(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addFor(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addRepeat(self, expr, lineCol=None):
#the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)' % (self._repeatCount,expr), lineCol=lineCol)
def addIndentingDirective(self, expr, lineCol=None):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
self.commitStrConst()
if dedent:
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addOneLineIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
"""For a single-lie #if ... then .... else ... directive
<condition> then <trueExpr> else <falseExpr>
"""
self.addIndentingDirective(conditionExpr, lineCol=lineCol)
self.addFilteredChunk(trueExpr)
self.dedent()
self.addIndentingDirective('else')
self.addFilteredChunk(falseExpr)
self.dedent()
def addElse(self, expr, dedent=True, lineCol=None):
expr = re.sub(r'else[ \f\t]+if','elif', expr)
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addElif(self, expr, dedent=True, lineCol=None):
self.addElse(expr, dedent=dedent, lineCol=lineCol)
def addUnless(self, expr, lineCol=None):
self.addIf('if not (' + expr + ')')
def addClosure(self, functionName, argsList, parserComment):
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
self.addIndentingDirective(signature)
self.addChunk('#'+parserComment)
def addTry(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addExcept(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addFinally(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addReturn(self, expr):
assert not self._isGenerator
self.addChunk(expr)
self._hasReturnStatement = True
def addYield(self, expr):
assert not self._hasReturnStatement
self._isGenerator = True
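        # A bare '#yield' (nothing after the keyword) emits code that yields the
        # output buffered so far and then starts a fresh DummyTransaction buffer;
        # the generated code raises TypeError if a real trans was passed in.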
if expr.replace('yield','').strip():
self.addChunk(expr)
else:
self.addChunk('if _dummyTrans:')
self.indent()
self.addChunk('yield trans.response().getvalue()')
self.addChunk('trans = DummyTransaction()')
self.addChunk('write = trans.response().write')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk(
'raise TypeError("This method cannot be called with a trans arg")')
self.dedent()
def addPass(self, expr):
self.addChunk(expr)
def addDel(self, expr):
self.addChunk(expr)
def addAssert(self, expr):
self.addChunk(expr)
def addRaise(self, expr):
self.addChunk(expr)
def addBreak(self, expr):
self.addChunk(expr)
def addContinue(self, expr):
self.addChunk(expr)
def addPSP(self, PSP):
self.commitStrConst()
autoIndent = False
if PSP[0] == '=':
PSP = PSP[1:]
if PSP:
self.addWriteChunk('_filter(' + PSP + ')')
return
elif PSP.lower() == 'end':
self.dedent()
return
elif PSP[-1] == '$':
autoIndent = True
PSP = PSP[:-1]
elif PSP[-1] == ':':
autoIndent = True
for line in PSP.splitlines():
self.addChunk(line)
if autoIndent:
self.indent()
def nextCacheID(self):
return ('_'+str(random.randrange(100, 999))
+ str(random.randrange(10000, 99999)))
def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
# @@TR: we should add some runtime logging to this
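        # Generated code: look up (or create) the CacheRegion/CacheItem for this
        # placeholder, replay the stored output while it is still fresh, and
        # otherwise redirect 'write' into a DummyTransaction so the regenerated
        # output can be stored in the cache.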
ID = self.nextCacheID()
interval = cacheInfo.get('interval',None)
test = cacheInfo.get('test',None)
customID = cacheInfo.get('id',None)
if customID:
ID = customID
varyBy = cacheInfo.get('varyBy', repr(ID))
self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
# @@TR: add this to a special class var as well
self.addChunk('')
self.addChunk('## START CACHE REGION: ID='+ID+
'. line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_RECACHE_%(ID)s = False'%locals())
self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
+ repr(ID)
+ ', cacheInfo=%r'%cacheInfo
+ ')')
self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
+varyBy+')')
self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
if test:
self.addChunk('if ' + test + ':')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
#self.addChunk('print "DEBUG"+"-"*50')
self.addChunk('try:')
self.indent()
self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
self.dedent()
self.addChunk('except KeyError:')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
#self.addChunk('print "DEBUG"+"*"*50')
self.dedent()
self.addChunk('else:')
self.indent()
self.addWriteChunk('_output')
self.addChunk('del _output')
self.dedent()
self.dedent()
self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
if interval:
self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
+ str(interval) + ")")
def endCacheRegion(self):
ID = self._cacheRegionsStack.pop()
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
self.addWriteChunk('_cacheData')
self.addChunk('del _cacheData')
self.addChunk('del _cacheCollector_%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.dedent()
self.addChunk('## END CACHE REGION: '+ID)
self.addChunk('')
def nextCallRegionID(self):
return self.nextCacheID()
def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
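        # The #call directive buffers everything written inside it into a
        # DummyTransaction; endCallRegion() later hands the captured text to
        # functionName, either as one positional argument or as keyword arguments
        # when #arg sections are used (see setCallArg).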
class CallDetails: pass
callDetails = CallDetails()
callDetails.ID = ID = self.nextCallRegionID()
callDetails.functionName = functionName
callDetails.args = args
callDetails.lineCol = lineCol
callDetails.usesKeywordArgs = False
self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler
self.addChunk('## START %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def setCallArg(self, argName, lineCol):
ID, callDetails = self._callRegionsStack[-1]
if callDetails.usesKeywordArgs:
self._endCallArg()
else:
callDetails.usesKeywordArgs = True
self.addChunk('_callKws%(ID)s = {}'%locals())
self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
callDetails.currentArgname = argName
def _endCallArg(self):
ID, callDetails = self._callRegionsStack[-1]
currCallArg = callDetails.currentArgname
self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
' _callCollector%(ID)s.response().getvalue()')%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def endCallRegion(self, regionTitle='CALL'):
ID, callDetails = self._callRegionsStack[-1]
functionName, initialKwArgs, lineCol = (
callDetails.functionName, callDetails.args, callDetails.lineCol)
def reset(ID=ID):
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
if not callDetails.usesKeywordArgs:
reset()
self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
if initialKwArgs:
initialKwArgs = ', '+initialKwArgs
self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
self.addChunk('del _callArgVal%(ID)s'%locals())
else:
if initialKwArgs:
initialKwArgs = initialKwArgs+', '
self._endCallArg()
reset()
self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
self.addChunk('del _callKws%(ID)s'%locals())
self.addChunk('## END %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('')
self._callRegionsStack.pop() # attrib of current methodCompiler
def nextCaptureRegionID(self):
return self.nextCacheID()
def startCaptureRegion(self, assignTo, lineCol):
class CaptureDetails: pass
captureDetails = CaptureDetails()
captureDetails.ID = ID = self.nextCaptureRegionID()
captureDetails.assignTo = assignTo
captureDetails.lineCol = lineCol
self._captureRegionsStack.append((ID,captureDetails)) # attrib of current methodCompiler
self.addChunk('## START CAPTURE REGION: '+ID
+' '+assignTo
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())
def endCaptureRegion(self):
ID, captureDetails = self._captureRegionsStack.pop()
assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.addChunk('del _captureCollector%(ID)s'%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
def setErrorCatcher(self, errorCatcherName):
self.turnErrorCatcherOn()
self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
errorCatcherName + '"]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
+ errorCatcherName + '"] = ErrorCatchers.'
+ errorCatcherName + '(self)'
)
self.dedent()
def nextFilterRegionID(self):
return self.nextCacheID()
def setFilter(self, theFilter, isKlass):
class FilterDetails: pass
filterDetails = FilterDetails()
filterDetails.ID = ID = self.nextFilterRegionID()
filterDetails.theFilter = theFilter
filterDetails.isKlass = isKlass
self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler
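        # theFilter is either a filter class available in the template's own
        # namespace (isKlass=True) or the name of a filter class that the generated
        # code looks up lazily in self._CHEETAH__filters / the filters library.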
self.addChunk('_orig_filter%(ID)s = _filter'%locals())
if isKlass:
self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
'(self).filter')
else:
if theFilter.lower() == 'none':
self.addChunk('_filter = self._CHEETAH__initialFilter')
else:
# is string representing the name of a builtin filter
self.addChunk('filterName = ' + repr(theFilter))
self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter'
+' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
+ 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
self.dedent()
def closeFilterBlock(self):
ID, filterDetails = self._filterRegionsStack.pop()
#self.addChunk('_filter = self._CHEETAH__initialFilter')
self.addChunk('_filter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
def _setupState(self):
MethodCompiler._setupState(self)
self._argStringList = [ ("self",None) ]
self._streamingEnabled = True
def _useKWsDictArgForPassingTrans(self):
alreadyHasTransArg = [argname for argname,defval in self._argStringList
if argname=='trans']
return (self.methodName()!='respond'
and not alreadyHasTransArg
and self.setting('useKWsDictArgForPassingTrans'))
def cleanupState(self):
MethodCompiler.cleanupState(self)
self.commitStrConst()
if self._cacheRegionsStack:
self.endCacheRegion()
if self._callRegionsStack:
self.endCallRegion()
if self._streamingEnabled:
kwargsName = None
positionalArgsListName = None
for argname,defval in self._argStringList:
if argname.strip().startswith('**'):
kwargsName = argname.strip().replace('**','')
break
elif argname.strip().startswith('*'):
positionalArgsListName = argname.strip().replace('*','')
if not kwargsName and self._useKWsDictArgForPassingTrans():
kwargsName = 'KWS'
self.addMethArg('**KWS', None)
self._kwargsName = kwargsName
if not self._useKWsDictArgForPassingTrans():
if not kwargsName and not positionalArgsListName:
self.addMethArg('trans', 'None')
else:
self._streamingEnabled = False
self._indentLev = self.setting('initialMethIndentLevel')
mainBodyChunks = self._methodBodyChunks
self._methodBodyChunks = []
self._addAutoSetupCode()
self._methodBodyChunks.extend(mainBodyChunks)
self._addAutoCleanupCode()
def _addAutoSetupCode(self):
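        # Prepends the boilerplate every generated template method needs: resolve
        # the transaction (falling back to a DummyTransaction buffer), then bind
        # the 'write', 'SL' and '_filter' locals used by the generated body.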
if self._initialMethodComment:
self.addChunk(self._initialMethodComment)
if self._streamingEnabled:
if self._useKWsDictArgForPassingTrans() and self._kwargsName:
self.addChunk('trans = %s.get("trans")'%self._kwargsName)
self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
' and not callable(self.transaction)):')
self.indent()
self.addChunk('trans = self.transaction'
' # is None unless self.awake() was called')
self.dedent()
self.addChunk('if not trans:')
self.indent()
self.addChunk('trans = DummyTransaction()')
if self.setting('autoAssignDummyTransactionToSelf'):
self.addChunk('self.transaction = trans')
self.addChunk('_dummyTrans = True')
self.dedent()
self.addChunk('else: _dummyTrans = False')
else:
self.addChunk('trans = DummyTransaction()')
self.addChunk('_dummyTrans = True')
self.addChunk('write = trans.response().write')
if self.setting('useNameMapper'):
argNames = [arg[0] for arg in self._argStringList]
allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
if allowSearchListAsMethArg and 'SL' in argNames:
pass
elif allowSearchListAsMethArg and 'searchList' in argNames:
self.addChunk('SL = searchList')
else:
self.addChunk('SL = self._CHEETAH__searchList')
if self.setting('useFilters'):
self.addChunk('_filter = self._CHEETAH__currentFilter')
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## START - generated method body')
self.addChunk('')
def _addAutoCleanupCode(self):
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## END - generated method body')
self.addChunk('')
if not self._isGenerator:
self.addStop()
self.addChunk('')
def addStop(self, expr=None):
self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')
def addMethArg(self, name, defVal=None):
self._argStringList.append( (name,defVal) )
def methodSignature(self):
argStringChunks = []
for arg in self._argStringList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = (', ').join(argStringChunks)
output = []
if self._decorator:
output.append(self._indent + self._decorator+'\n')
output.append(self._indent + "def "
+ self.methodName() + "(" +
argString + "):\n\n")
return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n','\n'+' '*8)
class ClassCompiler(GenUtils):
methodCompilerClass = AutoMethodCompiler
methodCompilerClassForInit = MethodCompiler
def __init__(self, className, mainMethodName='respond',
moduleCompiler=None,
fileName=None,
settingsManager=None):
self._settingsManager = settingsManager
self._fileName = fileName
self._className = className
self._moduleCompiler = moduleCompiler
self._mainMethodName = mainMethodName
self._setupState()
methodCompiler = self._spawnMethodCompiler(
mainMethodName,
initialMethodComment='## CHEETAH: main method generated for this template')
self._setActiveMethodCompiler(methodCompiler)
if fileName and self.setting('monitorSrcFile'):
self._addSourceFileMonitoring(fileName)
def setting(self, key):
return self._settingsManager.setting(key)
def __getattr__(self, name):
"""Provide access to the methods and attributes of the MethodCompiler
at the top of the activeMethods stack: one-way namespace sharing
WARNING: Use .setMethods to assign the attributes of the MethodCompiler
from the methods of this class!!! or you will be assigning to attributes
of this object instead."""
if self.__dict__.has_key(name):
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
return getattr(self._activeMethodsList[-1], name)
else:
raise AttributeError, name
def _setupState(self):
self._classDef = None
self._decoratorForNextMethod = None
self._activeMethodsList = [] # stack while parsing/generating
self._finishedMethodsList = [] # store by order
self._methodsIndex = {} # store by name
self._baseClass = 'Template'
self._classDocStringLines = []
# printed after methods in the gen class def:
self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
self._generatedAttribs.append(
'_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
self._generatedAttribs.append(
'_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')
if self.setting('templateMetaclass'):
self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
self._initMethChunks = []
self._blockMetaData = {}
self._errorCatcherCount = 0
self._placeholderToErrorCatcherMap = {}
def cleanupState(self):
while self._activeMethodsList:
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
self._setupInitMethod()
if self._mainMethodName == 'respond':
if self.setting('setup__str__method'):
self._generatedAttribs.append('def __str__(self): return self.respond()')
self.addAttribute('_mainCheetahMethod_for_' + self._className +
'= ' + repr(self._mainMethodName) )
def _setupInitMethod(self):
__init__ = self._spawnMethodCompiler('__init__',
klass=self.methodCompilerClassForInit)
__init__.setMethodSignature("def __init__(self, *args, **KWs)")
__init__.addChunk("%s.__init__(self, *args, **KWs)" % self._baseClass)
__init__.addChunk(_initMethod_initCheetah%{'className':self._className})
for chunk in self._initMethChunks:
__init__.addChunk(chunk)
__init__.cleanupState()
self._swallowMethodCompiler(__init__, pos=0)
def _addSourceFileMonitoring(self, fileName):
# @@TR: this stuff needs auditing for Cheetah 2.0
# the first bit is added to init
self.addChunkToInit('self._filePath = ' + repr(fileName))
self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )
# the rest is added to the main output method of the class ('mainMethod')
self.addChunk('if exists(self._filePath) and ' +
'getmtime(self._filePath) > self._fileMtime:')
self.indent()
        self.addChunk('self._compile(file=self._filePath, moduleName=' + repr(self._className) + ')')
self.addChunk(
'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
')(trans=trans))')
self.addStop()
self.dedent()
def setClassName(self, name):
self._className = name
def className(self):
return self._className
def setBaseClass(self, baseClassName):
self._baseClass = baseClassName
def setMainMethodName(self, methodName):
if methodName == self._mainMethodName:
return
## change the name in the methodCompiler and add new reference
mainMethod = self._methodsIndex[self._mainMethodName]
mainMethod.setMethodName(methodName)
self._methodsIndex[methodName] = mainMethod
## make sure that fileUpdate code still works properly:
chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
chunks = mainMethod._methodBodyChunks
if chunkToChange in chunks:
for i in range(len(chunks)):
if chunks[i] == chunkToChange:
chunks[i] = ('write(self.' + methodName + '(trans=trans))')
## get rid of the old reference and update self._mainMethodName
del self._methodsIndex[self._mainMethodName]
self._mainMethodName = methodName
def setMainMethodArgs(self, argsList):
mainMethodCompiler = self._methodsIndex[self._mainMethodName]
for argName, defVal in argsList:
mainMethodCompiler.addMethArg(argName, defVal)
def _spawnMethodCompiler(self, methodName, klass=None,
initialMethodComment=None):
if klass is None:
klass = self.methodCompilerClass
decorator = None
if self._decoratorForNextMethod:
decorator = self._decoratorForNextMethod
self._decoratorForNextMethod = None
methodCompiler = klass(methodName, classCompiler=self,
decorator=decorator,
initialMethodComment=initialMethodComment)
self._methodsIndex[methodName] = methodCompiler
return methodCompiler
def _setActiveMethodCompiler(self, methodCompiler):
self._activeMethodsList.append(methodCompiler)
def _getActiveMethodCompiler(self):
return self._activeMethodsList[-1]
def _popActiveMethodCompiler(self):
return self._activeMethodsList.pop()
def _swallowMethodCompiler(self, methodCompiler, pos=None):
methodCompiler.cleanupState()
if pos==None:
self._finishedMethodsList.append( methodCompiler )
else:
self._finishedMethodsList.insert(pos, methodCompiler)
return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
methodCompiler = self._spawnMethodCompiler(
methodName, initialMethodComment=parserComment)
self._setActiveMethodCompiler(methodCompiler)
for argName, defVal in argsList:
methodCompiler.addMethArg(argName, defVal)
def _finishedMethods(self):
return self._finishedMethodsList
def addDecorator(self, decoratorExpr):
"""Set the decorator to be used with the next method in the source.
See _spawnMethodCompiler() and MethodCompiler for the details of how
this is used.
"""
self._decoratorForNextMethod = decoratorExpr
def addClassDocString(self, line):
self._classDocStringLines.append( line.replace('%','%%'))
def addChunkToInit(self,chunk):
self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
## first test to make sure that the user hasn't used any fancy Cheetah syntax
# (placeholders, directives, etc.) inside the expression
if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
raise ParseError(self,
'Invalid #attr directive.' +
' It should only contain simple Python literals.')
## now add the attribute
self._generatedAttribs.append(attribExpr)
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
if self._placeholderToErrorCatcherMap.has_key(rawCode):
methodName = self._placeholderToErrorCatcherMap[rawCode]
if not self.setting('outputRowColComments'):
self._methodsIndex[methodName].addMethDocString(
'plus at line %s, col %s'%lineCol)
return methodName
self._errorCatcherCount += 1
methodName = '__errorCatcher' + str(self._errorCatcherCount)
self._placeholderToErrorCatcherMap[rawCode] = methodName
catcherMeth = self._spawnMethodCompiler(
methodName,
klass=MethodCompiler,
initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
' at line %s, col %s'%lineCol + '.')
)
catcherMeth.setMethodSignature('def ' + methodName +
'(self, localsDict={})')
# is this use of localsDict right?
catcherMeth.addChunk('try:')
catcherMeth.indent()
catcherMeth.addChunk("return eval('''" + codeChunk +
"''', globals(), localsDict)")
catcherMeth.dedent()
catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
catcherMeth.indent()
catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
repr(codeChunk) + " , rawCode= " +
repr(rawCode) + " , lineCol=" + str(lineCol) +")")
catcherMeth.cleanupState()
self._swallowMethodCompiler(catcherMeth)
return methodName
def closeDef(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
def closeBlock(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
methodName = methCompiler.methodName()
if self.setting('includeBlockMarkers'):
endMarker = self.setting('blockMarkerEnd')
methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
self._swallowMethodCompiler(methCompiler)
#metaData = self._blockMetaData[methodName]
#rawDirective = metaData['raw']
#lineCol = metaData['lineCol']
## insert the code to call the block, caching if #cache directive is on
codeChunk = 'self.' + methodName + '(trans=trans)'
self.addChunk(codeChunk)
#self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
#if self.setting('outputRowColComments'):
# self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
## code wrapping methods
def classDef(self):
if self._classDef:
return self._classDef
else:
return self.wrapClassDef()
__str__ = classDef
def wrapClassDef(self):
ind = self.setting('indentationStep')
classDefChunks = [self.classSignature(),
self.classDocstring(),
]
def addMethods():
classDefChunks.extend([
ind + '#'*50,
ind + '## CHEETAH GENERATED METHODS',
'\n',
self.methodDefs(),
])
def addAttributes():
classDefChunks.extend([
ind + '#'*50,
ind + '## CHEETAH GENERATED ATTRIBUTES',
'\n',
self.attributes(),
])
if self.setting('outputMethodsBeforeAttributes'):
addMethods()
addAttributes()
else:
addAttributes()
addMethods()
classDef = '\n'.join(classDefChunks)
self._classDef = classDef
return classDef
def classSignature(self):
return "class %s(%s):" % (self.className(), self._baseClass)
def classDocstring(self):
if not self._classDocStringLines:
return ''
ind = self.setting('indentationStep')
docStr = ('%(ind)s"""\n%(ind)s' +
'\n%(ind)s'.join(self._classDocStringLines) +
'\n%(ind)s"""\n'
) % {'ind':ind}
return docStr
def methodDefs(self):
methodDefs = [str(methGen) for methGen in self._finishedMethods() ]
return '\n\n'.join(methodDefs)
def attributes(self):
attribs = [self.setting('indentationStep') + str(attrib)
for attrib in self._generatedAttribs ]
return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None,
moduleName='DynamicallyCompiledCheetahTemplate',
mainClassName=None, # string
mainMethodName=None, # string
baseclassName=None, # string
extraImportStatements=None, # list of strings
settings=None # dict
):
SettingsManager.__init__(self)
if settings:
self.updateSettings(settings)
# disable useStackFrames if the C version of NameMapper isn't compiled
# it's painfully slow in the Python version and bites Windows users all
# the time:
if not NameMapper.C_VERSION:
if not sys.platform.startswith('java'):
warnings.warn(
"\nYou don't have the C version of NameMapper installed! "
"I'm disabling Cheetah's useStackFrames option as it is "
"painfully slow with the Python version of NameMapper. "
"You should get a copy of Cheetah with the compiled C version of NameMapper."
)
self.setSetting('useStackFrames', False)
self._compiled = False
self._moduleName = moduleName
if not mainClassName:
self._mainClassName = moduleName
else:
self._mainClassName = mainClassName
self._mainMethodNameArg = mainMethodName
if mainMethodName:
self.setSetting('mainMethodName', mainMethodName)
self._baseclassName = baseclassName
self._filePath = None
self._fileMtime = None
if source and file:
raise TypeError("Cannot compile from a source string AND file.")
elif isinstance(file, types.StringType) or isinstance(file, types.UnicodeType): # it's a filename.
f = open(file) # Raises IOError.
source = f.read()
f.close()
self._filePath = file
self._fileMtime = os.path.getmtime(file)
elif hasattr(file, 'read'):
source = file.read() # Can't set filename or mtime--they're not accessible.
elif file:
raise TypeError("'file' argument must be a filename string or file-like object")
if self._filePath:
self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
self._fileBaseNameRoot, self._fileBaseNameExt = \
os.path.splitext(self._fileBaseName)
if not (isinstance(source, str) or isinstance(source, unicode)):
source = str( source )
# by converting to string here we allow objects such as other Templates
# to be passed in
# Handle the #indent directive by converting it to other directives.
# (Over the long term we'll make it a real directive.)
if source == "":
warnings.warn("You supplied an empty string for the source!", )
if source.find('#indent') != -1: #@@TR: undocumented hack
source = indentize(source)
self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
self._setupCompilerState()
def __getattr__(self, name):
"""Provide one-way access to the methods and attributes of the
ClassCompiler, and thereby the MethodCompilers as well.
WARNING: Use .setMethods to assign the attributes of the ClassCompiler
from the methods of this class!!! or you will be assigning to attributes
of this object instead.
"""
if self.__dict__.has_key(name):
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
return getattr(self._activeClassesList[-1], name)
else:
raise AttributeError, name
def _initializeSettings(self):
self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))
def _setupCompilerState(self):
self._activeClassesList = []
        self._finishedClassesList = [] # listed in order
self._finishedClassIndex = {} # listed by name
self._moduleDef = None
self._moduleShBang = '#!/usr/bin/env python'
self._moduleEncoding = 'ascii'
self._moduleEncodingStr = ''
self._moduleHeaderLines = []
self._moduleDocStringLines = []
self._specialVars = {}
self._importStatements = [
"import sys",
"import os",
"import os.path",
"from os.path import getmtime, exists",
"import time",
"import types",
"import __builtin__",
"from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
"from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
"from Cheetah.Template import Template",
"from Cheetah.DummyTransaction import DummyTransaction",
"from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
"from Cheetah.CacheRegion import CacheRegion",
"import Cheetah.Filters as Filters",
"import Cheetah.ErrorCatchers as ErrorCatchers",
]
self._importedVarNames = ['sys',
'os',
'os.path',
'time',
'types',
'Template',
'DummyTransaction',
'NotFound',
'Filters',
'ErrorCatchers',
'CacheRegion',
]
self._moduleConstants = [
"try:",
" True, False",
"except NameError:",
" True, False = (1==1), (1==0)",
"VFFSL=valueFromFrameOrSearchList",
"VFSL=valueFromSearchList",
"VFN=valueForName",
"currentTime=time.time",
]
def compile(self):
classCompiler = self._spawnClassCompiler(self._mainClassName)
if self._baseclassName:
classCompiler.setBaseClass(self._baseclassName)
self._addActiveClassCompiler(classCompiler)
self._parser.parse()
self._swallowClassCompiler(self._popActiveClassCompiler())
self._compiled = True
self._parser.cleanup()
def _spawnClassCompiler(self, className, klass=None):
if klass is None:
klass = self.classCompilerClass
classCompiler = klass(className,
moduleCompiler=self,
mainMethodName=self.setting('mainMethodName'),
fileName=self._filePath,
settingsManager=self,
)
return classCompiler
def _addActiveClassCompiler(self, classCompiler):
self._activeClassesList.append(classCompiler)
def _getActiveClassCompiler(self):
return self._activeClassesList[-1]
def _popActiveClassCompiler(self):
return self._activeClassesList.pop()
def _swallowClassCompiler(self, classCompiler):
classCompiler.cleanupState()
self._finishedClassesList.append( classCompiler )
self._finishedClassIndex[classCompiler.className()] = classCompiler
return classCompiler
def _finishedClasses(self):
return self._finishedClassesList
def importedVarNames(self):
return self._importedVarNames
def addImportedVarNames(self, varNames):
self._importedVarNames.extend(varNames)
## methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
if self._mainMethodNameArg:
self.setMainMethodName(self._mainMethodNameArg)
else:
self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))
if self.setting('handlerForExtendsDirective'):
handler = self.setting('handlerForExtendsDirective')
baseClassName = handler(compiler=self, baseClassName=baseClassName)
self._getActiveClassCompiler().setBaseClass(baseClassName)
elif (not self.setting('autoImportForExtendsDirective')
or baseClassName=='object' or baseClassName in self.importedVarNames()):
self._getActiveClassCompiler().setBaseClass(baseClassName)
# no need to import
else:
##################################################
## If the #extends directive contains a classname or modulename that isn't
# in self.importedVarNames() already, we assume that we need to add
# an implied 'from ModName import ClassName' where ModName == ClassName.
# - This is the case in WebKit servlet modules.
# - We also assume that the final . separates the classname from the
# module name. This might break if people do something really fancy
# with their dots and namespaces.
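# For illustration only (the class names here are invented): an
# "#extends SkeletonPage" directive falls into the single-chunk branch and
# produces the implied import
#     from SkeletonPage import SkeletonPage
# while "#extends Comp.SkeletonPage" takes the dotted branch and produces
#     from Comp.SkeletonPage import SkeletonPage
# i.e. the final dotted segment is assumed to name both the class and the
# module that defines it.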
chunks = baseClassName.split('.')
if len(chunks)==1:
self._getActiveClassCompiler().setBaseClass(baseClassName)
if baseClassName not in self.importedVarNames():
modName = baseClassName
# we assume the class name to be the module name
# and that it's not a builtin:
importStatement = "from %s import %s" % (modName, baseClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames( [baseClassName,] )
else:
needToAddImport = True
modName = chunks[0]
#print chunks, ':', self.importedVarNames()
for chunk in chunks[1:-1]:
if modName in self.importedVarNames():
needToAddImport = False
finalBaseClassName = baseClassName.replace(modName+'.', '')
self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
break
else:
modName += '.'+chunk
if needToAddImport:
modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
#if finalClassName != chunks[:-1][-1]:
if finalClassName != chunks[-2]:
# we assume the class name to be the module name
modName = '.'.join(chunks)
self._getActiveClassCompiler().setBaseClass(finalClassName)
importStatement = "from %s import %s" % (modName, finalClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames( [finalClassName,] )
def setCompilerSetting(self, key, valueExpr):
self.setSetting(key, eval(valueExpr) )
self._parser.configureParser()
def setCompilerSettings(self, keywords, settingsStr):
KWs = keywords
merge = True
if 'nomerge' in KWs:
merge = False
if 'reset' in KWs:
# @@TR: this is actually caught by the parser at the moment.
# subject to change in the future
self._initializeSettings()
self._parser.configureParser()
return
elif 'python' in KWs:
settingsReader = self.updateSettingsFromPySrcStr
# this comes from SettingsManager
else:
# this comes from SettingsManager
settingsReader = self.updateSettingsFromConfigStr
settingsReader(settingsStr)
self._parser.configureParser()
def setShBang(self, shBang):
self._moduleShBang = shBang
def setModuleEncoding(self, encoding):
self._moduleEncoding = encoding
self._moduleEncodingStr = '# -*- coding: %s -*-' %encoding
def getModuleEncoding(self):
return self._moduleEncoding
def addModuleHeader(self, line):
"""Adds a header comment to the top of the generated module.
"""
self._moduleHeaderLines.append(line)
def addModuleDocString(self, line):
"""Adds a line to the generated module docstring.
"""
self._moduleDocStringLines.append(line)
def addModuleGlobal(self, line):
"""Adds a line of global module code. It is inserted after the import
statements and Cheetah default module constants.
"""
self._moduleConstants.append(line)
def addSpecialVar(self, basename, contents, includeUnderscores=True):
"""Adds module __specialConstant__ to the module globals.
"""
name = includeUnderscores and '__'+basename+'__' or basename
self._specialVars[name] = contents.strip()
def addImportStatement(self, impStatement):
self._importStatements.append(impStatement)
#@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases
importVarNames = [var for var in importVarNames if var!='*']
self.addImportedVarNames(importVarNames) #used by #extend for auto-imports
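# Rough illustration of the name extraction above (the statement is
# hypothetical): for "from Cheetah.Template import Template as T, Version"
# the text after 'import' is split on ',' and the last word of each part is
# kept, giving importVarNames == ['T', 'Version']; a bare '*' import is
# dropped by the final filter.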
def addAttribute(self, attribName, expr):
self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)
def addComment(self, comm):
if re.match(r'#+$',comm): # skip bar comments
return
specialVarMatch = specialVarRE.match(comm)
if specialVarMatch:
# @@TR: this is a bit hackish and is being replaced with
# #set module varName = ...
return self.addSpecialVar(specialVarMatch.group(1),
comm[specialVarMatch.end():])
elif comm.startswith('doc:'):
addLine = self.addMethDocString
comm = comm[len('doc:'):].strip()
elif comm.startswith('doc-method:'):
addLine = self.addMethDocString
comm = comm[len('doc-method:'):].strip()
elif comm.startswith('doc-module:'):
addLine = self.addModuleDocString
comm = comm[len('doc-module:'):].strip()
elif comm.startswith('doc-class:'):
addLine = self.addClassDocString
comm = comm[len('doc-class:'):].strip()
elif comm.startswith('header:'):
addLine = self.addModuleHeader
comm = comm[len('header:'):].strip()
else:
addLine = self.addMethComment
for line in comm.splitlines():
addLine(line)
## methods for module code wrapping
def getModuleCode(self):
if not self._compiled:
self.compile()
if self._moduleDef:
return self._moduleDef
else:
return self.wrapModuleDef()
__str__ = getModuleCode
def wrapModuleDef(self):
self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
if self._filePath:
timestamp = self.timestamp(self._fileMtime)
self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
else:
self.addModuleGlobal('__CHEETAH_src__ = None')
self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')
moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %%s. Templates compiled before version %%s must be recompiled.'%%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
%(classes)s
## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)
%(footer)s
""" % {'header':self.moduleHeader(),
'docstring':self.moduleDocstring(),
'specialVars':self.specialVars(),
'imports':self.importStatements(),
'constants':self.moduleConstants(),
'classes':self.classDefs(),
'footer':self.moduleFooter(),
'mainClassName':self._mainClassName,
}
self._moduleDef = moduleDef
return moduleDef
def timestamp(self, theTime=None):
if not theTime:
theTime = time.time()
return time.asctime(time.localtime(theTime))
def moduleHeader(self):
header = self._moduleShBang + '\n'
header += self._moduleEncodingStr + '\n'
if self._moduleHeaderLines:
offSet = self.setting('commentOffset')
header += (
'#' + ' '*offSet +
('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n')
return header
def moduleDocstring(self):
if not self._moduleDocStringLines:
return ''
return ('"""' +
'\n'.join(self._moduleDocStringLines) +
'\n"""\n')
def specialVars(self):
chunks = []
theVars = self._specialVars
keys = theVars.keys()
keys.sort()
for key in keys:
chunks.append(key + ' = ' + repr(theVars[key]) )
return '\n'.join(chunks)
def importStatements(self):
return '\n'.join(self._importStatements)
def moduleConstants(self):
return '\n'.join(self._moduleConstants)
def classDefs(self):
classDefs = [str(klass) for klass in self._finishedClasses() ]
return '\n\n'.join(classDefs)
def moduleFooter(self):
return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler
| 39.631206 | 117 | 0.585323 | 71,727 | 0.91685 | 0 | 0 | 0 | 0 | 0 | 0 | 24,056 | 0.307496 |
466a34ecb0421da1e44f26b4a2ebb96b4fc1273b | 1,267 | py | Python | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | ["MIT"] | 11 | 2019-02-07T16:13:59.000Z | 2021-08-14T03:53:14.000Z | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | ["MIT"] | null | null | null | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | ["MIT"] | 3 | 2019-02-07T16:14:09.000Z | 2021-08-14T05:09:17.000Z |
# -*- coding: utf-8 -*-
import compat
import unittest
import sys
from plmn.utils import *
from plmn.results import *
from plmn.modem_cmds import *
from plmn.simple_cmds import *
class SimpleCmdChecks(unittest.TestCase):
def test_simple_status_cmd(self):
SimpleCmds.simple_status_cmd()
assert Results.get_state('Simple Status') is not None
def test_simple_status_get_reg_status(self):
SimpleCmds.simple_status_get_reg_status()
def test_simple_status_is_registered(self):
assert SimpleCmds.simple_status_is_registered() is True
def test_simple_status_is_home(self):
assert SimpleCmds.simple_status_is_home() is True
assert SimpleCmds.simple_status_is_roaming() is False
@unittest.skip('Skipping this test since this is only applicable in connected state')
def test_simple_status_is_connected(self):
assert SimpleCmds.simple_status_is_connected() is True
@unittest.skip('Skipping this as this is only applicable for Roaming scenario')
def test_simple_status_is_roaming(self):
assert SimpleCmds.simple_status_is_roaming() is True
if __name__ == '__main__':
nargs = process_args()
unittest.main(argv=sys.argv[nargs:], exit=False)
Results.print_results()
| 31.675 | 89 | 0.750592 | 948 | 0.748224 | 0 | 0 | 380 | 0.299921 | 0 | 0 | 180 | 0.142068 |
466a6d9821a84e031f7dcd282011c9bf05adc133 | 13,877 | py | Python | mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | ["Apache-2.0"] | null | null | null | mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | ["Apache-2.0"] | null | null | null | mogan/tests/unit/notifications/test_notification.py | GURUIFENG9139/rocky-mogan | 6008c1d12b00e70d2cc651f7bd5d47968fc3aec7 | ["Apache-2.0"] | null | null | null |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_versionedobjects import fixture as object_fixture
from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils
class TestNotificationBase(test_base.TestCase):
@base.MoganObjectRegistry.register_if(False)
class TestObject(base.MoganObject):
VERSION = '1.0'
fields = {
'field_1': fields.StringField(),
'field_2': fields.IntegerField(),
'not_important_field': fields.IntegerField(),
}
@base.MoganObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
def populate_schema(self, source_field):
super(TestNotificationBase.TestNotificationPayload,
self).populate_schema(source_field=source_field)
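# Sketch of what the SCHEMA mapping above means for this test (behaviour
# inferred from the assertions below, not from NotificationPayloadBase
# itself): populate_schema(source_field=obj) fills payload.field_1 from
# obj.field_1 and payload.field_2 from obj.field_2, while extra_field is
# still supplied through the constructor.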
@base.MoganObjectRegistry.register_if(False)
class TestNotificationPayloadEmptySchema(
notification.NotificationPayloadBase):
VERSION = '1.0'
fields = {
'extra_field': fields.StringField(), # filled by ctor
}
@notification.notification_sample('test-update-1.json')
@notification.notification_sample('test-update-2.json')
@base.MoganObjectRegistry.register_if(False)
class TestNotification(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayload')
}
@base.MoganObjectRegistry.register_if(False)
class TestNotificationEmptySchema(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
}
expected_payload = {
'mogan_object.name': 'TestNotificationPayload',
'mogan_object.data': {
'extra_field': 'test string',
'field_1': 'test1',
'field_2': 42},
'mogan_object.version': '1.0',
'mogan_object.namespace': 'mogan'}
def setUp(self):
super(TestNotificationBase, self).setUp()
self.my_obj = self.TestObject(field_1='test1',
field_2=42,
not_important_field=13)
self.payload = self.TestNotificationPayload(
extra_field='test string')
self.payload.populate_schema(source_field=self.my_obj)
self.notification = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE,
phase=fields.NotificationPhase.START),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
def _verify_notification(self, mock_notifier, mock_context,
expected_event_type,
expected_payload):
mock_notifier.prepare.assert_called_once_with(
publisher_id='mogan-fake:fake-host')
mock_notify = mock_notifier.prepare.return_value.info
self.assertTrue(mock_notify.called)
self.assertEqual(mock_notify.call_args[0][0], mock_context)
self.assertEqual(mock_notify.call_args[1]['event_type'],
expected_event_type)
actual_payload = mock_notify.call_args[1]['payload']
self.assertJsonEqual(expected_payload, actual_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_emit_notification(self, mock_notifier):
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
self.notification.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update.start',
expected_payload=self.expected_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_emit_event_type_without_phase(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
non_populated_payload = self.TestNotificationPayload(
extra_field='test string')
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
self.assertRaises(AssertionError, noti.emit, mock_context)
mock_notifier.assert_not_called()
@mock.patch('mogan.common.rpc.NOTIFIER')
def test_empty_schema(self, mock_notifier):
non_populated_payload = self.TestNotificationPayloadEmptySchema(
extra_field='test string')
noti = self.TestNotificationEmptySchema(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(
host='fake-host', binary='mogan-fake'),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload={
'mogan_object.name': 'TestNotificationPayloadEmptySchema',
'mogan_object.data': {'extra_field': u'test string'},
'mogan_object.version': '1.0',
'mogan_object.namespace': 'mogan'})
def test_sample_decorator(self):
self.assertEqual(2, len(self.TestNotification.samples))
self.assertIn('test-update-1.json', self.TestNotification.samples)
self.assertIn('test-update-2.json', self.TestNotification.samples)
notification_object_data = {
'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}
class TestNotificationObjectVersions(test_base.TestCase):
def setUp(self):
super(test_base.TestCase, self).setUp()
base.MoganObjectRegistry.register_notification_objects()
def test_versions(self):
noti_class = base.MoganObjectRegistry.notification_classes
classes = {cls.__name__: [cls] for cls in noti_class}
checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
# Compute the difference between actual fingerprints and
# expect fingerprints. expect = actual = {} if there is no change.
expect, actual = checker.test_hashes(notification_object_data)
self.assertEqual(expect, actual,
"Some objects fields or remotable methods have been "
"modified. Please make sure the version of those "
"objects have been bumped and then update "
"expected_object_fingerprints with the new hashes. ")
def test_notification_payload_version_depends_on_the_schema(self):
@base.MoganObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
checker = object_fixture.ObjectVersionChecker(
{'TestNotificationPayload': (TestNotificationPayload,)})
old_hash = checker.get_hashes(extra_data_func=get_extra_data)
TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
'field_3')
new_hash = checker.get_hashes(extra_data_func=get_extra_data)
self.assertNotEqual(old_hash, new_hash)
def get_extra_data(obj_class):
extra_data = tuple()
# Get the SCHEMA items to add to the fingerprint
# if we are looking at a notification
if issubclass(obj_class, notification.NotificationPayloadBase):
schema_data = collections.OrderedDict(
sorted(obj_class.SCHEMA.items()))
extra_data += (schema_data,)
return extra_data
class TestServerActionNotification(test_base.TestCase):
@mock.patch('mogan.notifications.objects.server.'
'ServerActionNotification._emit')
def test_send_version_server_action(self, mock_emit):
# Make sure that the notification payload chooses the values in
# server.flavor.$value instead of server.$value
fake_server_values = db_utils.get_test_server()
server = server_obj.Server(**fake_server_values)
notification_base.notify_about_server_action(
mock.MagicMock(),
server,
'test-host',
fields.NotificationAction.CREATE,
fields.NotificationPhase.START,
'mogan-compute')
self.assertEqual('server.create.start',
mock_emit.call_args_list[0][1]['event_type'])
self.assertEqual('mogan-compute:test-host',
mock_emit.call_args_list[0][1]['publisher_id'])
payload = mock_emit.call_args_list[0][1]['payload'][
'mogan_object.data']
self.assertEqual(fake_server_values['uuid'], payload['uuid'])
self.assertEqual(fake_server_values['flavor_uuid'],
payload['flavor_uuid'])
self.assertEqual(fake_server_values['status'], payload['status'])
self.assertEqual(fake_server_values['user_id'], payload['user_id'])
self.assertEqual(fake_server_values['availability_zone'],
payload['availability_zone'])
self.assertEqual(fake_server_values['name'], payload['name'])
self.assertEqual(fake_server_values['image_uuid'],
payload['image_uuid'])
self.assertEqual(fake_server_values['project_id'],
payload['project_id'])
self.assertEqual(fake_server_values['description'],
payload['description'])
self.assertEqual(fake_server_values['power_state'],
payload['power_state'])
| 41.423881 | 79 | 0.64906 | 11,988 | 0.863875 | 0 | 0 | 8,241 | 0.59386 | 0 | 0 | 3,365 | 0.242488 |
466b2847bd0a3e11bd815c4ef8485277011347fd | 1,208 | py | Python | plash/macros/packagemanagers.py | 0xflotus/plash | 9dd66a06413d5c1f12fd9a7e7b56a05b797ad309 | ["MIT"] | null | null | null | plash/macros/packagemanagers.py | 0xflotus/plash | 9dd66a06413d5c1f12fd9a7e7b56a05b797ad309 | ["MIT"] | null | null | null | plash/macros/packagemanagers.py | 0xflotus/plash | 9dd66a06413d5c1f12fd9a7e7b56a05b797ad309 | ["MIT"] | null | null | null |
from plash.eval import eval, register_macro, shell_escape_args
@register_macro()
def defpm(name, *lines):
'define a new package manager'
@register_macro(name, group='package managers')
@shell_escape_args
def package_manager(*packages):
if not packages:
return
sh_packages = ' '.join(pkg for pkg in packages)
expanded_lines = [line.format(sh_packages) for line in lines]
return eval([['run'] + expanded_lines])
package_manager.__doc__ = "install packages with {}".format(name)
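# Illustrative expansion (package names invented): with the 'apt' entry
# defined below, calling the generated macro as apt('git', 'curl') returns
#     eval([['run', 'apt-get update', 'apt-get install -y git curl']])
# i.e. every line of the definition is formatted with the escaped,
# space-joined package list and replayed as a single 'run' action.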
eval([[
'defpm',
'apt',
'apt-get update',
'apt-get install -y {}',
], [
'defpm',
'add-apt-repository',
'apt-get install software-properties-common',
'run add-apt-repository -y {}',
], [
'defpm',
'apk',
'apk update',
'apk add {}',
], [
'defpm',
'yum',
'yum install -y {}',
], [
'defpm',
'dnf',
'dnf install -y {}',
], [
'defpm',
'pip',
'pip install {}',
], [
'defpm',
'pip3',
'pip3 install {}',
], [
'defpm',
'npm',
'npm install -g {}',
], [
'defpm',
'pacman',
'pacman -Sy --noconfirm {}',
], [
'defpm',
'emerge',
'emerge {}',
]])
| 18.875 | 69 | 0.537252 | 0 | 0 | 0 | 0 | 478 | 0.395695 | 0 | 0 | 489 | 0.404801 |
466ccc900104e36f636478253e917a965c1df4d3 | 371 | py | Python | app/schemas/email.py | waynesun09/notify-service | 768a0db264a9e57eecce283108878e24e8d3b740 | ["MIT"] | 5 | 2020-12-20T17:10:46.000Z | 2021-08-20T05:00:58.000Z | app/schemas/email.py | RedHatQE/notify-service | 579e995fae0c472f9fbd27471371a2c404d94f66 | ["MIT"] | 13 | 2021-01-07T14:17:14.000Z | 2022-01-05T20:36:36.000Z | app/schemas/email.py | RedHatQE/notify-service | 579e995fae0c472f9fbd27471371a2c404d94f66 | ["MIT"] | 1 | 2022-01-06T22:21:09.000Z | 2022-01-06T22:21:09.000Z |
from typing import Optional, List
from pydantic import BaseModel, EmailStr
from . import result
class EmailBase(BaseModel):
email: Optional[EmailStr] = None
class EmailSend(EmailBase):
msg: str
class EmailResult(BaseModel):
pre_header: Optional[str] = None
begin: Optional[str] = None
content: List[result.Result]
end: Optional[str] = None
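# Minimal usage sketch (values invented; the fields of result.Result are not
# visible in this file, so it is left as a placeholder):
#     EmailResult(
#         pre_header="Nightly run finished",
#         begin="Summary of results:",
#         content=[result.Result(...)],
#         end="See the dashboard for details",
#     )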
| 18.55 | 40 | 0.719677 | 265 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
466d26384d26ffba4886645e21f0b784ab726d0b | 470 | py | Python | example.py | ErikPel/rankedchoicevoting | 7ea9149d2d7f48d5b4c323537a701b7ccf8a8616 | ["MIT"] | 1 | 2021-11-25T07:50:10.000Z | 2021-11-25T07:50:10.000Z | example.py | ErikPel/rankedchoicevoting | 7ea9149d2d7f48d5b4c323537a701b7ccf8a8616 | ["MIT"] | null | null | null | example.py | ErikPel/rankedchoicevoting | 7ea9149d2d7f48d5b4c323537a701b7ccf8a8616 | ["MIT"] | null | null | null |
from rankedchoicevoting import Poll
candidatesA = {"Bob": 0, "Sue": 0, "Bill": 0}
#votes in array sorted by first choice to last choice
votersA = {
"a": ['Bob', 'Bill', 'Sue'],
"b": ['Sue', 'Bob', 'Bill'],
"c": ['Bill', 'Sue', 'Bob'],
"d": ['Bob', 'Bill', 'Sue'],
"f": ['Sue', 'Bob', 'Bill']
}
election = Poll(candidatesA,votersA)
election.addCandidate("Joe", 0)
election.addVoter("g",['Joe','Bob'])
print("Winner: " + election.getPollResults())
| 24.736842 | 53 | 0.576596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.408511 |
466d4b83456bbb93d38bc63179c0f99d00a30a62 | 2,422 | py | Python | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | ["Apache-2.0"] | 3 | 2015-04-08T18:41:02.000Z | 2015-10-28T09:54:47.000Z | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | ["Apache-2.0"] | null | null | null | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | ["Apache-2.0"] | 3 | 2015-04-08T18:41:02.000Z | 2015-10-28T09:54:47.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- author: Alex -*-
from Centos6_Bit64 import *
from SystemUtils import *
# Checking version of OS should happen before the menu appears
# Check version of CentOS
SystemUtils.check_centos_version()
# Clear screen before showing the menu
os.system('clear')
answer = True
while answer:
print ("""
LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit:
---------------------------------------------------
1. Check version of your CentOS
2. Check Internet connection
3. Show me my local IP address
4. Open port 80 to Web
5. Show me my localhost name
------- LAMP for CentOS 6.x -----------
6. Install EPEL & IUS repository
7. Install Web Server - Apache
8. Install Database - MySQL
9. Install Language - PHP
10. Install LAMP in "One Click" - CentOS 6.x
11. Exit/Quit
""")
answer = input("Please make your choice: ")
if answer == 1:
os.system('clear')
print ('\nChecking version of the system: ')
SystemUtils.check_centos_version()
elif answer == 2:
os.system('clear')
print ('\nChecking if you connected to the Internet')
SystemUtils.check_internet_connection()
elif answer == 3:
os.system('clear')
print ('\nYour local IP address is: ' + SystemUtils.check_local_ip())
elif answer == 4:
os.system('clear')
print('\nChecking firewall')
Centos6Deploy.iptables_port()
elif answer == 5:
print ("Checking local hostname...")
SystemUtils.check_host_name()
elif answer == 6:
print ('\nInstalling EPEL and IUS repository to the system...')
Centos6Deploy.add_repository()
elif answer == 7:
print ('\nInstalling Web Server Apache...')
Centos6Deploy.install_apache()
elif answer == 8:
print ('\nInstalling database MySQL...')
Centos6Deploy.install_mysql()
elif answer == 9:
print('\nInstalling PHP...')
Centos6Deploy.install_php()
elif answer == 10:
print ('Install LAMP in "One Click" - CentOS 6.x')
Centos6Deploy.iptables_port()
Centos6Deploy.add_repository()
Centos6Deploy.install_mysql()
Centos6Deploy.install_php()
elif answer == 11:
print("\nGoodbye...\n")
answer = None
else:
print ('\nNot valid Choice, Try Again')
answer = True
| 31.051282 | 77 | 0.604872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,173 | 0.48431 |
466dad9d3b71c7956f19885bfc3e8b7004d94303 | 1,923 | py | Python | distributed/register/application.py | ADKosm/concurrency | e3bda762ec12d8988ffda060f8842ffda5526f2b | ["MIT"] | null | null | null | distributed/register/application.py | ADKosm/concurrency | e3bda762ec12d8988ffda060f8842ffda5526f2b | ["MIT"] | null | null | null | distributed/register/application.py | ADKosm/concurrency | e3bda762ec12d8988ffda060f8842ffda5526f2b | ["MIT"] | null | null | null |
import asyncio
import os
import time
from dataclasses import dataclass
import requests_unixsocket
from aiohttp import ClientSession, web
@dataclass(frozen=True)
class Replica:
replica_id: str
ip: str
is_self: bool
def replicas_discovery():
session = requests_unixsocket.Session()
number_of_replicas = int(os.environ['REPLICAS'])
app_codename = os.environ['APP_CODENAME']
self_hostname = os.environ['HOSTNAME']
registered_replicas = set()
while len(registered_replicas) < number_of_replicas:
cluster_config = session.get('http+unix://%2Fvar%2Frun%2Fdocker.sock/v1.24/containers/json').json()
replicas = {
Replica(
replica_id=x['Id'],
ip=x['NetworkSettings']['Networks']['register_default']['IPAddress'],
is_self=x['Id'].startswith(self_hostname)
)
for x in cluster_config
if app_codename in x['Labels']
}
registered_replicas.update(replicas)
if len(registered_replicas) < number_of_replicas:
time.sleep(2)
return registered_replicas
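# Shape of a single /containers/json entry that this discovery loop relies
# on (values invented, unrelated keys omitted; only these fields are read):
#     {
#         "Id": "3f2a9c...",
#         "Labels": {"<APP_CODENAME value>": ""},
#         "NetworkSettings": {"Networks": {"register_default": {"IPAddress": "172.18.0.3"}}},
#     }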
replicas = replicas_discovery()
self_id = next(filter(lambda x: x.is_self, replicas)).replica_id
async def index(request):
for replica in replicas:
async with ClientSession() as session:
async with session.get("http://{}:8080/hello".format(replica.ip), headers={'ReplicaId': self_id}) as r:
await r.text()
return web.Response(text='ok')
# print(r.headers['ReplicaId'], flush=True)
async def hello(request):
requested_id = request.headers['ReplicaId']
print("Hello from {}".format(requested_id), flush=True)
return web.Response(text='ok')
print(replicas, flush=True)
app = web.Application()
app.add_routes([web.get('/', index),
web.get('/hello', hello)])
web.run_app(app, host='0.0.0.0', port=8080)
| 28.279412 | 115 | 0.651586 | 64 | 0.033281 | 0 | 0 | 88 | 0.045762 | 511 | 0.265731 | 298 | 0.154966 |