Dataset schema (one line per column: name, dtype, min to max; for stringclasses the count of distinct values):

hexsha: stringlengths, 40 to 40
size: int64, 5 to 2.06M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 248
max_stars_repo_name: stringlengths, 5 to 125
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 248
max_issues_repo_name: stringlengths, 5 to 125
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 248
max_forks_repo_name: stringlengths, 5 to 125
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 5 to 2.06M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.03M
alphanum_fraction: float64, 0 to 1
count_classes: int64, 0 to 1.6M
score_classes: float64, 0 to 1
count_generators: int64, 0 to 651k
score_generators: float64, 0 to 1
count_decorators: int64, 0 to 990k
score_decorators: float64, 0 to 1
count_async_functions: int64, 0 to 235k
score_async_functions: float64, 0 to 1
count_documentation: int64, 0 to 1.04M
score_documentation: float64, 0 to 1
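The rows that follow repeat these fields in this order for each file. The count_* and score_* columns appear to quantify structural features of each file (classes, generators, decorators, async functions, documentation), so they can drive simple row filters. Below is a minimal sketch, not part of the dump, of how a row maps onto this schema and how one score column could be used; the helper function, its name, and the threshold are illustrative assumptions, while the field values are copied from the first row shown after this block.

# Sketch only: a row of this dump represented as a plain dict keyed by the
# column names above. How rows are actually loaded (JSON lines, parquet, ...)
# is not shown here and is an assumption left open.

def well_documented(rows, min_score=0.1):
    """Yield rows whose score_documentation column meets the threshold (assumed helper)."""
    for row in rows:
        if row["score_documentation"] >= min_score:
            yield row

# Field values copied from the first row in this dump (levels/sombie.py).
example_row = {
    "hexsha": "fe90eb5d4db9dcb42eabad6cf0007baab0fc7833",
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "levels/sombie.py",
    "max_stars_count": 1,
    "alphanum_fraction": 0.614367,
    "score_documentation": 0.172384,
}

print([r["hexsha"] for r in well_documented([example_row])])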
fe90eb5d4db9dcb42eabad6cf0007baab0fc7833
18,598
py
Python
levels/sombie.py
superhasduper/PythonGames
64995d3e0b619006a2cf80d0da3c0fdf97db6fd9
[ "MIT" ]
1
2019-07-07T19:55:39.000Z
2019-07-07T19:55:39.000Z
levels/sombie.py
superhasduper/PythonGames
64995d3e0b619006a2cf80d0da3c0fdf97db6fd9
[ "MIT" ]
null
null
null
levels/sombie.py
superhasduper/PythonGames
64995d3e0b619006a2cf80d0da3c0fdf97db6fd9
[ "MIT" ]
null
null
null
import arcade import os SPRITE_SCALING = 0.5 SPRITE_NATIVE_SIZE = 128 SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING) SCREEN_WIDTH = SPRITE_SIZE * 14 SCREEN_HEIGHT = SPRITE_SIZE * 10 MOVEMENT_SPEED = 5 COIN_SCALE = 0.7 class Room: """ This class holds all the information about the different rooms. """ def __init__(self): # You may want many lists. Lists for coins, monsters, etc. self.wall_list = None self.coin_list = None self.door_list = None self.smallpotion_list = None self.bigpotion_list = None # This holds the background images. If you don't want changing # background images, you can delete this part. self.background = None self.score = 0 def setup_room_1(): """ Create and return room 1. If your program gets large, you may want to separate this into different files. """ room = Room() """ Set up the game and initialize the variables. """ # Sprite lists room.wall_list = arcade.SpriteList() room.door_list = arcade.SpriteList() room.coin_list = arcade.SpriteList() room.smallpotion_list = arcade.SpriteList() room.bigpotion_list = arcade.SpriteList() for y in (0, SCREEN_HEIGHT - SPRITE_SIZE): # Loop for each box going across for x in range(0, SCREEN_WIDTH, SPRITE_SIZE): wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) # Create left and right column of boxes for x in (0, SCREEN_WIDTH - SPRITE_SIZE): # Loop for each box going across for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE): # Skip making a block 4 and 5 blocks up on the right side if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0: wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) for x in (0, SCREEN_WIDTH - SPRITE_SIZE): # Loop for each box going across for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE): if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0: door = arcade.Sprite("fence.png", SPRITE_SCALING) door.left = x door.bottom = y room.door_list.append(door) wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING) wall.left = 7 * SPRITE_SIZE wall.bottom = 5 * SPRITE_SIZE room.wall_list.append(wall) # If you want coins or monsters in a level, then add that code here. # Load the background image for this level. room.background = arcade.load_texture("g.png") for i in range(300,600,75): coin = arcade.Sprite("coin.png",COIN_SCALE) coin.center_x = i coin.center_y = 500 room.coin_list.append(coin) smallpotion = arcade.Sprite("big.png",0.05) smallpotion.center_x = 100 smallpotion.center_y = 900 room.smallpotion_list.append(smallpotion) return room def setup_room_2(): """ Create and return room 2. """ room = Room() """ Set up the game and initialize the variables. 
""" # Sprite lists room.door_list = arcade.SpriteList() room.wall_list = arcade.SpriteList() room.coin_list = arcade.SpriteList() room.smallpotion_list = arcade.SpriteList() room.bigpotion_list = arcade.SpriteList() # -- Set up the walls # Create bottom and top row of boxes # This y loops a list of two, the coordinate 0, and just under the top of window for y in (0, SCREEN_HEIGHT - SPRITE_SIZE): # Loop for each box going across for x in range(0, SCREEN_WIDTH, SPRITE_SIZE): wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) # Create left and right column of boxes for x in (0, SCREEN_WIDTH - SPRITE_SIZE): # Loop for each box going across for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE): # Skip making a block 4 and 5 blocks up if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0: wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 1 * SPRITE_SIZE wall.bottom = 6 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 1 * SPRITE_SIZE wall.bottom = 3 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 3 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 3 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 5 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 2.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom =3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE 
room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 0.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 7 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 7 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 9 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 2.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 9 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 7.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 8 * SPRITE_SIZE room.wall_list.append(wall) room.background = arcade.load_texture("g.png") bigpotion = arcade.Sprite("small.png",0.05) bigpotion.center_x = 800 bigpotion.center_y = 100 room.bigpotion_list.append(bigpotion) return room class MyGame(arcade.Window): """ Main application class. """ def __init__(self, width, height): """ Initializer """ super().__init__(width, height,"Tocate el pnnywise") # Set the working directory (where we expect to find files) to the same # directory this .py file is in. You can leave this out of your own # code, but it is needed to easily run the examples using "python -m" # as mentioned at the top of this program. 
file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) # Sprite lists self.current_room = 0 # Set up the player self.game_over = False self.door_list = None self.rooms = None self.score = 0 self.coin_list = None self.player_sprite = None self.physics_engine = None self.smallpotion_list = None self.bigpotion_list = None def setup(self): """ Set up the game and initialize the variables. """ # Set up the player self.player_sprite = arcade.AnimatedWalkingSprite() self.score = 0 self.coin_list = arcade.SpriteList() self.smallpotion_list = arcade.SpriteList() self.bigpotion_list = arcade.SpriteList() self.player_sprite.center_x = 100 self.player_sprite.center_y = 150 character_scale = 0.75 self.player_sprite.stand_right_textures = [] self.player_sprite.stand_right_textures.append(arcade.load_texture("zombie_stand.png", scale=character_scale)) self.player_sprite.stand_left_textures = [] self.player_sprite.stand_left_textures.append(arcade.load_texture("zombie_stand.png", scale=character_scale, mirrored=True)) self.player_sprite.walk_right_textures = [] self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk1.png", scale=character_scale)) self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk2.png", scale=character_scale)) self.player_sprite.walk_left_textures = [] self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk1.png", scale=character_scale, mirrored=True)) self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk2.png", scale=character_scale, mirrored=True)) # Our list of rooms self.rooms = [] # Create the rooms. Extend the pattern for each room. room = setup_room_1() self.rooms.append(room) room = setup_room_2() self.rooms.append(room) # Our starting room number self.current_room = 0 # Create a physics engine for this room self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list) self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].door_list) def on_draw(self): """ Render the screen. """ # This command has to happen before we start drawing arcade.start_render() # Draw the background texture arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2, SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background) # Draw all the walls in this room self.rooms[self.current_room].door_list.draw() self.rooms[self.current_room].wall_list.draw() self.rooms[self.current_room].coin_list.draw() self.rooms[self.current_room].bigpotion_list.draw() self.rooms[self.current_room].smallpotion_list.draw() # If you have coins or monsters, then copy and modify the line # above for each list. output = "Score: {}".format(self.score) arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14) self.player_sprite.draw() def on_key_press(self, key, modifiers): """Called whenever a key is pressed. """ if key == arcade.key.W: self.player_sprite.change_y = MOVEMENT_SPEED elif key == arcade.key.S: self.player_sprite.change_y = -MOVEMENT_SPEED elif key == arcade.key.A: self.player_sprite.change_x = -MOVEMENT_SPEED elif key == arcade.key.D: self.player_sprite.change_x = MOVEMENT_SPEED def on_key_release(self, key, modifiers): """Called when the user releases a key. 
""" if key == arcade.key.W or key == arcade.key.S: self.player_sprite.change_y = 0 elif key == arcade.key.A or key == arcade.key.D: self.player_sprite.change_x = 0 def update(self, delta_time): """ Movement and game logic """ self.player_sprite.update_animation() # Call update on all sprites (The sprites don't do much in this # example though.) self.physics_engine.update() # Do some logic here to figure out what room we are in, and if we need to go # to a different room. if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0: self.current_room = 1 self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list) self.player_sprite.center_x = 0 elif self.player_sprite.center_x < 0 and self.current_room == 1: self.current_room = 0 self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list) self.player_sprite.center_x = SCREEN_WIDTH hit_list = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].coin_list) hit_list2 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].bigpotion_list) hit_list3 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].smallpotion_list) for coin in hit_list: coin.kill() self.score += 1 my_sound = arcade.load_sound("coinsound.wav") arcade.play_sound(my_sound) if self.score == 4: for i in self.rooms[self.current_room].door_list: i.kill() your_sound = arcade.load_sound("door.wav") arcade.play_sound(your_sound) for smallpotion in hit_list3: smallpotion.kill() self.player_sprite.scale=0.5 tu_sound = arcade.load_sound("shrink.wav") arcade.play_sound(tu_sound) for bigpotion in hit_list2: bigpotion.kill() self.player_sprite.scale=1 yo_sound = arcade.load_sound("grow.wav") arcade.play_sound(yo_sound) def main(): """ Main method """ window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT) window.setup() arcade.run() if __name__ == "__main__": main()
36.324219
124
0.614367
8,102
0.435638
0
0
0
0
0
0
3,206
0.172384
fe916e74f3d8c5dd73c18e07f1aa14f15ee3d8d0
4,869
py
Python
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py
Guillaume-Fernandez/phishfinder
b459a30202fd5dfb1340b43c70363705de7cedd9
[ "MIT" ]
10
2021-03-23T03:46:19.000Z
2022-03-08T07:20:25.000Z
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py
Guillaume-Fernandez/phishfinder
b459a30202fd5dfb1340b43c70363705de7cedd9
[ "MIT" ]
7
2021-05-21T16:51:48.000Z
2022-03-12T00:50:26.000Z
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py
Guillaume-Fernandez/phishfinder
b459a30202fd5dfb1340b43c70363705de7cedd9
[ "MIT" ]
4
2021-04-21T00:49:34.000Z
2021-11-21T09:18:29.000Z
# Copyright (c) 2018 gevent community # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import, print_function, division import os import unittest import re from . import sysinfo # Linux/OS X/BSD platforms can implement this by calling out to lsof if sysinfo.WIN: def _run_lsof(): raise unittest.SkipTest("lsof not expected on Windows") else: def _run_lsof(): import tempfile pid = os.getpid() fd, tmpname = tempfile.mkstemp('get_open_files') os.close(fd) lsof_command = 'lsof -p %s > %s' % (pid, tmpname) if os.system(lsof_command): # XXX: This prints to the console an annoying message: 'lsof is not recognized' raise unittest.SkipTest("lsof failed") with open(tmpname) as fobj: data = fobj.read().strip() os.remove(tmpname) return data def default_get_open_files(pipes=False): data = _run_lsof() results = {} for line in data.split('\n'): line = line.strip() if not line or line.startswith("COMMAND"): # Skip header and blank lines continue split = re.split(r'\s+', line) _command, _pid, _user, fd = split[:4] # Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u" if fd[:-1].isdigit() or fd.isdigit(): if not pipes and fd[-1].isdigit(): continue fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd) if fd in results: params = (fd, line, split, results.get(fd), data) raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params) results[fd] = line if not results: raise AssertionError('failed to parse lsof:\n%s' % (data, )) results['data'] = data return results def default_get_number_open_files(): if os.path.exists('/proc/'): # Linux only fd_directory = '/proc/%d/fd' % os.getpid() return len(os.listdir(fd_directory)) try: return len(get_open_files(pipes=True)) - 1 except (OSError, AssertionError, unittest.SkipTest): return 0 lsof_get_open_files = default_get_open_files try: # psutil import subprocess which on Python 3 imports selectors. # This can expose issues with monkey-patching. import psutil except ImportError: get_open_files = default_get_open_files get_number_open_files = default_get_number_open_files else: # If psutil is available (it is cross-platform) use that. # It is *much* faster than shelling out to lsof each time # (Running 14 tests takes 3.964s with lsof and 0.046 with psutil) # However, it still doesn't completely solve the issue on Windows: fds are reported # as -1 there, so we can't fully check those. def get_open_files(): """ Return a list of popenfile and pconn objects. 
Note that other than `fd`, they have different attributes. .. important:: If you want to find open sockets, on Windows and linux, it is important that the socket at least be listening (socket.listen(1)). Unlike the lsof implementation, this will only return sockets in a state like that. """ results = dict() process = psutil.Process() results['data'] = process.open_files() + process.connections('all') for x in results['data']: results[x.fd] = x results['data'] += ['From psutil', process] return results def get_number_open_files(): process = psutil.Process() try: return process.num_fds() except AttributeError: # num_fds is unix only. Is num_handles close enough on Windows? return 0
38.642857
139
0.657835
0
0
0
0
0
0
0
0
2,513
0.516122
fe93e83fe7e8770b4f2c1e2cf97bec6cd0abb158
1,628
py
Python
examples/multiprocess_example.py
ct-clmsn/distributed-tensorflow-orchestration
c841659881e98209149bd6e3e09774a50e3c748e
[ "Apache-2.0" ]
5
2016-07-27T08:25:17.000Z
2022-02-07T19:41:45.000Z
examples/multiprocess_example.py
ct-clmsn/distributed-tensorflow-orchestration
c841659881e98209149bd6e3e09774a50e3c748e
[ "Apache-2.0" ]
null
null
null
examples/multiprocess_example.py
ct-clmsn/distributed-tensorflow-orchestration
c841659881e98209149bd6e3e09774a50e3c748e
[ "Apache-2.0" ]
1
2022-02-07T19:41:46.000Z
2022-02-07T19:41:46.000Z
'''
marathon_example.py
performs a simple matrix multiply using 3 compute nodes
'''

def parseargs():
    parser = argparse.ArgumentParser(description='Marathon for TensorFlow.')
    parser.add_argument('--n_tasks', default=1, help='an integer for the accumulator')
    parser.add_argument('--cpu', default=100.0, help='an integer for the accumulator')
    parser.add_argument('--mem', default=100.0, help='an integer for the accumulator')
    parser.add_argument('--taskname', default=uuid.uuid1(), help='name for the task')
    parser.add_argument('--url', help='DNS addr to marathon')
    parser.add_argument('--usr', help='marathon username')
    parser.add_argument('--usrpwd', help='marathon password')
    parser.add_argument('--uri', help='curl-friendly URI to the tensorflow client executable (url?, hdfs?, docker?)')
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    from sys import argv
    import tensorflow as tf
    from dtforchestrator import *

    args = parseargs()

    with MultiprocessTensorFlowSession(args.taskname, args.n_tasks) as tfdevices:
        with tf.device(tfdevices.getDeviceSpec(1)):
            matrix1 = tf.constant([[3.],[3.]])
        with tf.device(tfdevices.getDeviceSpec(2)):
            matrix2 = tf.constant([[3.,3.]])
        with tf.device(tfdevices.getDeviceSpec(0)):
            matrix0 = tf.constant([[3.,3.]])
            product1 = tf.matmul(matrix0, matrix1)
            product2 = tf.matmul(matrix2, matrix1)

        with tf.Session(tfdevices.localGRPC()) as sess:
            res = sess.run(product1)
            print res
            res = sess.run(product2)
            print res
34.638298
116
0.673219
0
0
0
0
0
0
0
0
449
0.275799
fe97b6953c22bb335b56638721adf4a720e34f5f
2,922
py
Python
FAUCovidCrawler/AWSLambda/lambda_function.py
Awannaphasch2016/CDKFAUCovid19Cralwer
a84d90612314cb4d4618da95238617a524b1b280
[ "MIT" ]
null
null
null
FAUCovidCrawler/AWSLambda/lambda_function.py
Awannaphasch2016/CDKFAUCovid19Cralwer
a84d90612314cb4d4618da95238617a524b1b280
[ "MIT" ]
null
null
null
FAUCovidCrawler/AWSLambda/lambda_function.py
Awannaphasch2016/CDKFAUCovid19Cralwer
a84d90612314cb4d4618da95238617a524b1b280
[ "MIT" ]
null
null
null
'''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''

import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
#     twitter_to_es

from tweet_utils import \
    get_tweet, id_field, get_tweet_mapping

headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')


# Lambda execution starts here
def handler(event, context):
    for record in event['Records']:

        # Get the bucket name and key for the new file
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']

        # Get s3 object, read, and split the file into lines
        try:
            obj = s3.get_object(Bucket=bucket, Key=key)
        except Exception as e:
            print(e)
            print(
                'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
                    key, bucket))
            raise e

        # Parse s3 object content (JSON)
        try:
            # https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3
            s3_file_content = obj['Body'].read().decode('utf-8')
            # clean trailing comma
            if s3_file_content.endswith(',\n'):
                s3_file_content = s3_file_content[:-2]
            tweets_str = '[' + s3_file_content + ']'
            # print(tweets_str)
            tweets = json.loads(tweets_str)
        except Exception as e:
            print(e)
            print('Error loading json from object {} in bucket {}'.format(key, bucket))
            raise e

        for doc in tweets:
            tweet = get_tweet(doc)
            # print(tweet['sentiments'])
            print(tweet)
            print('===\n\n\n')

            #=====================
            #==send data to dynamoDB
            #=====================

            # Get the service resource.
            dynamodb = boto3.resource('dynamodb')

            # Instantiate a table resource object without actually
            # creating a DynamoDB table. Note that the attributes of this table
            # are lazy-loaded: a request is not made nor are the attribute
            # values populated until the attributes
            # on the table resource are accessed or its load() method is called.
            table = dynamodb.Table('faucovidstream_twitter_with_sentiment')

            # Print out some data about the table.
            # This will cause a request to be made to DynamoDB and its attribute
            # values will be set based on the response.
            print(table.creation_date_time)

            dynamodb.put_item(
                Item=tweet
            )
33.204545
142
0.589322
0
0
0
0
0
0
0
0
1,481
0.506845
fe97e4775b3fbd1abdf826717d17fd4e96f2144c
353
py
Python
user_messages/context_processors.py
everaccountable/django-user-messages
101d539b785bdb440bf166fb16ad25eb66e4174a
[ "MIT" ]
21
2018-04-18T17:58:12.000Z
2022-01-19T12:41:01.000Z
user_messages/context_processors.py
everaccountable/django-user-messages
101d539b785bdb440bf166fb16ad25eb66e4174a
[ "MIT" ]
4
2018-04-24T11:04:15.000Z
2022-02-03T18:35:21.000Z
user_messages/context_processors.py
everaccountable/django-user-messages
101d539b785bdb440bf166fb16ad25eb66e4174a
[ "MIT" ]
7
2018-03-04T16:03:44.000Z
2022-02-03T15:50:39.000Z
from django.contrib.messages.constants import DEFAULT_LEVELS

from user_messages.api import get_messages


def messages(request):
    """
    Return a lazy 'messages' context variable as well as
    'DEFAULT_MESSAGE_LEVELS'.
    """
    return {
        "messages": get_messages(request=request),
        "DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
    }
23.533333
60
0.708215
0
0
0
0
0
0
0
0
132
0.373938
fe98a505a6e3e05977900098d14a4c4efb60654a
502
py
Python
Day_5/highest_score.py
ecanro/100DaysOfCode_Python
a86ebe5a793fd4743e0de87454ba76925efdd23d
[ "MIT" ]
null
null
null
Day_5/highest_score.py
ecanro/100DaysOfCode_Python
a86ebe5a793fd4743e0de87454ba76925efdd23d
[ "MIT" ]
null
null
null
Day_5/highest_score.py
ecanro/100DaysOfCode_Python
a86ebe5a793fd4743e0de87454ba76925efdd23d
[ "MIT" ]
null
null
null
## Highest Score
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores: ").split()
for n in range(0, len(student_scores)):
    student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆

# Write your code below this row 👇
highest_score = 0
for scores in student_scores:
    if scores > highest_score:
        highest_score = scores

print(f'The highest score is: {highest_score}')

# functional code
print(max(student_scores))
26.421053
66
0.721116
0
0
0
0
0
0
0
0
222
0.4294
fe9913a9a0d00104117bbc4e7f42cf9196b11854
8,791
py
Python
finetune/finetune.py
zaixizhang/MGSSL
fdb7e78bb927d735ed64dc78fb792adb13352e1c
[ "Apache-2.0" ]
43
2021-10-15T01:11:36.000Z
2022-03-31T02:05:41.000Z
finetune/finetune.py
zaixizhang/MGSSL
fdb7e78bb927d735ed64dc78fb792adb13352e1c
[ "Apache-2.0" ]
5
2021-12-09T08:07:22.000Z
2022-03-02T07:34:34.000Z
finetune/finetune.py
zaixizhang/MGSSL
fdb7e78bb927d735ed64dc78fb792adb13352e1c
[ "Apache-2.0" ]
7
2021-11-23T01:15:36.000Z
2022-03-07T16:30:30.000Z
import argparse from loader import MoleculeDataset from torch_geometric.data import DataLoader import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from tqdm import tqdm import numpy as np from model import GNN, GNN_graphpred from sklearn.metrics import roc_auc_score from splitters import scaffold_split, random_split import pandas as pd import os import shutil from tensorboardX import SummaryWriter criterion = nn.BCEWithLogitsLoss(reduction = "none") def train(args, model, device, loader, optimizer): model.train() for step, batch in enumerate(tqdm(loader, desc="Iteration")): batch = batch.to(device) pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch) y = batch.y.view(pred.shape).to(torch.float64) #Whether y is non-null or not. is_valid = y**2 > 0 #Loss matrix loss_mat = criterion(pred.double(), (y+1)/2) #loss matrix after removing null target loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype)) optimizer.zero_grad() loss = torch.sum(loss_mat)/torch.sum(is_valid) loss.backward() optimizer.step() def eval(args, model, device, loader): model.eval() y_true = [] y_scores = [] for step, batch in enumerate(tqdm(loader, desc="Iteration")): batch = batch.to(device) with torch.no_grad(): pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch) y_true.append(batch.y.view(pred.shape)) y_scores.append(pred) y_true = torch.cat(y_true, dim = 0).cpu().numpy() y_scores = torch.cat(y_scores, dim = 0).cpu().numpy() roc_list = [] for i in range(y_true.shape[1]): #AUC is only defined when there is at least one positive data. if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == -1) > 0: is_valid = y_true[:,i]**2 > 0 roc_list.append(roc_auc_score((y_true[is_valid,i] + 1)/2, y_scores[is_valid,i])) if len(roc_list) < y_true.shape[1]: print("Some target is missing!") print("Missing ratio: %f" %(1 - float(len(roc_list))/y_true.shape[1])) return sum(roc_list)/len(roc_list) #y_true.shape[1] def main(): # Training settings parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks') parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)') parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)') parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train (default: 100)') parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.001)') parser.add_argument('--lr_scale', type=float, default=1, help='relative learning rate for the feature extraction layer (default: 1)') parser.add_argument('--decay', type=float, default=0, help='weight decay (default: 0)') parser.add_argument('--num_layer', type=int, default=5, help='number of GNN message passing layers (default: 5).') parser.add_argument('--emb_dim', type=int, default=300, help='embedding dimensions (default: 300)') parser.add_argument('--dropout_ratio', type=float, default=0.5, help='dropout ratio (default: 0.5)') parser.add_argument('--graph_pooling', type=str, default="mean", help='graph level pooling (sum, mean, max, set2set, attention)') parser.add_argument('--JK', type=str, default="last", help='how the node features across layers are combined. last, sum, max or concat') parser.add_argument('--gnn_type', type=str, default="gin") parser.add_argument('--dataset', type=str, default = 'sider', help='root directory of dataset. 
For now, only classification.') parser.add_argument('--input_model_file', type=str, default = '../motif_based_pretrain/saved_model/motif_pretrain.pth', help='filename to read the model (if there is any)') parser.add_argument('--filename', type=str, default = '', help='output filename') parser.add_argument('--seed', type=int, default=42, help = "Seed for splitting the dataset.") parser.add_argument('--runseed', type=int, default=0, help = "Seed for minibatch selection, random initialization.") parser.add_argument('--split', type = str, default="scaffold", help = "random or scaffold or random_scaffold") parser.add_argument('--eval_train', type=int, default = 1, help='evaluating training or not') parser.add_argument('--num_workers', type=int, default = 4, help='number of workers for dataset loading') args = parser.parse_args() torch.manual_seed(args.runseed) np.random.seed(args.runseed) device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu") if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.runseed) #Bunch of classification tasks if args.dataset == "tox21": num_tasks = 12 elif args.dataset == "hiv": num_tasks = 1 elif args.dataset == "pcba": num_tasks = 128 elif args.dataset == "muv": num_tasks = 17 elif args.dataset == "bace": num_tasks = 1 elif args.dataset == "bbbp": num_tasks = 1 elif args.dataset == "toxcast": num_tasks = 617 elif args.dataset == "sider": num_tasks = 27 elif args.dataset == "clintox": num_tasks = 2 else: raise ValueError("Invalid dataset name.") #set up dataset dataset = MoleculeDataset("dataset/" + args.dataset, dataset=args.dataset) print(dataset) if args.split == "scaffold": smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist() train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1) print("scaffold") elif args.split == "random": train_dataset, valid_dataset, test_dataset = random_split(dataset, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed) print("random") elif args.split == "random_scaffold": smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist() train_dataset, valid_dataset, test_dataset = random_scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed) print("random scaffold") else: raise ValueError("Invalid split option.") print(train_dataset[0]) train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers) val_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers) #set up model model = GNN_graphpred(args.num_layer, args.emb_dim, num_tasks, JK = args.JK, drop_ratio = args.dropout_ratio, graph_pooling = args.graph_pooling, gnn_type = args.gnn_type) if not args.input_model_file == "": model.from_pretrained(args.input_model_file) model.to(device) #set up optimizer #different learning rate for different part of GNN model_param_group = [] model_param_group.append({"params": model.gnn.parameters()}) if args.graph_pooling == "attention": model_param_group.append({"params": model.pool.parameters(), "lr":args.lr*args.lr_scale}) model_param_group.append({"params": model.graph_pred_linear.parameters(), "lr":args.lr*args.lr_scale}) 
optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay) print(optimizer) for epoch in range(1, args.epochs+1): print("====epoch " + str(epoch)) train(args, model, device, train_loader, optimizer) print("====Evaluation") if args.eval_train: train_acc = eval(args, model, device, train_loader) else: print("omit the training accuracy computation") train_acc = 0 val_acc = eval(args, model, device, val_loader) test_acc = eval(args, model, device, test_loader) print("train: %f val: %f test: %f" %(train_acc, val_acc, test_acc)) if __name__ == "__main__": main()
42.674757
176
0.657604
0
0
0
0
0
0
0
0
2,005
0.228074
fe995885e2a5bd2844820d9d11a66c6433d1051b
1,166
py
Python
jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py
threefoldtech/js-sdk
811f783ac34a60225175bab2d806802a87b9d5c7
[ "Apache-2.0" ]
13
2020-09-02T09:05:08.000Z
2022-03-12T02:43:24.000Z
jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py
threefoldtech/js-sdk
811f783ac34a60225175bab2d806802a87b9d5c7
[ "Apache-2.0" ]
1,998
2020-06-15T11:46:10.000Z
2022-03-24T22:12:41.000Z
jumpscale/packages/vdc_dashboard/bottle/api/exceptions.py
threefoldtech/js-sdk
811f783ac34a60225175bab2d806802a87b9d5c7
[ "Apache-2.0" ]
8
2020-09-29T06:50:35.000Z
2021-06-14T03:30:52.000Z
from jumpscale.core import exceptions


class BaseError(exceptions.Base):
    """a generic base error for bcdb rest, with status code"""

    def __init__(self, status, *args, **kwargs):
        super().__init__(*args, *kwargs)
        self.status = status


class VDCNotFound(BaseError):
    pass


class MissingAuthorizationHeader(BaseError):
    pass


class InvalidCredentials(BaseError):
    pass


class MissingArgument(BaseError):
    pass


class StellarServiceDown(BaseError):
    pass


class FlavorNotSupported(BaseError):
    pass


class NoEnoughCapacity(BaseError):
    pass


class AdddingNodeFailed(BaseError):
    pass


class VirtualMachineDeploymentFailed(BaseError):
    pass


class CannotDeleteMasterNode(BaseError):
    pass


class ZDBDeploymentFailed(BaseError):
    pass


class ZDBDeletionFailed(BaseError):
    pass


class KubeConfigNotFound(BaseError):
    pass


class InvalidKubeConfig(BaseError):
    pass


class ZStorConfigNotFound(BaseError):
    pass


class InvalidZStorConfig(BaseError):
    pass


class NoEnoughFunds(BaseError):
    pass


class BadRequestError(BaseError):
    pass


class UnknownError(BaseError):
    pass
13.55814
62
0.732419
1,068
0.915952
0
0
0
0
0
0
58
0.049743
fe99a748e2fcbf259f6611afd0ca5930032c99b6
5,703
py
Python
neurokit2/signal/signal_plot.py
gutierrezps/NeuroKit
a30f76e64b4108abdc652a20391dc0288c62501d
[ "MIT" ]
1
2022-03-20T21:09:34.000Z
2022-03-20T21:09:34.000Z
neurokit2/signal/signal_plot.py
Lei-I-Zhang/NeuroKit
a30f76e64b4108abdc652a20391dc0288c62501d
[ "MIT" ]
null
null
null
neurokit2/signal/signal_plot.py
Lei-I-Zhang/NeuroKit
a30f76e64b4108abdc652a20391dc0288c62501d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import matplotlib.pyplot as plt import numpy as np import pandas as pd from ..events import events_plot from ..stats import standardize as nk_standardize def signal_plot( signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs ): """Plot signal with events as vertical lines. Parameters ---------- signal : array or DataFrame Signal array (can be a dataframe with many signals). sampling_rate : int The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if the data should be plotted over time in seconds. Otherwise the data is plotted over samples. Defaults to None. subplots : bool If True, each signal is plotted in a subplot. standardize : bool If True, all signals will have the same scale (useful for visualisation). labels : str or list Defaults to None. **kwargs : optional Arguments passed to matplotlib plotting. Examples ---------- >>> import numpy as np >>> import pandas as pd >>> import neurokit2 as nk >>> >>> signal = nk.signal_simulate(duration=10, sampling_rate=1000) >>> nk.signal_plot(signal, sampling_rate=1000, color="red") >>> >>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)), ... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)), ... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))}) >>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True) >>> nk.signal_plot([signal, data], standardize=True) """ # Sanitize format if isinstance(signal, list): try: for i in signal: len(i) except TypeError: signal = np.array(signal) if isinstance(signal, pd.DataFrame) is False: # If list is passed if isinstance(signal, list) or len(np.array(signal).shape) > 1: out = pd.DataFrame() for i, content in enumerate(signal): if isinstance(content, (pd.DataFrame, pd.Series)): out = pd.concat([out, content], axis=1, sort=True) else: out = pd.concat( [out, pd.DataFrame({"Signal" + str(i + 1): content})], axis=1, sort=True, ) signal = out # If vector is passed else: signal = pd.DataFrame({"Signal": signal}) # Copy signal signal = signal.copy() # Guess continuous and events columns continuous_columns = list(signal.columns.values) events_columns = [] for col in signal.columns: vector = signal[col] if vector.nunique() == 2: indices = np.where(vector == np.max(vector.unique())) if bool(np.any(np.diff(indices) == 1)) is False: events_columns.append(col) continuous_columns.remove(col) # Adjust for sampling rate if sampling_rate is not None: signal.index = signal.index / sampling_rate title_x = "Time (seconds)" else: title_x = "Time" # x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0]) # x_axis = pd.DataFrame(x_axis, columns=["Time (s)"]) # signal = pd.concat([signal, x_axis], axis=1) # signal = signal.set_index("Time (s)") # Plot accordingly if len(events_columns) > 0: events = [] for col in events_columns: vector = signal[col] events.append(np.where(vector == np.max(vector.unique()))[0]) plot = events_plot(events, signal=signal[continuous_columns]) if sampling_rate is None and signal.index.is_integer(): plot.gca().set_xlabel("Samples") else: plot.gca().set_xlabel(title_x) else: # Aesthetics colors = [ "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf", ] if len(continuous_columns) > len(colors): colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns))) # Plot if standardize is True: signal[continuous_columns] = 
nk_standardize(signal[continuous_columns]) if subplots is True: _, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs) for ax, col, color in zip(axes, continuous_columns, colors): ax.plot(signal[col], c=color, **kwargs) else: plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs) if sampling_rate is None and signal.index.is_integer(): plt.xlabel("Samples") else: plt.xlabel(title_x) # Tidy legend locations and add labels if labels is None: labels = continuous_columns.copy() if isinstance(labels, str): n_labels = len([labels]) labels = [labels] elif isinstance(labels, list): n_labels = len(labels) if len(signal[continuous_columns].columns) != n_labels: raise ValueError( "NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals." ) if subplots is False: plt.legend(labels, loc=1) else: for i, label in enumerate(labels): axes[i].legend([label], loc=1)
33.946429
109
0.57198
0
0
0
0
0
0
0
0
2,142
0.375592
fe9d9591df2f2c4858eb64ae4def8e712c9e40a0
1,183
py
Python
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py
MTES-MCT/mobilic-api
b3754de2282262fd60a27dc90e40777df9c1e230
[ "MIT" ]
null
null
null
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py
MTES-MCT/mobilic-api
b3754de2282262fd60a27dc90e40777df9c1e230
[ "MIT" ]
8
2021-04-19T17:47:55.000Z
2022-02-16T17:40:18.000Z
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py
MTES-MCT/mobilic-api
b3754de2282262fd60a27dc90e40777df9c1e230
[ "MIT" ]
null
null
null
"""Only one validation per mission, user and actor Revision ID: 1a89721126f7 Revises: fa96dfc8237d Create Date: 2021-10-14 11:22:01.124488 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "1a89721126f7" down_revision = "fa96dfc8237d" branch_labels = None depends_on = None def upgrade(): op.execute( """ WITH validation_duplicates AS ( SELECT id, ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn FROM mission_validation ) DELETE FROM mission_validation mv USING validation_duplicates vd WHERE mv.id = vd.id AND vd.rn >= 2 """ ) op.execute( """ ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user EXCLUDE USING GIST ( mission_id WITH =, submitter_id WITH =, user_id WITH = ) """ ) def downgrade(): op.drop_constraint( "only_one_validation_per_submitter_mission_and_user", "mission_validation", )
23.66
117
0.633136
0
0
0
0
0
0
0
0
916
0.774303
fe9dfa2f69a678e6192380ed28bf692cc55ff822
1,979
py
Python
packages/facilities/rtdb/python/rtdb2_get.py
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
2
2021-01-15T13:27:19.000Z
2021-08-04T08:40:52.000Z
packages/facilities/rtdb/python/rtdb2_get.py
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
null
null
null
packages/facilities/rtdb/python/rtdb2_get.py
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
5
2018-05-01T10:39:31.000Z
2022-03-25T03:02:35.000Z
# Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python

import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump


# Main structure of the program
if __name__ == "__main__":

    # Argument parsing.
    descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
    exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
   age: 2h
   shared: True
   list: False
   value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']

Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
   [[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
    parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
    parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
    parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
    parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
    parser.add_argument('key', help='RtDB key to read')
    args = parser.parse_args()

    # Create instance of RtDB2Store and read databases from disk
    rtdb2Store = RtDB2Store(args.path)

    item = rtdb2Store.get(args.agent, args.key, timeout=None)
    if args.expression:
        print(eval("item.value" + args.expression))
    else:
        print(str(item))
    if args.serialized:
        hexdump(item.value_serialized)

    rtdb2Store.closeAll()
42.106383
186
0.723598
0
0
0
0
0
0
0
0
1,031
0.52097
fe9ed7b6294e532592cc4dcafea632566b56df4d
2,219
py
Python
algorithms/A3C/atari/atari_env_deprecated.py
what3versin/reinforce_py
46769da50aea65346cd3a300b55306d25f1f2683
[ "MIT" ]
1
2018-11-09T02:56:27.000Z
2018-11-09T02:56:27.000Z
algorithms/A3C/atari/atari_env_deprecated.py
syd951186545/reinforce_py
46769da50aea65346cd3a300b55306d25f1f2683
[ "MIT" ]
null
null
null
algorithms/A3C/atari/atari_env_deprecated.py
syd951186545/reinforce_py
46769da50aea65346cd3a300b55306d25f1f2683
[ "MIT" ]
null
null
null
from __future__ import print_function
from __future__ import division

import os

import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray


class Atari(object):
    s_dim = [84, 84, 1]
    a_dim = 3

    def __init__(self, args, record_video=False):
        self.env = gym.make('BreakoutNoFrameskip-v4')
        self.ale = self.env.env.ale  # ale interface
        if record_video:
            video_dir = os.path.join(args.save_path, 'videos')
            if not os.path.exists(video_dir):
                os.makedirs(video_dir)
            self.env = gym.wrappers.Monitor(
                self.env, video_dir, video_callable=lambda x: True, resume=True)
            self.ale = self.env.env.env.ale

        self.screen_size = Atari.s_dim[:2]  # 84x84
        self.noop_max = 30
        self.frame_skip = 4
        self.frame_feq = 4
        self.s_dim = Atari.s_dim
        self.a_dim = Atari.a_dim
        self.action_space = [1, 2, 3]  # Breakout specify
        self.done = True

    def new_round(self):
        if not self.done:  # dead but not done
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
            obs = self.preprocess(obs)
        else:  # terminal
            self.env.reset()
            # No-op
            for _ in range(np.random.randint(1, self.noop_max + 1)):
                obs, _, done, _ = self.env.step(0)
            obs = self.preprocess(obs)
        return obs

    def preprocess(self, observ):
        return resize(rgb2gray(observ), self.screen_size)

    def step(self, action):
        observ, reward, dead = None, 0, False
        for _ in range(self.frame_skip):
            lives_before = self.ale.lives()
            o, r, self.done, _ = self.env.step(self.action_space[action])
            lives_after = self.ale.lives()
            reward += r
            if lives_before > lives_after:
                dead = True
                break
        observ = self.preprocess(o)
        observ = np.reshape(observ, newshape=self.screen_size + [1])
        self.state = np.append(self.state[:, :, 1:], observ, axis=2)
        return self.state, reward, dead, self.done
32.632353
80
0.581794
2,032
0.915728
0
0
0
0
0
0
161
0.072555
fe9f7091809e30b40cd88cb5967081a6b1484eed
5,935
py
Python
content/_build/jupyter_execute/macm.py
NBCLab/nimare-paper
2b9e70febcfde4ca12420adc3c2910ff622252f2
[ "MIT" ]
3
2020-10-20T10:24:04.000Z
2021-12-20T13:31:01.000Z
content/_build/jupyter_execute/macm.py
NBCLab/nimare-paper
2b9e70febcfde4ca12420adc3c2910ff622252f2
[ "MIT" ]
20
2021-03-07T17:18:48.000Z
2022-03-09T15:13:02.000Z
content/_build/jupyter_execute/macm.py
NBCLab/nimare-paper
2b9e70febcfde4ca12420adc3c2910ff622252f2
[ "MIT" ]
3
2020-05-05T14:42:18.000Z
2021-11-30T19:52:27.000Z
#!/usr/bin/env python # coding: utf-8 # # Meta-Analytic Coactivation Modeling # In[1]: # First, import the necessary modules and functions import os from datetime import datetime import matplotlib.pyplot as plt from myst_nb import glue from repo2data.repo2data import Repo2Data import nimare start = datetime.now() # Install the data if running locally, or points to cached data if running on neurolibre DATA_REQ_FILE = os.path.join("../binder/data_requirement.json") FIG_DIR = os.path.abspath("../images") # Download data repo2data = Repo2Data(DATA_REQ_FILE) data_path = repo2data.install() data_path = os.path.join(data_path[0], "data") # Now, load the Datasets we will use in this chapter neurosynth_dset = nimare.dataset.Dataset.load(os.path.join(data_path, "neurosynth_dataset.pkl.gz")) # Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks. # In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis. # These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database. # In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not. # # <!-- TODO: Determine appropriate citation style here. --> # # MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data). # Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`. # MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions. # # Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere. # # In this section, we will perform two MACMs- one with a target mask and one with a coordinate-centered sphere. # For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`. # For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`. 
# In[2]: # Create Dataset only containing studies with peaks within the amygdala mask amygdala_mask = os.path.join(data_path, "amygdala_roi.nii.gz") amygdala_ids = neurosynth_dset.get_studies_by_mask(amygdala_mask) dset_amygdala = neurosynth_dset.slice(amygdala_ids) # Create Dataset only containing studies with peaks within the sphere ROI sphere_ids = neurosynth_dset.get_studies_by_coordinate([[24, -2, -20]], r=6) dset_sphere = neurosynth_dset.slice(sphere_ids) # In[3]: import numpy as np from nilearn import input_data, plotting # In order to plot a sphere with a precise radius around a coordinate with # nilearn, we need to use a NiftiSpheresMasker mask_img = neurosynth_dset.masker.mask_img sphere_masker = input_data.NiftiSpheresMasker([[24, -2, -20]], radius=6, mask_img=mask_img) sphere_masker.fit(mask_img) sphere_img = sphere_masker.inverse_transform(np.array([[1]])) fig, axes = plt.subplots(figsize=(6, 4), nrows=2) display = plotting.plot_roi( amygdala_mask, annotate=False, draw_cross=False, axes=axes[0], figure=fig, ) axes[0].set_title("Amygdala ROI") display = plotting.plot_roi( sphere_img, annotate=False, draw_cross=False, axes=axes[1], figure=fig, ) axes[1].set_title("Spherical ROI") glue("figure_macm_rois", fig, display=False) # ```{glue:figure} figure_macm_rois # :name: figure_macm_rois # :align: center # # Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM. # ``` # Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run. # In[4]: from nimare import meta meta_amyg = meta.cbma.ale.ALE(kernel__sample_size=20) results_amyg = meta_amyg.fit(dset_amygdala) meta_sphere = meta.cbma.ale.ALE(kernel__sample_size=20) results_sphere = meta_sphere.fit(dset_sphere) # In[5]: meta_results = { "Amygdala ALE MACM": results_amyg.get_map("z", return_type="image"), "Sphere ALE MACM": results_sphere.get_map("z", return_type="image"), } fig, axes = plt.subplots(figsize=(6, 4), nrows=2) for i_meta, (name, file_) in enumerate(meta_results.items()): display = plotting.plot_stat_map( file_, annotate=False, axes=axes[i_meta], cmap="Reds", cut_coords=[24, -2, -20], draw_cross=False, figure=fig, ) axes[i_meta].set_title(name) colorbar = display._cbar colorbar_ticks = colorbar.get_ticks() if colorbar_ticks[0] < 0: new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]] else: new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]] colorbar.set_ticks(new_ticks, update_ticks=True) glue("figure_macm", fig, display=False) # ```{glue:figure} figure_macm # :name: figure_macm # :align: center # # Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM. # ``` # In[6]: end = datetime.now() print(f"macm.md took {end - start} to build.")
36.411043
392
0.752148
0
0
0
0
0
0
0
0
3,466
0.583993
fe9f96734192b94aa40844f25ed620f799a5da53
50,863
py
Python
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
Maikor/ydk-py
b86c4a7c570ae3b2c5557d098420446df5de4929
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
Maikor/ydk-py
b86c4a7c570ae3b2c5557d098420446df5de4929
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
Maikor/ydk-py
b86c4a7c570ae3b2c5557d098420446df5de4929
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
""" CISCO_IPSLA_ECHO_MIB This MIB module defines the templates for IP SLA operations of ICMP echo, UDP echo and TCP connect. The ICMP echo operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken between sending an ICMP echo request message to the destination and receiving an ICMP echo reply. The UDP echo operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken between sending an UDP echo request message to the destination and receiving an UDP echo reply. The TCP connect operation measures end\-to\-end response time between a Cisco router and any IP enabled device by computing the time taken to perform a TCP connect operation. """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class CISCOIPSLAECHOMIB(Entity): """ .. attribute:: cipslaicmpechotmpltable A table that contains ICMP echo template definitions **type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>` .. attribute:: cipslaudpechotmpltable A table that contains UDP echo template specific definitions **type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>` .. attribute:: cipslatcpconntmpltable A table that contains TCP connect template specific definitions **type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB, self).__init__() self._top_entity = None self.yang_name = "CISCO-IPSLA-ECHO-MIB" self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("cipslaIcmpEchoTmplTable", ("cipslaicmpechotmpltable", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ("cipslaUdpEchoTmplTable", ("cipslaudpechotmpltable", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), ("cipslaTcpConnTmplTable", ("cipslatcpconntmpltable", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable))]) self._leafs = OrderedDict() self.cipslaicmpechotmpltable = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable() self.cipslaicmpechotmpltable.parent = self self._children_name_map["cipslaicmpechotmpltable"] = "cipslaIcmpEchoTmplTable" self.cipslaudpechotmpltable = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable() self.cipslaudpechotmpltable.parent = self self._children_name_map["cipslaudpechotmpltable"] = "cipslaUdpEchoTmplTable" self.cipslatcpconntmpltable = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable() self.cipslatcpconntmpltable.parent = self self._children_name_map["cipslatcpconntmpltable"] = "cipslaTcpConnTmplTable" self._segment_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB, [], name, value) class CipslaIcmpEchoTmplTable(Entity): """ A table that contains ICMP echo template definitions. .. 
attribute:: cipslaicmpechotmplentry A row entry representing an IPSLA ICMP echo template **type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, self).__init__() self.yang_name = "cipslaIcmpEchoTmplTable" self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("cipslaIcmpEchoTmplEntry", ("cipslaicmpechotmplentry", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))]) self._leafs = OrderedDict() self.cipslaicmpechotmplentry = YList(self) self._segment_path = lambda: "cipslaIcmpEchoTmplTable" self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, [], name, value) class CipslaIcmpEchoTmplEntry(Entity): """ A row entry representing an IPSLA ICMP echo template. .. attribute:: cipslaicmpechotmplname (key) This field is used to specify the ICMP echo template name **type**\: str **length:** 1..64 .. attribute:: cipslaicmpechotmpldescription This field is used to provide description for the ICMP echo template **type**\: str **length:** 0..128 .. attribute:: cipslaicmpechotmplsrcaddrtype An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslaicmpechotmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslaicmpechotmpltimeout Specifies the duration to wait for a IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. attribute:: cipslaicmpechotmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslaicmpechotmplreqdatasize This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value **type**\: int **range:** 0..16384 **units**\: octets .. attribute:: cipslaicmpechotmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslaicmpechotmplvrfname This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation **type**\: str **length:** 0..32 .. attribute:: cipslaicmpechotmplthreshold This object defines an administrative threshold limit. 
If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslaicmpechotmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslaicmpechotmplhistbuckets The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslaicmpechotmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>` .. attribute:: cipslaicmpechotmplstatshours The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslaicmpechotmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslaicmpechotmpldistinterval The statistical distribution buckets interval. Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslaicmpechotmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslaicmpechotmplrowstatus The status of the conceptual ICMP echo template control row. 
When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, self).__init__() self.yang_name = "cipslaIcmpEchoTmplEntry" self.yang_parent_name = "cipslaIcmpEchoTmplTable" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['cipslaicmpechotmplname'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('cipslaicmpechotmplname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplName'), ['str'])), ('cipslaicmpechotmpldescription', (YLeaf(YType.str, 'cipslaIcmpEchoTmplDescription'), ['str'])), ('cipslaicmpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])), ('cipslaicmpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaIcmpEchoTmplSrcAddr'), ['str'])), ('cipslaicmpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTimeOut'), ['int'])), ('cipslaicmpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaIcmpEchoTmplVerifyData'), ['bool'])), ('cipslaicmpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize'), ['int'])), ('cipslaicmpechotmpltos', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTOS'), ['int'])), ('cipslaicmpechotmplvrfname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplVrfName'), ['str'])), ('cipslaicmpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplThreshold'), ['int'])), ('cipslaicmpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistLives'), ['int'])), ('cipslaicmpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistBuckets'), ['int'])), ('cipslaicmpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter')])), ('cipslaicmpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplStatsHours'), ['int'])), ('cipslaicmpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistBuckets'), ['int'])), ('cipslaicmpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistInterval'), ['int'])), ('cipslaicmpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])), ('cipslaicmpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])), ]) self.cipslaicmpechotmplname = None self.cipslaicmpechotmpldescription = None self.cipslaicmpechotmplsrcaddrtype = None self.cipslaicmpechotmplsrcaddr = None self.cipslaicmpechotmpltimeout = None self.cipslaicmpechotmplverifydata = None self.cipslaicmpechotmplreqdatasize = None self.cipslaicmpechotmpltos = None self.cipslaicmpechotmplvrfname = None self.cipslaicmpechotmplthreshold = None self.cipslaicmpechotmplhistlives = None self.cipslaicmpechotmplhistbuckets = None self.cipslaicmpechotmplhistfilter = None self.cipslaicmpechotmplstatshours = None self.cipslaicmpechotmpldistbuckets = None self.cipslaicmpechotmpldistinterval = None self.cipslaicmpechotmplstoragetype = None self.cipslaicmpechotmplrowstatus = None self._segment_path = lambda: "cipslaIcmpEchoTmplEntry" + "[cipslaIcmpEchoTmplName='" + str(self.cipslaicmpechotmplname) + "']" self._absolute_path = lambda: 
"CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaIcmpEchoTmplTable/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, ['cipslaicmpechotmplname', 'cipslaicmpechotmpldescription', 'cipslaicmpechotmplsrcaddrtype', 'cipslaicmpechotmplsrcaddr', 'cipslaicmpechotmpltimeout', 'cipslaicmpechotmplverifydata', 'cipslaicmpechotmplreqdatasize', 'cipslaicmpechotmpltos', 'cipslaicmpechotmplvrfname', 'cipslaicmpechotmplthreshold', 'cipslaicmpechotmplhistlives', 'cipslaicmpechotmplhistbuckets', 'cipslaicmpechotmplhistfilter', 'cipslaicmpechotmplstatshours', 'cipslaicmpechotmpldistbuckets', 'cipslaicmpechotmpldistinterval', 'cipslaicmpechotmplstoragetype', 'cipslaicmpechotmplrowstatus'], name, value) class CipslaIcmpEchoTmplHistFilter(Enum): """ CipslaIcmpEchoTmplHistFilter (Enum Class) Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded. .. data:: none = 1 .. data:: all = 2 .. data:: overThreshold = 3 .. data:: failures = 4 """ none = Enum.YLeaf(1, "none") all = Enum.YLeaf(2, "all") overThreshold = Enum.YLeaf(3, "overThreshold") failures = Enum.YLeaf(4, "failures") class CipslaUdpEchoTmplTable(Entity): """ A table that contains UDP echo template specific definitions. .. attribute:: cipslaudpechotmplentry A row entry representing an IPSLA UDP echo template **type**\: list of :py:class:`CipslaUdpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, self).__init__() self.yang_name = "cipslaUdpEchoTmplTable" self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("cipslaUdpEchoTmplEntry", ("cipslaudpechotmplentry", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))]) self._leafs = OrderedDict() self.cipslaudpechotmplentry = YList(self) self._segment_path = lambda: "cipslaUdpEchoTmplTable" self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, [], name, value) class CipslaUdpEchoTmplEntry(Entity): """ A row entry representing an IPSLA UDP echo template. .. attribute:: cipslaudpechotmplname (key) A string which specifies the UDP echo template name **type**\: str **length:** 1..64 .. attribute:: cipslaudpechotmpldescription A string which provides description to the UDP echo template **type**\: str **length:** 0..128 .. attribute:: cipslaudpechotmplcontrolenable If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router **type**\: bool .. attribute:: cipslaudpechotmplsrcaddrtype An enumerated value which specifies the IP address type of the source. 
It must be used along with the cipslaUdpEchoTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslaudpechotmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslaudpechotmplsrcport This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system **type**\: int **range:** 0..65535 .. attribute:: cipslaudpechotmpltimeout Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. attribute:: cipslaudpechotmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslaudpechotmplreqdatasize This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' RTT request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value **type**\: int **range:** 4..1500 **units**\: octets .. attribute:: cipslaudpechotmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslaudpechotmplvrfname This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing Table for this operation **type**\: str **length:** 0..32 .. attribute:: cipslaudpechotmplthreshold This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaUdpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslaudpechotmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslaudpechotmplhistbuckets The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaUdpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaUdpEchoTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslaudpechotmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. 
failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaUdpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter>` .. attribute:: cipslaudpechotmplstatshours The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslaudpechotmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaUdpEchoTmplStatsNumDistBuckets will be kept. The last cipslaUdpEchoTmplStatsNumDistBuckets will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslaudpechotmpldistinterval The statistical distribution buckets interval. Distribution Bucket Example\: cipslaUdpEchoTmplDistBuckets = 5 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaUdpEchoTmplDistBuckets = 1 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaUdpEchoTmplDistInterval does not apply when cipslaUdpEchoTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslaudpechotmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslaudpechotmplrowstatus The status of the conceptual UDP echo template control row. 
When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, self).__init__() self.yang_name = "cipslaUdpEchoTmplEntry" self.yang_parent_name = "cipslaUdpEchoTmplTable" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['cipslaudpechotmplname'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('cipslaudpechotmplname', (YLeaf(YType.str, 'cipslaUdpEchoTmplName'), ['str'])), ('cipslaudpechotmpldescription', (YLeaf(YType.str, 'cipslaUdpEchoTmplDescription'), ['str'])), ('cipslaudpechotmplcontrolenable', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplControlEnable'), ['bool'])), ('cipslaudpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])), ('cipslaudpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaUdpEchoTmplSrcAddr'), ['str'])), ('cipslaudpechotmplsrcport', (YLeaf(YType.uint16, 'cipslaUdpEchoTmplSrcPort'), ['int'])), ('cipslaudpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTimeOut'), ['int'])), ('cipslaudpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplVerifyData'), ['bool'])), ('cipslaudpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplReqDataSize'), ['int'])), ('cipslaudpechotmpltos', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTOS'), ['int'])), ('cipslaudpechotmplvrfname', (YLeaf(YType.str, 'cipslaUdpEchoTmplVrfName'), ['str'])), ('cipslaudpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplThreshold'), ['int'])), ('cipslaudpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistLives'), ['int'])), ('cipslaudpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistBuckets'), ['int'])), ('cipslaudpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter')])), ('cipslaudpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplStatsHours'), ['int'])), ('cipslaudpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistBuckets'), ['int'])), ('cipslaudpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistInterval'), ['int'])), ('cipslaudpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])), ('cipslaudpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])), ]) self.cipslaudpechotmplname = None self.cipslaudpechotmpldescription = None self.cipslaudpechotmplcontrolenable = None self.cipslaudpechotmplsrcaddrtype = None self.cipslaudpechotmplsrcaddr = None self.cipslaudpechotmplsrcport = None self.cipslaudpechotmpltimeout = None self.cipslaudpechotmplverifydata = None self.cipslaudpechotmplreqdatasize = None self.cipslaudpechotmpltos = None self.cipslaudpechotmplvrfname = None self.cipslaudpechotmplthreshold = None self.cipslaudpechotmplhistlives = None self.cipslaudpechotmplhistbuckets = None self.cipslaudpechotmplhistfilter = None self.cipslaudpechotmplstatshours = None self.cipslaudpechotmpldistbuckets = None self.cipslaudpechotmpldistinterval = None self.cipslaudpechotmplstoragetype = None 
self.cipslaudpechotmplrowstatus = None self._segment_path = lambda: "cipslaUdpEchoTmplEntry" + "[cipslaUdpEchoTmplName='" + str(self.cipslaudpechotmplname) + "']" self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaUdpEchoTmplTable/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, ['cipslaudpechotmplname', 'cipslaudpechotmpldescription', 'cipslaudpechotmplcontrolenable', 'cipslaudpechotmplsrcaddrtype', 'cipslaudpechotmplsrcaddr', 'cipslaudpechotmplsrcport', 'cipslaudpechotmpltimeout', 'cipslaudpechotmplverifydata', 'cipslaudpechotmplreqdatasize', 'cipslaudpechotmpltos', 'cipslaudpechotmplvrfname', 'cipslaudpechotmplthreshold', 'cipslaudpechotmplhistlives', 'cipslaudpechotmplhistbuckets', 'cipslaudpechotmplhistfilter', 'cipslaudpechotmplstatshours', 'cipslaudpechotmpldistbuckets', 'cipslaudpechotmpldistinterval', 'cipslaudpechotmplstoragetype', 'cipslaudpechotmplrowstatus'], name, value) class CipslaUdpEchoTmplHistFilter(Enum): """ CipslaUdpEchoTmplHistFilter (Enum Class) Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded. .. data:: none = 1 .. data:: all = 2 .. data:: overThreshold = 3 .. data:: failures = 4 """ none = Enum.YLeaf(1, "none") all = Enum.YLeaf(2, "all") overThreshold = Enum.YLeaf(3, "overThreshold") failures = Enum.YLeaf(4, "failures") class CipslaTcpConnTmplTable(Entity): """ A table that contains TCP connect template specific definitions. .. attribute:: cipslatcpconntmplentry A row entry representing an IPSLA TCP connect template **type**\: list of :py:class:`CipslaTcpConnTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, self).__init__() self.yang_name = "cipslaTcpConnTmplTable" self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("cipslaTcpConnTmplEntry", ("cipslatcpconntmplentry", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))]) self._leafs = OrderedDict() self.cipslatcpconntmplentry = YList(self) self._segment_path = lambda: "cipslaTcpConnTmplTable" self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, [], name, value) class CipslaTcpConnTmplEntry(Entity): """ A row entry representing an IPSLA TCP connect template. .. attribute:: cipslatcpconntmplname (key) A string which specifies the TCP connect template name **type**\: str **length:** 1..64 .. attribute:: cipslatcpconntmpldescription A string which provides description for the TCP connect template **type**\: str **length:** 0..128 .. 
attribute:: cipslatcpconntmplcontrolenable If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router **type**\: bool .. attribute:: cipslatcpconntmplsrcaddrtype An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaTcpConnTmplSrcAddr object **type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>` .. attribute:: cipslatcpconntmplsrcaddr A string which specifies the IP address of the source **type**\: str **length:** 0..255 .. attribute:: cipslatcpconntmplsrcport This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system **type**\: int **range:** 0..65535 .. attribute:: cipslatcpconntmpltimeout Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout **type**\: int **range:** 0..604800000 **units**\: milliseconds .. attribute:: cipslatcpconntmplverifydata When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size **type**\: bool .. attribute:: cipslatcpconntmpltos This object represents the type of service octet in an IP header **type**\: int **range:** 0..255 .. attribute:: cipslatcpconntmplthreshold This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaTcpConnTmplHistFilter is satisfied, one threshold crossing occurrence will be counted **type**\: int **range:** 0..2147483647 **units**\: milliseconds .. attribute:: cipslatcpconntmplhistlives The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection **type**\: int **range:** 0..2 .. attribute:: cipslatcpconntmplhistbuckets The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaTcpConnTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaTcpConnTmplHistBuckets buckets are retained **type**\: int **range:** 1..60 .. attribute:: cipslatcpconntmplhistfilter Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded **type**\: :py:class:`CipslaTcpConnTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter>` .. attribute:: cipslatcpconntmplstatshours The maximum number of hours for which statistics are maintained. 
Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection **type**\: int **range:** 0..25 **units**\: hours .. attribute:: cipslatcpconntmpldistbuckets The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaTcpConnTmplDistBuckets will be kept. The last cipslaTcpConnTmplDistBuckets will contain all entries from its distribution interval start point to infinity **type**\: int **range:** 1..20 .. attribute:: cipslatcpconntmpldistinterval The statistical distribution buckets interval. Distribution Bucket Example\: cipslaTcpConnTmplDistBuckets = 5 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaTcpConnTmplDistBuckets = 1 buckets cipslaTcpConnTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaTcpConnTmplDistInterval does not apply when cipslaTcpConnTmplDistBuckets is one **type**\: int **range:** 1..100 **units**\: milliseconds .. attribute:: cipslatcpconntmplstoragetype The storage type of this conceptual row **type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>` .. attribute:: cipslatcpconntmplrowstatus The status of the conceptual tcp connect control row. When the status is active, all the read\-create objects in that row can be modified **type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>` """ _prefix = 'CISCO-IPSLA-ECHO-MIB' _revision = '2007-08-16' def __init__(self): super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, self).__init__() self.yang_name = "cipslaTcpConnTmplEntry" self.yang_parent_name = "cipslaTcpConnTmplTable" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['cipslatcpconntmplname'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('cipslatcpconntmplname', (YLeaf(YType.str, 'cipslaTcpConnTmplName'), ['str'])), ('cipslatcpconntmpldescription', (YLeaf(YType.str, 'cipslaTcpConnTmplDescription'), ['str'])), ('cipslatcpconntmplcontrolenable', (YLeaf(YType.boolean, 'cipslaTcpConnTmplControlEnable'), ['bool'])), ('cipslatcpconntmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])), ('cipslatcpconntmplsrcaddr', (YLeaf(YType.str, 'cipslaTcpConnTmplSrcAddr'), ['str'])), ('cipslatcpconntmplsrcport', (YLeaf(YType.uint16, 'cipslaTcpConnTmplSrcPort'), ['int'])), ('cipslatcpconntmpltimeout', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTimeOut'), ['int'])), ('cipslatcpconntmplverifydata', (YLeaf(YType.boolean, 'cipslaTcpConnTmplVerifyData'), ['bool'])), ('cipslatcpconntmpltos', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTOS'), ['int'])), ('cipslatcpconntmplthreshold', (YLeaf(YType.uint32, 'cipslaTcpConnTmplThreshold'), ['int'])), ('cipslatcpconntmplhistlives', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistLives'), ['int'])), ('cipslatcpconntmplhistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistBuckets'), ['int'])), ('cipslatcpconntmplhistfilter', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplHistFilter'), 
[('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter')])), ('cipslatcpconntmplstatshours', (YLeaf(YType.uint32, 'cipslaTcpConnTmplStatsHours'), ['int'])), ('cipslatcpconntmpldistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistBuckets'), ['int'])), ('cipslatcpconntmpldistinterval', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistInterval'), ['int'])), ('cipslatcpconntmplstoragetype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])), ('cipslatcpconntmplrowstatus', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])), ]) self.cipslatcpconntmplname = None self.cipslatcpconntmpldescription = None self.cipslatcpconntmplcontrolenable = None self.cipslatcpconntmplsrcaddrtype = None self.cipslatcpconntmplsrcaddr = None self.cipslatcpconntmplsrcport = None self.cipslatcpconntmpltimeout = None self.cipslatcpconntmplverifydata = None self.cipslatcpconntmpltos = None self.cipslatcpconntmplthreshold = None self.cipslatcpconntmplhistlives = None self.cipslatcpconntmplhistbuckets = None self.cipslatcpconntmplhistfilter = None self.cipslatcpconntmplstatshours = None self.cipslatcpconntmpldistbuckets = None self.cipslatcpconntmpldistinterval = None self.cipslatcpconntmplstoragetype = None self.cipslatcpconntmplrowstatus = None self._segment_path = lambda: "cipslaTcpConnTmplEntry" + "[cipslaTcpConnTmplName='" + str(self.cipslatcpconntmplname) + "']" self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaTcpConnTmplTable/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, ['cipslatcpconntmplname', 'cipslatcpconntmpldescription', 'cipslatcpconntmplcontrolenable', 'cipslatcpconntmplsrcaddrtype', 'cipslatcpconntmplsrcaddr', 'cipslatcpconntmplsrcport', 'cipslatcpconntmpltimeout', 'cipslatcpconntmplverifydata', 'cipslatcpconntmpltos', 'cipslatcpconntmplthreshold', 'cipslatcpconntmplhistlives', 'cipslatcpconntmplhistbuckets', 'cipslatcpconntmplhistfilter', 'cipslatcpconntmplstatshours', 'cipslatcpconntmpldistbuckets', 'cipslatcpconntmpldistinterval', 'cipslatcpconntmplstoragetype', 'cipslatcpconntmplrowstatus'], name, value) class CipslaTcpConnTmplHistFilter(Enum): """ CipslaTcpConnTmplHistFilter (Enum Class) Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded. .. data:: none = 1 .. data:: all = 2 .. data:: overThreshold = 3 .. data:: failures = 4 """ none = Enum.YLeaf(1, "none") all = Enum.YLeaf(2, "all") overThreshold = Enum.YLeaf(3, "overThreshold") failures = Enum.YLeaf(4, "failures") def clone_ptr(self): self._top_entity = CISCOIPSLAECHOMIB() return self._top_entity
55.527293
720
0.624855
49,758
0.978275
0
0
0
0
0
0
36,648
0.720524
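The generated ydk-py classes in the record above are plain data containers: each template table holds a YList of entries keyed by the template name, and each entry exposes the MIB leaves as Python attributes. A minimal sketch of how such an entry might be populated (the template name and values are hypothetical; pushing the object to a device would additionally need a ydk service provider, which is not shown):

from ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB import CISCOIPSLAECHOMIB

mib = CISCOIPSLAECHOMIB()

# Build one ICMP echo template entry (all values below are made-up examples).
entry = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry()
entry.cipslaicmpechotmplname = 'icmp-echo-probe-1'              # key leaf, 1..64 chars
entry.cipslaicmpechotmpldescription = 'example ICMP echo template'
entry.cipslaicmpechotmpltimeout = 5000                          # milliseconds
entry.cipslaicmpechotmplthreshold = 200                         # milliseconds
entry.cipslaicmpechotmplhistfilter = \
    CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter.overThreshold

# Entries are appended to the YList held by the parent table.
mib.cipslaicmpechotmpltable.cipslaicmpechotmplentry.append(entry)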
fea2c153f85345b8df258b2faf5084ce932ff128
4,057
py
Python
example/model-parallel/matrix_factorization/train.py
tkameyama/incubator-mxnet
47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96
[ "Apache-2.0" ]
1
2022-01-22T02:29:24.000Z
2022-01-22T02:29:24.000Z
example/model-parallel/matrix_factorization/train.py
tkameyama/incubator-mxnet
47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96
[ "Apache-2.0" ]
null
null
null
example/model-parallel/matrix_factorization/train.py
tkameyama/incubator-mxnet
47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net

logging.basicConfig(level=logging.DEBUG)

parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
                    help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
                    help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
                    help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
                    help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
                    help="number of gpus to use")

MOVIELENS = {
    'dataset': 'ml-10m',
    'train': './ml-10M100K/r1.train',
    'val': './ml-10M100K/r1.test',
    'max_user': 71569,
    'max_movie': 65135,
}

if __name__ == '__main__':
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=head)

    # arg parser
    args = parser.parse_args()
    logging.info(args)
    num_epoch = args.num_epoch
    batch_size = args.batch_size
    optimizer = 'sgd'
    factor_size = args.factor_size
    print_every = args.print_every
    num_gpus = args.num_gpus

    momentum = 0.9
    learning_rate = 0.1

    # prepare dataset and iterators
    max_user = MOVIELENS['max_user']
    max_movies = MOVIELENS['max_movie']
    get_movielens_data(MOVIELENS['dataset'])
    train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
    val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)

    # construct the model
    net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)

    # construct the module
    # map the ctx_group attribute to the context assignment
    group2ctxs={'dev1':[mx.cpu()]*num_gpus, 'dev2':[mx.gpu(i) for i in range(num_gpus)]}

    # Creating a module by passing group2ctxs attribute which maps
    # the ctx_group attribute to the context assignment
    mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus,
                           data_names=['user', 'item'], label_names=['score'],
                           group2ctxs=group2ctxs)

    # the initializer used to initialize the parameters
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)

    # the parameters for the optimizer constructor
    optimizer_params = {
        'learning_rate': learning_rate,
        'wd': 1e-4,
        'momentum': momentum,
        'rescale_grad': 1.0/batch_size}

    # use MSE as the metric
    metric = mx.gluon.metric.create(['MSE'])

    speedometer = mx.callback.Speedometer(batch_size, print_every)

    # start training
    mod.fit(train_iter,
            val_iter,
            eval_metric=metric,
            num_epoch=num_epoch,
            optimizer=optimizer,
            optimizer_params=optimizer_params,
            initializer=initializer,
            batch_end_callback=speedometer)
36.881818
98
0.682031
0
0
0
0
0
0
0
0
1,685
0.415332
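This training script depends on the symbol graph built in model.py (not included in this record) tagging operators with a ctx_group attribute, which the group2ctxs argument then maps to concrete devices. A rough sketch of that pairing, assuming the usual AttrScope mechanism; the layer shapes and group assignment are guesses, not the actual contents of model.py:

import mxnet as mx

# Hypothetical sketch: embeddings pinned to the 'dev1' group (CPUs in train.py),
# the score computation to 'dev2' (GPUs), mirroring the group2ctxs mapping above.
user = mx.sym.Variable('user')
item = mx.sym.Variable('item')
score = mx.sym.Variable('score')

with mx.AttrScope(ctx_group='dev1'):
    user_weight = mx.sym.Variable('user_weight')
    user_emb = mx.sym.Embedding(data=user, weight=user_weight, input_dim=71569, output_dim=128)
    item_weight = mx.sym.Variable('item_weight')
    item_emb = mx.sym.Embedding(data=item, weight=item_weight, input_dim=65135, output_dim=128)

with mx.AttrScope(ctx_group='dev2'):
    pred = mx.sym.sum_axis(user_emb * item_emb, axis=1)
    net = mx.sym.LinearRegressionOutput(data=pred, label=score)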
fea4ed769af71f922b55fc3fe0ad5f2f54ffbfef
762
py
Python
scripts/libfranka_gui_gripper_run.py
nbfigueroa/franka_interactive_controllers
7befdd5fbaa3c7a83b931292fab39ab98754a60c
[ "MIT" ]
6
2021-12-08T09:32:57.000Z
2022-03-20T09:22:29.000Z
scripts/libfranka_gui_gripper_run.py
nbfigueroa/franka_interactive_controllers
7befdd5fbaa3c7a83b931292fab39ab98754a60c
[ "MIT" ]
null
null
null
scripts/libfranka_gui_gripper_run.py
nbfigueroa/franka_interactive_controllers
7befdd5fbaa3c7a83b931292fab39ab98754a60c
[ "MIT" ]
3
2022-02-01T12:30:47.000Z
2022-03-24T10:31:04.000Z
#!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen

top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")


def open():
    node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 1'))
    messagebox.showinfo("Open Gripper", "Gripper Opened")
    node_process.terminate()


def close():
    node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 0'))
    messagebox.showinfo("Close Gripper", "Gripper Closed")
    node_process.terminate()


B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)

B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)

top.mainloop()
25.4
99
0.745407
0
0
0
0
0
0
0
0
270
0.354331
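Both button callbacks in the script follow the same pattern: build the rosrun command with shlex, launch it with psutil's Popen, show a confirmation dialog, then terminate the node. A hedged sketch of how that pattern could be factored into one helper; the choice to wait for the node instead of terminating it is an assumption, not what the original script does:

import shlex
from psutil import Popen

def run_gripper_command(open_gripper):
    """Launch the libfranka gripper node with 1 (open) or 0 (close)."""
    arg = 1 if open_gripper else 0
    cmd = 'rosrun franka_interactive_controllers libfranka_gripper_run {}'.format(arg)
    node_process = Popen(shlex.split(cmd))
    # Assumption: block until the gripper node exits rather than terminating it immediately.
    node_process.wait()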
fea585d93413c287bd31eaa0525d97e26cbdcd0b
742
py
Python
codeforces.com/1669F/solution.py
zubtsov/competitive-programming
919d63130144347d7f6eddcf8f5bc2afb85fddf3
[ "MIT" ]
null
null
null
codeforces.com/1669F/solution.py
zubtsov/competitive-programming
919d63130144347d7f6eddcf8f5bc2afb85fddf3
[ "MIT" ]
null
null
null
codeforces.com/1669F/solution.py
zubtsov/competitive-programming
919d63130144347d7f6eddcf8f5bc2afb85fddf3
[ "MIT" ]
null
null
null
for i in range(int(input())):
    number_of_candies = int(input())
    candies_weights = list(map(int, input().split()))

    bob_pos = number_of_candies - 1
    alice_pos = 0
    bob_current_weight = 0
    alice_current_weight = 0
    last_equal_candies_total_number = 0

    while alice_pos <= bob_pos:
        if alice_current_weight <= bob_current_weight:
            alice_current_weight += candies_weights[alice_pos]
            alice_pos += 1
        else:
            bob_current_weight += candies_weights[bob_pos]
            bob_pos -= 1

        if alice_current_weight == bob_current_weight:
            last_equal_candies_total_number = alice_pos + (number_of_candies - bob_pos - 1)

    print(last_equal_candies_total_number)
29.68
91
0.665768
0
0
0
0
0
0
0
0
0
0
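The loop in this solution is a two-pointer greedy: whichever side has eaten less total weight eats the next candy from its end, and every time the two totals tie, the combined count of eaten candies is recorded. Wrapped as a function for illustration; the wrapper and the sample value are not part of the submitted solution:

def max_equal_candies(weights):
    left, right = 0, len(weights) - 1
    alice, bob, best = 0, 0, 0
    while left <= right:
        if alice <= bob:
            alice += weights[left]
            left += 1
        else:
            bob += weights[right]
            right -= 1
        # Record the best split whenever the two totals are equal.
        if alice == bob:
            best = left + (len(weights) - right - 1)
    return best

# Example: Alice eats the left 10, Bob the right 10, the totals tie after 2 candies;
# the middle 20 can never be matched, so the answer stays 2.
assert max_equal_candies([10, 20, 10]) == 2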
fea64ce26f29e53484b8013f735f948fef203460
12,293
py
Python
client/client_build.py
patriotemeritus/grr
bf2b9268c8b9033ab091e27584986690438bd7c3
[ "Apache-2.0" ]
1
2015-06-24T09:07:20.000Z
2015-06-24T09:07:20.000Z
client/client_build.py
patriotemeritus/grr
bf2b9268c8b9033ab091e27584986690438bd7c3
[ "Apache-2.0" ]
3
2020-02-11T22:29:15.000Z
2021-06-10T17:44:31.000Z
client/client_build.py
wandec/grr
7fb7e6d492d1325a5fe1559d3aeae03a301c4baa
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """This tool builds or repacks the client binaries. This handles invocations for the build across the supported platforms including handling Visual Studio, pyinstaller and other packaging mechanisms. """ import logging import os import platform import time # pylint: disable=unused-import from grr.client import client_plugins # pylint: enable=unused-import from grr.lib import build from grr.lib import builders from grr.lib import config_lib from grr.lib import flags from grr.lib import startup parser = flags.PARSER # Guess which arch we should be building based on where we are running. if platform.architecture()[0] == "32bit": default_arch = "i386" else: default_arch = "amd64" default_platform = platform.system().lower() parser.add_argument( "--platform", choices=["darwin", "linux", "windows"], default=default_platform, help="The platform to build or repack for. This will default to " "the current platform: %s." % platform.system()) parser.add_argument( "--arch", choices=["amd64", "i386"], default=default_arch, help="The architecture to build or repack for.") # Guess which package format we should be building based on where we are # running. if default_platform == "linux": distro = platform.linux_distribution()[0] if distro in ["Ubuntu", "debian"]: default_package = "deb" elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]: default_package = "rpm" else: default_package = None elif default_platform == "darwin": default_package = "dmg" elif default_platform == "windows": default_package = "exe" parser.add_argument( "--package_format", choices=["deb", "rpm"], default=default_package, help="The packaging format to use when building a Linux client.") # Initialize sub parsers and their arguments. subparsers = parser.add_subparsers( title="subcommands", dest="subparser_name", description="valid subcommands") # Build arguments. parser_build = subparsers.add_parser( "build", help="Build a client from source.") parser_repack = subparsers.add_parser( "repack", help="Repack a zip file into an installer (Only useful when " "signing).") parser_repack.add_argument("--template", default=None, help="The template zip file to repack.") parser_repack.add_argument("--output", default=None, help="The path to write the output installer.") parser_repack.add_argument("--outputdir", default="", help="The directory to which we should write the " "output installer. Installers will be named " "automatically from config options. Incompatible" " with --output") parser_repack.add_argument("--debug_build", action="store_true", default=False, help="Create a debug client.") parser_repack.add_argument("-p", "--plugins", default=[], nargs="+", help="Additional python files that will be loaded " "as custom plugins.") parser_deploy = subparsers.add_parser( "deploy", help="Build a deployable self installer from a package.") parser_deploy.add_argument("--template", default=None, help="The template zip file to deploy.") parser_deploy.add_argument("--templatedir", default="", help="Directory containing template zip files to " "repack. Incompatible with --template") parser_deploy.add_argument("--output", default=None, help="The path to write the output installer.") parser_deploy.add_argument("--outputdir", default="", help="The directory to which we should write the " "output installer. Installers will be named " "automatically from config options. 
Incompatible" " with --output") parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+", help="Additional python files that will be loaded " "as custom plugins.") parser_deploy.add_argument("--debug_build", action="store_true", default=False, help="Create a debug client.") parser_buildanddeploy = subparsers.add_parser( "buildanddeploy", help="Build and deploy clients for multiple labels and architectures.") parser_buildanddeploy.add_argument("--template", default=None, help="The template zip file to repack, if " "none is specified we will build it.") args = parser.parse_args() def GetBuilder(context): """Get the appropriate builder based on the selected flags.""" try: if args.platform == "darwin": context = ["Platform:Darwin"] + context builder_obj = builders.DarwinClientBuilder elif args.platform == "windows": context = ["Platform:Windows"] + context builder_obj = builders.WindowsClientBuilder elif args.platform == "linux": if args.package_format == "deb": context = ["Platform:Linux"] + context builder_obj = builders.LinuxClientBuilder elif args.package_format == "rpm": context = ["Platform:Linux", "Target:LinuxRpm"] + context builder_obj = builders.CentosClientBuilder else: parser.error("Couldn't guess packaging format for: %s" % platform.linux_distribution()[0]) else: parser.error("Unsupported build platform: %s" % args.platform) except AttributeError: raise RuntimeError("Unable to build for platform %s when running " "on current platform." % args.platform) return builder_obj(context=context) def GetDeployer(context): """Get the appropriate client deployer based on the selected flags.""" if args.platform == "darwin": context = ["Platform:Darwin"] + context deployer_obj = build.DarwinClientDeployer elif args.platform == "windows": context = ["Platform:Windows"] + context deployer_obj = build.WindowsClientDeployer elif args.platform == "linux": if args.package_format == "deb": context = ["Platform:Linux"] + context deployer_obj = build.LinuxClientDeployer else: context = ["Platform:Linux", "Target:LinuxRpm"] + context deployer_obj = build.CentosClientDeployer else: parser.error("Unsupported build platform: %s" % args.platform) return deployer_obj(context=context) def TemplateInputFilename(context): """Build template file name from config.""" if args.templatedir: filename = config_lib.CONFIG.Get("PyInstaller.template_filename", context=context) return os.path.join(args.templatedir, filename) return None def BuildAndDeploy(context): """Run build and deploy to create installers.""" # ISO 8601 date timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z") if args.plugins: config_lib.CONFIG.Set("Client.plugins", args.plugins) # Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/ spec = "_".join((args.platform, args.arch, args.package_format)) output_dir = os.path.join(config_lib.CONFIG.Get( "ClientBuilder.executables_path", context=context), timestamp, spec) # If we weren't passed a template, build one if args.template: template_path = args.template else: template_path = os.path.join(output_dir, config_lib.CONFIG.Get( "PyInstaller.template_filename", context=context)) builder_obj = GetBuilder(context) builder_obj.MakeExecutableTemplate(output_file=template_path) # Get the list of contexts which we should be building. 
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets") logging.info("Building installers for: %s", context_list) config_orig = config_lib.CONFIG.ExportState() deployed_list = [] for deploycontext in context_list: # Add the settings for this context for newcontext in deploycontext.split(","): config_lib.CONFIG.AddContext(newcontext) context.append(newcontext) try: # If the ClientBuilder.target_platforms doesn't match our environment, # skip. if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch, args.package_format): continue deployer = GetDeployer(context) # Make a nicer filename out of the context string. context_filename = deploycontext.replace( "AllPlatforms Context,", "").replace(",", "_").replace(" ", "_") deployed_list.append(context_filename) output_filename = os.path.join( output_dir, context_filename, config_lib.CONFIG.Get("ClientBuilder.output_filename", context=deployer.context)) logging.info("Deploying %s as %s with labels: %s", deploycontext, config_lib.CONFIG.Get( "Client.name", context=deployer.context), config_lib.CONFIG.Get( "Client.labels", context=deployer.context)) deployer.MakeDeployableBinary(template_path, output_filename) finally: # Remove the custom settings for the next deploy for newcontext in deploycontext.split(","): context.remove(newcontext) config_lib.ImportConfigManger(config_orig) logging.info("Complete, installers for %s are in %s", deployed_list, output_dir) def main(_): """Launch the appropriate builder.""" config_lib.CONFIG.AddContext( "ClientBuilder Context", "Context applied when we run the client builder script.") startup.ClientInit() # Make sure we have all the secondary configs since they may be set under the # ClientBuilder Context for secondconfig in config_lib.CONFIG["ConfigIncludes"]: config_lib.CONFIG.LoadSecondaryConfig(secondconfig) # Use basic console output logging so we can see what is happening. logger = logging.getLogger() handler = logging.StreamHandler() handler.setLevel(logging.INFO) logger.handlers = [handler] # The following is used to change the identity of the builder based on the # target platform. context = flags.FLAGS.context if args.arch == "amd64": context.append("Arch:amd64") else: context.append("Arch:i386") if args.subparser_name == "build": builder_obj = GetBuilder(context) builder_obj.MakeExecutableTemplate() elif args.subparser_name == "repack": if args.plugins: config_lib.CONFIG.Set("Client.plugins", args.plugins) if args.debug_build: context += ["DebugClientBuild Context"] deployer = GetDeployer(context) output_filename = os.path.join( args.outputdir, config_lib.CONFIG.Get( "ClientBuilder.output_filename", context=deployer.context)) deployer.RepackInstaller(open(args.template, "rb").read(), args.output or output_filename) elif args.subparser_name == "deploy": if args.plugins: config_lib.CONFIG.Set("Client.plugins", args.plugins) if args.debug_build: context += ["DebugClientBuild Context"] deployer = GetDeployer(context) template_path = (args.template or TemplateInputFilename(deployer.context) or config_lib.CONFIG.Get("ClientBuilder.template_path", context=deployer.context)) # If neither output filename or output directory is specified, # use the default location from the config file. output = None if args.output: output = args.output elif args.outputdir: # If output filename isn't specified, write to args.outputdir with a # .deployed extension so we can distinguish it from repacked binaries. 
filename = ".".join( (config_lib.CONFIG.Get("ClientBuilder.output_filename", context=deployer.context), "deployed")) output = os.path.join(args.outputdir, filename) deployer.MakeDeployableBinary(template_path, output) elif args.subparser_name == "buildanddeploy": BuildAndDeploy(context) if __name__ == "__main__": flags.StartMain(main)
35.631884
80
0.663467
0
0
0
0
0
0
0
0
4,487
0.365004
fea677c9a939d2a74e86aae5f8b7734e53289cfd
1,549
py
Python
Greyatom-projects/code.py
naveena41/greyatom-python-for-data-science
3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1
[ "MIT" ]
null
null
null
Greyatom-projects/code.py
naveena41/greyatom-python-for-data-science
3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1
[ "MIT" ]
null
null
null
Greyatom-projects/code.py
naveena41/greyatom-python-for-data-science
3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1
[ "MIT" ]
null
null
null
# --------------
# Code starts here

# Create the lists
class_1 = ['geoffrey hinton', 'andrew ng', 'sebastian raschka', 'yoshu bengio']

class_2 = ['hilary mason', 'carla gentry', 'corinna cortes']

# Concatenate both the strings
new_class = class_1+class_2
print(new_class)

# Append the list
new_class.append('peter warden')

# Print updated list
print(new_class)

# Remove the element from the list
new_class.remove('carla gentry')

# Print the list
print(new_class)

# Create the Dictionary
courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science":60}

# Slice the dict and stores the all subjects marks in variable
total = 65+70+80+70+60
print(total)

# Store the all the subject in one variable `Total`

# Print the total

# Insert percentage formula
percentage =float(total)*(100/500)

# Print the percentage
print(percentage)

# Create the Dictionary
mathematics = {"geoffery hinton" :78, "andrew ng" :95, "sebastian raschka" :65, "yoshua benjio" :50, "hilary mason" :70, "corinna cortes" :66, "peter warden" :75}

topper = max(mathematics,key = mathematics.get)
print(topper)

# Given string
print(topper.split())

# Create variable first_name
first_name = 'andrew'

# Create variable Last_name and store last two element in the list
Last_name ='ng'

# Concatenate the string
full_name = Last_name+' '+first_name

# print the full_name
print(full_name)

# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)

# Code ends here
24.983871
163
0.701097
0
0
0
0
0
0
0
0
911
0.588121
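Two details of this exercise are worth noting: the total is hard-coded even though the marks already live in the courses dictionary, and the percentage formula divides by 500 because there are five subjects each scored out of 100. A hedged alternative sketch that derives both values from the dictionary directly (same result, 69.0):

courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science": 60}

# Sum the marks from the dictionary instead of re-typing them.
total = sum(courses.values())            # 345
max_marks = 100 * len(courses)           # assumes every subject is out of 100
percentage = total * 100 / max_marks     # 69.0, matching the hard-coded version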
fea776840ba3b32f75565766babfd041aa64ab68
1,830
py
Python
environments/recommenders/recsim_wrapper_test.py
jackblandin/ml-fairness-gym
dce1feaacf2588e0a2d6187e896796241a25ed81
[ "Apache-2.0" ]
null
null
null
environments/recommenders/recsim_wrapper_test.py
jackblandin/ml-fairness-gym
dce1feaacf2588e0a2d6187e896796241a25ed81
[ "Apache-2.0" ]
null
null
null
environments/recommenders/recsim_wrapper_test.py
jackblandin/ml-fairness-gym
dce1feaacf2588e0a2d6187e896796241a25ed81
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for recsim.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import absltest
import test_util
from environments.recommenders import recsim_wrapper
from recsim.environments import interest_exploration


class RecommenderTest(absltest.TestCase):

  def test_interest_exploration_can_run(self):
    env_config = {
        'num_candidates': 5,
        'slate_size': 2,
        'resample_documents': False,
        'seed': 100,
    }
    params = recsim_wrapper.Params(
        recsim_env=interest_exploration.create_environment(env_config))
    env = recsim_wrapper.RecsimWrapper(params)
    test_util.run_test_simulation(env=env, stackelberg=True)

  def test_interest_exploration_can_run_with_resampling(self):
    env_config = {
        'num_candidates': 5,
        'slate_size': 2,
        'resample_documents': True,
        'seed': 100,
    }
    params = recsim_wrapper.Params(
        recsim_env=interest_exploration.create_environment(env_config))
    env = recsim_wrapper.RecsimWrapper(params)
    test_util.run_test_simulation(env=env, stackelberg=True)


if __name__ == '__main__':
  absltest.main()
31.551724
74
0.742623
858
0.468852
0
0
0
0
0
0
755
0.412568
fea7d2eca288a3ef4c60e731703c65a5e9641808
3,034
py
Python
moss_client_cli.py
mernst32/dl-searchcode-code
504fe59df245ba123ad8ad6e45f03b17de6ef236
[ "MIT" ]
null
null
null
moss_client_cli.py
mernst32/dl-searchcode-code
504fe59df245ba123ad8ad6e45f03b17de6ef236
[ "MIT" ]
null
null
null
moss_client_cli.py
mernst32/dl-searchcode-code
504fe59df245ba123ad8ad6e45f03b17de6ef236
[ "MIT" ]
null
null
null
import argparse
import csv
import os

from moss_client.core import submit_and_dl, parse_moss_reports

data_folder = 'data'


def handle_input(user_id, base_folder, parse, only_parse, join_file, batch):
    global data_folder
    abs_path = os.path.abspath(os.path.dirname(__file__))
    root_data_folder = os.path.join(abs_path, data_folder)
    if not os.path.exists(root_data_folder):
        os.makedirs(root_data_folder)
    report_links_file = os.path.join(root_data_folder, 'links_to_moss_reports.html')
    report_csv_file = os.path.join(root_data_folder, 'moss_report.csv')

    if not os.path.isabs(base_folder):
        base_folder = os.path.join(abs_path, base_folder)
    if len(join_file) > 0:
        expected_keys = ["SC_Filepath", "Stackoverflow_Links"]
        with open(join_file, mode='r', encoding='utf-8') as csv_file:
            csv_reader = csv.DictReader(csv_file)
            actual_keys = csv_reader.fieldnames
            if expected_keys[0] != actual_keys[0] or expected_keys[1] != actual_keys[1]:
                print("Error: Unexpected Headers! SC_Filepath and Stackoverflow_Links are required!")
                return -1
    if not only_parse:
        submit_and_dl(user_id, base_folder, report_links_file, batch)
    if parse or only_parse:
        print("Parsing the moss reports...")
        parse_moss_reports(report_links_file, report_csv_file, join_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="MOSS CLI client for submitting java files to the service and downloading the report from the "
                    "service locally. Goes through the sub folders of the given folder, submits the java files "
                    "for plagiarism checks and downloads the reports locally, creating a linking file in the process.")
    parser.add_argument('user_id', metavar='U', nargs=1, help="Your user-id for the MOSS service.")
    parser.add_argument('folder', metavar='F', nargs=1, help="The folder whose contents you want to submit.")
    parser.add_argument('-p', '--parse', action='store_true', help="Parses the moss reports into a csv file.")
    parser.add_argument('-o', '--only-parse', action='store_true',
                        help="Only parses the local moss reports and does not submit files or download the reports. "
                             "Requires the reports and the links_to_reports html file created normally by this app.")
    parser.add_argument('-j', '--join-file', nargs=1, default=[""],
                        help="When the parse or only-parse option is given, joins the given CSV file with the parsed "
                             "report data.")
    parser.add_argument('-b', '--batch-mode', action='store_true',
                        help="Only submits 100 folders to the MOSS service, and also looks for already processed "
                             "folders so that it does not submit those again.")
    args = parser.parse_args()
    handle_input(args.user_id[0], args.folder[0], args.parse, args.only_parse, args.join_file[0], args.batch_mode)
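For orientation, a hedged sketch of driving the same entry point directly from Python rather than via the command line; the user id and folder name below are made-up placeholders, not real values:

# Equivalent of: python moss_client_cli.py 123456789 submissions -p
from moss_client_cli import handle_input

handle_input("123456789", "submissions", parse=True, only_parse=False,
             join_file="", batch=False)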
57.245283
123
0.680949
0
0
0
0
0
0
0
0
1,151
0.379367
fea81883e0bc239697344b2c58f07b4a45f346d3
6,495
py
Python
catkin_ws/src/localization/src/localization_node.py
DiegoOrtegoP/Software
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
[ "CC-BY-2.0" ]
12
2016-04-14T12:21:46.000Z
2021-06-18T07:51:40.000Z
catkin_ws/src/localization/src/localization_node.py
DiegoOrtegoP/Software
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
[ "CC-BY-2.0" ]
14
2017-03-03T23:33:05.000Z
2018-04-03T18:07:53.000Z
catkin_ws/src/localization/src/localization_node.py
DiegoOrtegoP/Software
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
[ "CC-BY-2.0" ]
113
2016-05-03T06:11:42.000Z
2019-06-01T14:37:38.000Z
#!/usr/bin/env python import rospy #from apriltags_ros.msg import AprilTagDetectionArray from duckietown_msgs.msg import AprilTagsWithInfos import tf2_ros from tf2_msgs.msg import TFMessage import tf.transformations as tr from geometry_msgs.msg import Transform, TransformStamped import numpy as np from localization import PoseAverage from visualization_msgs.msg import Marker # Localization Node # Author: Teddy Ort # Inputs: apriltags/duckietown_msgs/AprilTags - A list of april tags in a camera frame # Outputs: pose2d/duckietown_msgs/Pose2dStamped - The estimated pose of the robot in the world frame in 2D coordinates # pose3d/geometry_msgs/PoseStamped - The estimated pose of the robot in the world frame in 3D coordinates class LocalizationNode(object): def __init__(self): self.node_name = 'localization_node' # Constants self.world_frame = "world" self.duckiebot_frame = "duckiebot" self.duckiebot_lifetime = self.setupParam("~duckiebot_lifetime", 5) # The number of seconds to keep the duckiebot alive bewtween detections self.highlight_lifetime = self.setupParam("~highlight_lifetime", 3) # The number of seconds to keep a sign highlighted after a detection # Setup the publishers and subscribers self.sub_april = rospy.Subscriber("~apriltags", AprilTagsWithInfos, self.tag_callback) self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=1, latch=True) self.pub_rviz = rospy.Publisher("/sign_highlights", Marker, queue_size=1, latch=True) # Setup the transform listener self.tfbuf = tf2_ros.Buffer() self.tfl = tf2_ros.TransformListener(self.tfbuf) # Use a timer to make the duckiebot disappear self.lifetimer = rospy.Time.now() self.publish_duckie_marker() rospy.loginfo("[%s] has started", self.node_name) def tag_callback(self, msg_tag): # Listen for the transform of the tag in the world avg = PoseAverage.PoseAverage() for tag in msg_tag.detections: try: Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1)) Mtbase_w=self.transform_to_matrix(Tt_w.transform) Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi)) Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase) Mt_r=self.pose_to_matrix(tag.pose) Mr_t=np.linalg.inv(Mt_r) Mr_w=np.dot(Mt_w,Mr_t) Tr_w = self.matrix_to_transform(Mr_w) avg.add_pose(Tr_w) self.publish_sign_highlight(tag.id) except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex: rospy.logwarn("Error looking up transform for tag_%s", tag.id) rospy.logwarn(ex.message) Tr_w = avg.get_average() # Average of the opinions # Broadcast the robot transform if Tr_w is not None: # Set the z translation, and x and y rotations to 0 Tr_w.translation.z = 0 rot = Tr_w.rotation rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2] (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz) T = TransformStamped() T.transform = Tr_w T.header.frame_id = self.world_frame T.header.stamp = rospy.Time.now() T.child_frame_id = self.duckiebot_frame self.pub_tf.publish(TFMessage([T])) self.lifetimer = rospy.Time.now() def publish_duckie_marker(self): # Publish a duckiebot transform far away unless the timer was reset rate = rospy.Rate(10) while not rospy.is_shutdown(): rate.sleep() if rospy.Time.now() - self.lifetimer > rospy.Duration(self.duckiebot_lifetime): T = TransformStamped() T.transform.translation.z = 1000 # Throw it 1km in the air T.transform.rotation.w = 1 T.header.frame_id = self.world_frame T.header.stamp = rospy.Time.now() T.child_frame_id 
= self.duckiebot_frame self.pub_tf.publish(TFMessage([T])) def publish_sign_highlight(self, id): # Publish a highlight marker on the sign that is seen by the robot m = Marker() m.header.frame_id="tag_{id}".format(id=id) m.header.stamp = rospy.Time.now() m.id=id m.lifetime = rospy.Duration(self.highlight_lifetime) m.type = Marker.CYLINDER p = m.pose.position o = m.pose.orientation c = m.color s = m.scale s.x, s.y, s.z = (0.1, 0.1, 0.3) p.z = 0.15 c.a, c.r, c.g, c.b = (0.2, 0.9, 0.9, 0.0) o.w = 1 self.pub_rviz.publish(m) def pose_to_matrix(self, p): # Return the 4x4 homogeneous matrix for a PoseStamped.msg p from the geometry_msgs trans = (p.pose.position.x, p.pose.position.y, p.pose.position.z) rot = (p.pose.orientation.x, p.pose.orientation.y, p.pose.orientation.z, p.pose.orientation.w) return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot)) def transform_to_matrix(self, T): # Return the 4x4 homogeneous matrix for a TransformStamped.msg T from the geometry_msgs trans = (T.translation.x, T.translation.y, T.translation.z) rot = (T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w) return np.dot(tr.translation_matrix(trans), tr.quaternion_matrix(rot)) def matrix_to_transform(self, M): # Return a TransformStamped.msg T from the geometry_msgs from a 4x4 homogeneous matrix T=Transform() (T.translation.x, T.translation.y, T.translation.z) = tr.translation_from_matrix(M) (T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w) = tr.quaternion_from_matrix(M) return T def setupParam(self, param_name, default_value): value = rospy.get_param(param_name, default_value) rospy.set_param(param_name, value) #Write to parameter server for transparancy rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value)) return value if __name__ == '__main__': rospy.init_node('localization_node', anonymous=False) localization_node = LocalizationNode() rospy.spin()
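The pose math in the node above reduces to composing 4x4 homogeneous matrices (pose_to_matrix, transform_to_matrix, and the products in tag_callback). A minimal, ROS-free sketch of that composition in plain NumPy, with the unit-quaternion-to-rotation formula written out; the numeric values are made up for illustration:

import numpy as np

def homogeneous(trans, quat):
    # Build a 4x4 homogeneous matrix from a translation and a unit quaternion (x, y, z, w),
    # using the same convention as tf.transformations.
    x, y, z, w = quat
    rot = np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w),     2 * (x * z + y * w)],
        [2 * (x * y + z * w),     1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w),     2 * (y * z + x * w),     1 - 2 * (x * x + y * y)],
    ])
    m = np.eye(4)
    m[:3, :3] = rot
    m[:3, 3] = trans
    return m

Mt_w = homogeneous((1.0, 2.0, 0.17), (0.0, 0.0, 0.0, 1.0))  # tag pose in the world frame
Mr_t = homogeneous((0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))   # robot pose in the tag frame
Mr_w = np.dot(Mt_w, Mr_t)                                   # robot pose in the world frame
print(Mr_w[:3, 3])  # -> [1.5  2.   0.17]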
45.41958
147
0.652194
5,606
0.863125
0
0
0
0
0
0
1,541
0.237259
fea8219f00f084855cf10ddacc7d1729db19658a
1,030
py
Python
gen_data/get_teams.py
wusui/NCAA2019
d33a69926dc2d5355f33f9b69e39475c54d03c56
[ "MIT" ]
null
null
null
gen_data/get_teams.py
wusui/NCAA2019
d33a69926dc2d5355f33f9b69e39475c54d03c56
[ "MIT" ]
null
null
null
gen_data/get_teams.py
wusui/NCAA2019
d33a69926dc2d5355f33f9b69e39475c54d03c56
[ "MIT" ]
null
null
null
#!/usr/bin/python
# pylint: disable=W0223
"""
Get a list of teams
"""
from html.parser import HTMLParser
import requests


class ChkTeams(HTMLParser):
    """
    Extract team names from page
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.retval = []

    def handle_starttag(self, tag, attrs):
        for apt in attrs:
            if apt[0] == 'title':
                if apt[1] != "ESPN Search":
                    self.retval.append(apt[1])


DATALOC = "http://www.espn.com/mens-college-basketball/tournament/bracket"


def check_teams():
    """
    Extract a list of teams (schools)
    """
    req = requests.get(DATALOC)
    parser = ChkTeams()
    parser.feed(req.text)
    retv = parser.retval
    return retv[8:]


def make_team_list():
    """
    Call check_teams and stick result in text file
    """
    listv = check_teams()
    with open('teams.txt', 'w') as ofile:
        for team in listv:
            ofile.write(team + '\n')


if __name__ == '__main__':
    make_team_list()
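A quick way to sanity-check the parser logic above without hitting the live ESPN page is to feed it a small inline snippet; the anchor titles below are invented, and the import path is assumed from the file path gen_data/get_teams.py (check_teams() additionally drops the first eight titles because the real page starts with non-team links):

from get_teams import ChkTeams  # module name assumed from the file path above

snippet = ('<a title="ESPN Search">search</a>'
           '<a title="Duke Blue Devils">Duke</a>'
           '<a title="Gonzaga Bulldogs">Gonzaga</a>')
parser = ChkTeams()
parser.feed(snippet)
print(parser.retval)  # ['Duke Blue Devils', 'Gonzaga Bulldogs'] -- "ESPN Search" is skipped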
20.196078
74
0.590291
354
0.343689
0
0
0
0
0
0
334
0.324272
fea8cf21ba50623dff52ac8ea09d727a155060be
32,904
py
Python
pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB # Produced by pysmi-0.3.4 at Wed May 1 14:31:21 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint") mscMod, mscModIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscMod", "mscModIndex") DisplayString, RowStatus, StorageType, Unsigned32, Integer32 = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "DisplayString", "RowStatus", "StorageType", "Unsigned32", "Integer32") DigitString, NonReplicated = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-TextualConventionsMIB", "DigitString", "NonReplicated") mscPassportMIBs, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Counter32, Counter64, IpAddress, ObjectIdentity, Bits, iso, Unsigned32, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Counter64", "IpAddress", "ObjectIdentity", "Bits", "iso", "Unsigned32", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "TimeTicks", "ModuleIdentity") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") subnetInterfaceMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45)) mscModVcs = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2)) mscModVcsRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1), ) if mibBuilder.loadTexts: mscModVcsRowStatusTable.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsRowStatusTable.setDescription('This entry controls the addition and deletion of mscModVcs components.') mscModVcsRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex")) if mibBuilder.loadTexts: mscModVcsRowStatusEntry.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsRowStatusEntry.setDescription('A single entry in the table represents a single mscModVcs component.') mscModVcsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsRowStatus.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsRowStatus.setDescription('This variable is used as the basis for SNMP naming of mscModVcs components. 
These components can be added and deleted.') mscModVcsComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: mscModVcsComponentName.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsComponentName.setDescription("This variable provides the component's string name for use with the ASCII Console Interface") mscModVcsStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly") if mibBuilder.loadTexts: mscModVcsStorageType.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsStorageType.setDescription('This variable represents the storage type value for the mscModVcs tables.') mscModVcsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 1, 1, 10), NonReplicated()) if mibBuilder.loadTexts: mscModVcsIndex.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsIndex.setDescription('This variable represents the index for the mscModVcs tables.') mscModVcsAccOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10), ) if mibBuilder.loadTexts: mscModVcsAccOptTable.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsAccOptTable.setDescription("Accounting information is owned by the Vc System; it is stored in the Vc Accounting component, which itself is considered to be a component on the switch. The Accounting Component contains a bit map indicating which of the accounting facilities are to be spooled in the accounting record - for example, bit '0' if set indicates that the accounting facility with facility code H.00 should be spooled if present in the Vc for accounting purposes. The data contained in the Vc Accounting must be identical network wide even though the component can be changed and upgraded on a module by module basis.") mscModVcsAccOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex")) if mibBuilder.loadTexts: mscModVcsAccOptEntry.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsAccOptEntry.setDescription('An entry in the mscModVcsAccOptTable.') mscModVcsSegmentSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n1", 0), ("n2", 1), ("n4", 2), ("n8", 3), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n128')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsSegmentSize.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsSegmentSize.setDescription('This attribute specifies the segment size for accounting of national calls. Minimum allowed segment size is 1. If data segment is sent which is less than segmentSize it is still counted as one segment.') mscModVcsUnitsCounted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("segments", 0), ("frames", 1))).clone('segments')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsUnitsCounted.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsUnitsCounted.setDescription('This attribute specifies what is counted by frame services. 
If set to frames, frames are counted, else segments are counted.') mscModVcsAccountingFax = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="20")).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsAccountingFax.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsAccountingFax.setDescription('Each value corresponds to an accounting facility code, of which there are currently 10 facility codes defined with codes H.00 to H.09, and corresponding to the above 10 facilities. Each of the above facilities may or may not be present and stored in the Vc for accounting purposes, depending on the nature of the call. For example, only those Vcs where a NUI (Network User Identifier) is used for charging or identification purposes will have a NUI stored in the Vc. Description of bits: notused0(0) notused1(1) originalCalledAddressFax(2)') mscModVcsGenerationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bothEnds", 0), ("singleEnd", 1))).clone('singleEnd')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsGenerationMode.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsGenerationMode.setDescription('This attribute specifies part of the rules by which the network generates accounting records. If set to bothEnds, then both ends of the Vc generate accounting records. If set to singleEnd, then the charged end of the Vc generates accounting records. In single end generation mode, if the call does not clear gracefully, both ends of the Vc will generate accounting record.') mscModVcsAddOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12), ) if mibBuilder.loadTexts: mscModVcsAddOptTable.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsAddOptTable.setDescription('The Vc AddressingOptions group describes the addressing parameters. It is currently owned by the Vc. Most of the data contained in the Vc AddressingOptions group is identical network wide even though the group can be changed and upgraded on a module by module basis.') mscModVcsAddOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex")) if mibBuilder.loadTexts: mscModVcsAddOptEntry.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsAddOptEntry.setDescription('An entry in the mscModVcsAddOptTable.') mscModVcsDefaultNumberingPlan = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsDefaultNumberingPlan.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsDefaultNumberingPlan.setDescription('This attribute specifies the numbering plan used which determines the address format: X.121-- the international numbering plan for public packet switched data networks or E.164-- the international numbering plan for ISDN and PSTN. 
The default numbering plan does not need to be consistent across all of the nodes in the network.') mscModVcsNetworkIdType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dnic", 0), ("inic", 1))).clone('dnic')).setMaxAccess("readonly") if mibBuilder.loadTexts: mscModVcsNetworkIdType.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsNetworkIdType.setDescription('This attribute specifies whether the network uses a DNIC or INIC. It is used by X.75 Gateways to indicate whether in network the DNIC or INIC is used in various utilities. If it is DNIC it can be DNIC or DCC type. If it is INIC it can be 4 digits only.') mscModVcsX121Type = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("dnic", 0), ("dcc", 1))).clone('dnic')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsX121Type.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsX121Type.setDescription('This attribute specifies whether DNIC mode or DCC mode is used in X.121 address of international calls. If DCC is specified, then the first 3 digits of each DNA must be the Network ID Code. If this attribute is changed all Dnas in the network must start with this code. Numbering plan is affected by the change.') mscModVcsNetworkIdCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 6), DigitString().subtype(subtypeSpec=ValueSizeConstraint(3, 4))).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsNetworkIdCode.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsNetworkIdCode.setDescription('This attribute specifies the DNIC (Data Network ID Code) of the network or DCC code.') mscModVcsX121IntlAddresses = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsX121IntlAddresses.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsX121IntlAddresses.setDescription('This attribute indicates if any DTE is allowed to signal international addresses.') mscModVcsX121IntllPrefixDigit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)).clone(9)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsX121IntllPrefixDigit.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsX121IntllPrefixDigit.setDescription('This attribute indicates the prefix digit to be used for X.121 international calls. 
When this digit is provided the call will have full international address.') mscModVcsX121MinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsX121MinAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsX121MinAddressLength.setDescription('This attribute indicates minimum length of x121 address.') mscModVcsX121MaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsX121MaxAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsX121MaxAddressLength.setDescription('This attribute indicates maximum length of x121 address.') mscModVcsX121ToE164EscapeSignificance = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsX121ToE164EscapeSignificance.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsX121ToE164EscapeSignificance.setDescription('This attribute specifies whether an X.121 to E.164 escape digit has significance in selecting an X.32 (analog) or an ISDN switched path. If two values are significant (the value 0 or the value 9) then yes is set to this attribute. If the value of the originally entered escape digit is not significant in routing the call then value of no is assigned to this attribute.') mscModVcsE164IntlFormatAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164IntlFormatAllowed.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164IntlFormatAllowed.setDescription("This attribute indicates whether or not to allow national format E.164 addresses. If this attribute is set to a value of Yes (=1) then national format E.164 addresses are not allowed and international format addresses only are allowed. If this attribute is set to a value of No (=0), then national format E.164 addresses are allowed. If only international format E.164 addresses are allowed, then the 'e164NatlPrefixDigit' attribute is not required, nor is the 'e164IntlPrefixDigits' required.") mscModVcsE164IntlPrefixDigits = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 15), DigitString().subtype(subtypeSpec=ValueSizeConstraint(0, 3)).clone(hexValue="30")).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164IntlPrefixDigits.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164IntlPrefixDigits.setDescription("This attribute specifies the E.164 international prefix digits. If applicable, it is specified as 1 to 3 BCD digits. The 3 BCD digits are stored with the length of the international prefix in the low order nibble, nibble [0] followed by the most significant digit of the international prefix in the next low order nibble, nibble [1], etc. 
This attribute is not required if the corresponding attribute, 'e164IntlFormatOnly' is set to a value of Yes (=1).") mscModVcsE164NatlPrefixDigit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164NatlPrefixDigit.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164NatlPrefixDigit.setDescription('This attribute contains the E.164 national prefix which may be added in front of E.164 local or national call. If e164IntlFormatOnly is set to 1, this attribute is not needed.') mscModVcsE164LocalAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(4, 15)).clone(7)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164LocalAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164LocalAddressLength.setDescription('This attribute indicates the length of a local E.164 DNA on this module. This attribute is not required if the corresponding attribute, e164IntlFormatOnly is set to a value of yes. This attribute does not need to be consistent across all of the nodes in the network.') mscModVcsE164TeleCountryCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 18), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 4)).clone(hexValue="31")).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164TeleCountryCode.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164TeleCountryCode.setDescription('This attribute specifies the E.164 Telephone Country Code (TCC) for the country in which the network resides. If applicable, it is specified as 1 to 3 BCD digits. The 3 BCD digits are stored with the length of the TCC in the low order nibble, nibble [0] followed by the most significant digit of the TCC in the next low order nibble, nibble [1], etc.') mscModVcsE164NatlMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164NatlMinAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164NatlMinAddressLength.setDescription('This attribute indicates minimum length of e164 national address.') mscModVcsE164NatlMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164NatlMaxAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164NatlMaxAddressLength.setDescription('This attribute indicates maximum length of e164 national address.') mscModVcsE164IntlMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 22), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164IntlMinAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164IntlMinAddressLength.setDescription('This attribute indicates minimum length of e164 international address.') mscModVcsE164IntlMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164IntlMaxAddressLength.setStatus('mandatory') if 
mibBuilder.loadTexts: mscModVcsE164IntlMaxAddressLength.setDescription('This attribute indicates maximum length of e164 international address.') mscModVcsE164LocalMinAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164LocalMinAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164LocalMinAddressLength.setDescription('This attribute indicates minimum length of e164 local address.') mscModVcsE164LocalMaxAddressLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 12, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(15)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsE164LocalMaxAddressLength.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsE164LocalMaxAddressLength.setDescription('This attribute indicates maximum length of e164 local address.') mscModVcsIntOptTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13), ) if mibBuilder.loadTexts: mscModVcsIntOptTable.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsIntOptTable.setDescription('The Vc InterfaceOptions group defines Vc system parameters common in the network. It is owned by the Vc and is considered to be a module wide component on the switch. The data contained in the Vc InterfaceOptions group must be identical network wide even though this group can be changed and upgraded on a module by module basis.') mscModVcsIntOptEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex")) if mibBuilder.loadTexts: mscModVcsIntOptEntry.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsIntOptEntry.setDescription('An entry in the mscModVcsIntOptTable.') mscModVcsHighPriorityPacketSizes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2).clone(hexValue="ff80")).setMaxAccess("readonly") if mibBuilder.loadTexts: mscModVcsHighPriorityPacketSizes.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsHighPriorityPacketSizes.setDescription('This attribute indicates which packet sizes are supported for high priority calls within the network. Description of bits: n16(0) n32(1) n64(2) n128(3) n256(4) n512(5) n1024(6) n2048(7) n4096(8)') mscModVcsMaxSubnetPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n512')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsMaxSubnetPacketSize.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsMaxSubnetPacketSize.setDescription('This attribute specifies the maximum subnet packet size used for the connections originating or terminating on this module. All modules in the same network should have the same maxSubnetPacketSize. 
If this value is not identical throughout the network, the following points need to be considered: a) When Passport and DPN switches are connected in the same network, the maxSubnetPacketSize on a DPN switch can be at most 2048 and the DPN part of the network must be configured with hardware which supports this size: - Dedicated PE386 Network link/Trunk - Minimum measured link speed of 256Kbits/sec This hardware has to be present on every potential data path between connecting end points! b) The calling end of the connection signals the maxSubnetPacketSize value to the called end. The called end then compares this value to its own provisioned value and selects the smaller value. Note that this smaller value is not signalled back to the calling end. The calling and called ends can therefore have different maxSubnetPacketSize values.') mscModVcsCallSetupTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(5, 100)).clone(5)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsCallSetupTimer.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsCallSetupTimer.setDescription('This attribute specifies the Vc callSetupTimer in units of 1 second ticks. This timer specifies how long the Vc will wait, after sending a subnet Call Request packet into the network, for a response from the remote end of the Vc (in the form of a subnet Raccept packet). If, after sending a subnet Call packet into the network, a response is not received within this time period, the Vc will time out, clearing the call in the assumption that the remote end is unreachable. This timer must be long enough to take into account the time required for routing the subnet Call Request through the Source Call Routing and the Destination Call Routing systems in order to be delivered to the final destination.') mscModVcsCallRetryTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(30, 300)).clone(60)).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsCallRetryTimer.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsCallRetryTimer.setDescription('This attribute specifies, for Vc implementing Direct Calls with the auto-call retry feature (including PVCs), the Vc callRetryTimer in units of 1 second ticks. This timer specifies how long the Vc will wait between unsuccessful call attempts.') mscModVcsDelaySubnetAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsDelaySubnetAcks.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsDelaySubnetAcks.setDescription('This attribute specifies delay acknowledgment timer mechanism. If this attribute is set to no, then the Vc will automatically return acknowledgment packets without delay. If this attribute is set to yes, then the Vc will wait for one second in an attempt to piggyback the acknowledgment packet on another credit or data packet. 
If the Vc cannot piggyback the acknowledgment packet within this time, then the packet is returned without piggybacking.') mscModVcsWinsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213), ) if mibBuilder.loadTexts: mscModVcsWinsTable.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsWinsTable.setDescription('This is the windowSize corresponding to the given packet size and throughput class. All Vcs using the windowSize matrix support large Vc windows on both ends of the Vc, and support the signalling of the chosen Vc window size from the destination (called) end to the source (calling) end. This is the only matrix supported. The windowSize should be configured in the same way network wide, though it can be upgraded on a module by module basis. Vcs using the windowSize matrix will run properly if the matrices on different nodes differ since the Vc window is selected by the destination (called) side of the Vc.') mscModVcsWinsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-BaseShelfMIB", "mscModIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsWinsPktIndex"), (0, "Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", "mscModVcsWinsTptIndex")) if mibBuilder.loadTexts: mscModVcsWinsEntry.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsWinsEntry.setDescription('An entry in the mscModVcsWinsTable.') mscModVcsWinsPktIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("n16", 0), ("n32", 1), ("n64", 2), ("n128", 3), ("n256", 4), ("n512", 5), ("n1024", 6), ("n2048", 7), ("n4096", 8), ("n8192", 9), ("n32768", 10), ("n65535", 11)))) if mibBuilder.loadTexts: mscModVcsWinsPktIndex.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsWinsPktIndex.setDescription('This variable represents the next to last index for the mscModVcsWinsTable.') mscModVcsWinsTptIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))) if mibBuilder.loadTexts: mscModVcsWinsTptIndex.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsWinsTptIndex.setDescription('This variable represents the final index for the mscModVcsWinsTable.') mscModVcsWinsValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 16, 2, 213, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 63))).setMaxAccess("readwrite") if mibBuilder.loadTexts: mscModVcsWinsValue.setStatus('mandatory') if mibBuilder.loadTexts: mscModVcsWinsValue.setDescription('This variable represents an individual value for the mscModVcsWinsTable.') subnetInterfaceGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1)) subnetInterfaceGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1)) subnetInterfaceGroupCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1, 3)) subnetInterfaceGroupCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 1, 1, 3, 2)) subnetInterfaceCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3)) subnetInterfaceCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1)) subnetInterfaceCapabilitiesCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 45, 3, 1, 3)) subnetInterfaceCapabilitiesCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 
2, 45, 3, 1, 3, 2)) mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-SubnetInterfaceMIB", mscModVcsStorageType=mscModVcsStorageType, mscModVcs=mscModVcs, mscModVcsRowStatusEntry=mscModVcsRowStatusEntry, mscModVcsX121MinAddressLength=mscModVcsX121MinAddressLength, mscModVcsRowStatus=mscModVcsRowStatus, mscModVcsE164NatlMinAddressLength=mscModVcsE164NatlMinAddressLength, mscModVcsAccOptTable=mscModVcsAccOptTable, mscModVcsE164LocalAddressLength=mscModVcsE164LocalAddressLength, mscModVcsE164IntlMinAddressLength=mscModVcsE164IntlMinAddressLength, mscModVcsE164IntlMaxAddressLength=mscModVcsE164IntlMaxAddressLength, mscModVcsE164LocalMaxAddressLength=mscModVcsE164LocalMaxAddressLength, mscModVcsWinsTptIndex=mscModVcsWinsTptIndex, mscModVcsE164IntlPrefixDigits=mscModVcsE164IntlPrefixDigits, mscModVcsComponentName=mscModVcsComponentName, mscModVcsIndex=mscModVcsIndex, subnetInterfaceGroupCA=subnetInterfaceGroupCA, mscModVcsX121IntllPrefixDigit=mscModVcsX121IntllPrefixDigit, mscModVcsDelaySubnetAcks=mscModVcsDelaySubnetAcks, mscModVcsX121Type=mscModVcsX121Type, mscModVcsWinsTable=mscModVcsWinsTable, mscModVcsE164NatlPrefixDigit=mscModVcsE164NatlPrefixDigit, subnetInterfaceMIB=subnetInterfaceMIB, mscModVcsAccountingFax=mscModVcsAccountingFax, mscModVcsMaxSubnetPacketSize=mscModVcsMaxSubnetPacketSize, mscModVcsAddOptTable=mscModVcsAddOptTable, mscModVcsWinsValue=mscModVcsWinsValue, subnetInterfaceCapabilitiesCA02A=subnetInterfaceCapabilitiesCA02A, subnetInterfaceCapabilities=subnetInterfaceCapabilities, subnetInterfaceGroupCA02=subnetInterfaceGroupCA02, subnetInterfaceCapabilitiesCA=subnetInterfaceCapabilitiesCA, mscModVcsX121MaxAddressLength=mscModVcsX121MaxAddressLength, mscModVcsE164IntlFormatAllowed=mscModVcsE164IntlFormatAllowed, subnetInterfaceGroup=subnetInterfaceGroup, mscModVcsSegmentSize=mscModVcsSegmentSize, mscModVcsX121IntlAddresses=mscModVcsX121IntlAddresses, mscModVcsGenerationMode=mscModVcsGenerationMode, mscModVcsWinsEntry=mscModVcsWinsEntry, mscModVcsUnitsCounted=mscModVcsUnitsCounted, mscModVcsNetworkIdType=mscModVcsNetworkIdType, mscModVcsAccOptEntry=mscModVcsAccOptEntry, mscModVcsAddOptEntry=mscModVcsAddOptEntry, mscModVcsX121ToE164EscapeSignificance=mscModVcsX121ToE164EscapeSignificance, mscModVcsDefaultNumberingPlan=mscModVcsDefaultNumberingPlan, mscModVcsIntOptTable=mscModVcsIntOptTable, mscModVcsCallRetryTimer=mscModVcsCallRetryTimer, mscModVcsWinsPktIndex=mscModVcsWinsPktIndex, mscModVcsCallSetupTimer=mscModVcsCallSetupTimer, mscModVcsE164NatlMaxAddressLength=mscModVcsE164NatlMaxAddressLength, subnetInterfaceGroupCA02A=subnetInterfaceGroupCA02A, mscModVcsNetworkIdCode=mscModVcsNetworkIdCode, mscModVcsE164TeleCountryCode=mscModVcsE164TeleCountryCode, mscModVcsIntOptEntry=mscModVcsIntOptEntry, subnetInterfaceCapabilitiesCA02=subnetInterfaceCapabilitiesCA02, mscModVcsE164LocalMinAddressLength=mscModVcsE164LocalMinAddressLength, mscModVcsRowStatusTable=mscModVcsRowStatusTable, mscModVcsHighPriorityPacketSizes=mscModVcsHighPriorityPacketSizes)
197.02994
2,993
0.792973
0
0
0
0
0
0
0
0
13,763
0.418277
fea8eab09203e9965fd3c37311110a5d329a6d18
2,882
py
Python
svgserver/app.py
omniscale/svgserver
a98f75ec9547fda25941129e854af046ba8f5dfe
[ "Apache-2.0" ]
2
2018-10-18T07:15:58.000Z
2020-04-09T20:42:07.000Z
svgserver/app.py
omniscale/svgserver
a98f75ec9547fda25941129e854af046ba8f5dfe
[ "Apache-2.0" ]
null
null
null
svgserver/app.py
omniscale/svgserver
a98f75ec9547fda25941129e854af046ba8f5dfe
[ "Apache-2.0" ]
2
2019-06-20T01:29:59.000Z
2021-12-01T12:18:55.000Z
import codecs
import tempfile
from contextlib import closing

from .cgi import CGIClient
from .combine import CombineSVG
from .mapserv import MapServer, InternalError
from .tree import build_tree


def _recursive_add_layer(nodes, params, svg, mapserver, translations):
    for node in nodes:
        group_name = format_group_name(node, translations)
        svg.push_group(group_name)
        if node.layer:
            params["layers"] = node.layer
            params["format"] = "image/svg+xml"
            resp = mapserver.get(params)
            if resp.headers["Content-type"] != "image/svg+xml":
                raise InternalError(
                    "received non SVG response for layer %s:\n%s\n%s"
                    % (node.layer, resp.headers, resp.read())
                )
            svg.add(resp)
        if node.subs:
            _recursive_add_layer(node.subs, params, svg, mapserver, translations)
        svg.pop_group()


def format_group_name(node, translations):
    if isinstance(node.name, tuple):
        return ', '.join(translations.get(n, n) for n in node.name)
    return translations.get(node.name, node.name)


def layered_svg(params, translations={}, mapserver_binary="mapserv", root_id='map'):
    mapserver = MapServer(binary=mapserver_binary)
    layers = mapserver.layer_names(params)
    nodes = build_tree(layers)
    root_id = translations.get(root_id, root_id)

    f = tempfile.TemporaryFile()
    try:
        with CombineSVG(f, root_id=root_id) as svg:
            _recursive_add_layer(
                nodes,
                params=params,
                svg=svg,
                mapserver=mapserver,
                translations=translations,
            )
        f.seek(0)
        return f
    except:
        # close to remove temporary file
        f.close()
        raise


def load_translations(filename):
    if not filename:
        return {}

    translations = {}
    with codecs.open(filename, encoding="utf8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if '=' not in line:
                continue
            key, translation = line.split('=', 1)
            translations[key.strip()] = translation.strip()
    return translations


if __name__ == "__main__":
    import os
    import logging
    logging.basicConfig(level=logging.DEBUG)
    params = {
        "service": "WMS",
        "version": "1.1.1",
        "request": "GetMap",
        "width": 1234,
        "height": 769,
        "srs": "EPSG:3857",
        "styles": "",
        "format": "image/svg+xml",
        "bbox": "775214.9923087133,6721788.224989068,776688.4414913012,6722705.993822992",
        "map": os.path.abspath(os.path.dirname(__file__) + "/../tests/ms.map"),
    }
    with closing(layered_svg(params)) as f:
        print(f.read())
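load_translations() above expects a plain text file with one "key = value" pair per line. A hedged round-trip sketch (the file name and entries are invented, and the import path is assumed from the repo layout):

from svgserver.app import load_translations  # import path assumed from the repo layout

# One "key = value" pair per line; '#' comments and blank lines are skipped.
with open("layer_names.txt", "w", encoding="utf8") as f:
    f.write("# labels for layer groups\n")
    f.write("roads = Streets\n")
    f.write("water = Waterways\n")

print(load_translations("layer_names.txt"))
# -> {'roads': 'Streets', 'water': 'Waterways'}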
29.408163
90
0.586051
0
0
0
0
0
0
0
0
397
0.137752
feaaec4a50d5a134457fe10cd74a02481c434561
440
py
Python
11_app/script/purchase_order.py
israillaky/ERPOSAPP11
90dd26213fecce7f6301bfa2f2356d8f5d3a8086
[ "MIT" ]
null
null
null
11_app/script/purchase_order.py
israillaky/ERPOSAPP11
90dd26213fecce7f6301bfa2f2356d8f5d3a8086
[ "MIT" ]
null
null
null
11_app/script/purchase_order.py
israillaky/ERPOSAPP11
90dd26213fecce7f6301bfa2f2356d8f5d3a8086
[ "MIT" ]
null
null
null
import frappe


@frappe.whitelist()
def filt_itemby_supplier(doctype, txt, searchfield, start, page_len, filters):
    # Bind the supplier value as a one-element tuple so the %s placeholder
    # binding is explicit.
    return frappe.db.sql(
        """select parent from `tabItem Supplier` where supplier = %s""",
        (filters.get("supplier"),),
    )


@frappe.whitelist()
def filteritem(doctype, txt, searchfield, start, page_len, filters):
    return frappe.db.sql(
        """select item_code, item_name, item_group, volume, item_type, stock_uom
           from `tabItem`"""
    )
44
117
0.740909
0
0
0
0
420
0.954545
0
0
162
0.368182
feab2f73df218463681f43ce0d3584c476b63adb
925
py
Python
src/common/bio/smiles.py
duttaprat/proteinGAN
92b32192ab959e327e1d713d09fc9b40dc01d757
[ "MIT" ]
8
2020-12-23T21:44:47.000Z
2021-07-09T05:46:16.000Z
src/common/bio/smiles.py
duttaprat/proteinGAN
92b32192ab959e327e1d713d09fc9b40dc01d757
[ "MIT" ]
null
null
null
src/common/bio/smiles.py
duttaprat/proteinGAN
92b32192ab959e327e1d713d09fc9b40dc01d757
[ "MIT" ]
null
null
null
from common.bio.constants import SMILES_CHARACTER_TO_ID, ID_TO_SMILES_CHARACTER


def from_smiles_to_id(data, column):
    """Converts sequences from smiles to ids

    Args:
        data: data that contains characters that need to be converted to ids
        column: a column of the dataframe that contains characters that need to be converted to ids

    Returns:
        array of ids
    """
    return [[SMILES_CHARACTER_TO_ID[char] for char in val] for index, val in data[column].iteritems()]


def from_id_from_smiles(data, column):
    """Converts sequences from ids to smiles characters

    Args:
        data: data that contains ids that need to be converted to characters
        column: a column of the dataframe that contains ids that need to be converted to characters

    Returns:
        array of characters
    """
    return [[ID_TO_SMILES_CHARACTER[id] for id in val] for index, val in data[column].iteritems()]
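A toy round trip through the same character/id conversion, with a made-up three-character alphabet standing in for the real mappings in common.bio.constants (only the idea is illustrated here, not the project's actual vocabulary):

import pandas as pd

# Stand-ins for SMILES_CHARACTER_TO_ID / ID_TO_SMILES_CHARACTER
char_to_id = {"C": 0, "O": 1, "=": 2}
id_to_char = {v: k for k, v in char_to_id.items()}

data = pd.DataFrame({"smiles": ["C=O", "CO"]})

ids = [[char_to_id[ch] for ch in val] for val in data["smiles"]]
print(ids)  # [[0, 2, 1], [0, 1]]

chars = [[id_to_char[i] for i in row] for row in ids]
print(["".join(row) for row in chars])  # ['C=O', 'CO']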
28.030303
102
0.721081
0
0
0
0
0
0
0
0
550
0.594595
feab97b0913494abc7216c346f3470dd95d2e154
1,001
py
Python
test/lib_config_test.py
yokoyama-flogics/ibp_monitor_2
1a7df55a524ff3a7908df330e7e02c9f27e24ae0
[ "BSD-2-Clause" ]
3
2017-11-23T13:29:47.000Z
2021-01-08T09:28:35.000Z
test/lib_config_test.py
yokoyama-flogics/ibp_monitor_2
1a7df55a524ff3a7908df330e7e02c9f27e24ae0
[ "BSD-2-Clause" ]
null
null
null
test/lib_config_test.py
yokoyama-flogics/ibp_monitor_2
1a7df55a524ff3a7908df330e7e02c9f27e24ae0
[ "BSD-2-Clause" ]
2
2018-02-15T08:11:24.000Z
2021-01-08T09:28:43.000Z
import os
import sys
import unittest

# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.config import *


class TestLibConfig(unittest.TestCase):
    def test_config_noconfigfile(self):
        config = BeaconConfigParser('not_exist.cfg')
        with self.assertRaises(ConfigParser.NoSectionError):
            config.getpath('Test', 'dbdir')

    def test_config_default(self):
        import os
        os.environ['HOME'] = 'notexist'
        config = BeaconConfigParser()
        with self.assertRaises(ConfigParser.NoSectionError):
            config.get('Signal', 'samplerate')

    def test_config_items(self):
        config = BeaconConfigParser('test_config.cfg')
        self.assertEqual(config.get('Test', 'dbdir'), 'nodb')
        self.assertEqual(config.getpath('Test', 'dbdir'), 'nodb')
        self.assertEqual(config.getint('Signal', 'samplerate'), 16000)


if __name__ == "__main__":
    unittest.main(buffer=True)
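The assertions in test_config_items imply a test_config.cfg roughly like the following; this is a sketch inferred from the expected values, not the actual file from the repository:

# Presumed contents of test_config.cfg, written out so the test above can find it.
test_config = """\
[Test]
dbdir = nodb

[Signal]
samplerate = 16000
"""

with open("test_config.cfg", "w") as f:
    f.write(test_config)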
33.366667
70
0.679321
765
0.764236
0
0
0
0
0
0
201
0.200799
feac612781029aac47e6d21c85d8519de53dcb55
7,188
py
Python
tests/test_installation.py
phdye/nimporter
64eccc74950811e03efdde50649e84ca1fe87ae4
[ "MIT" ]
null
null
null
tests/test_installation.py
phdye/nimporter
64eccc74950811e03efdde50649e84ca1fe87ae4
[ "MIT" ]
null
null
null
tests/test_installation.py
phdye/nimporter
64eccc74950811e03efdde50649e84ca1fe87ae4
[ "MIT" ]
null
null
null
""" Test to make sure that libraries built with Nimporter can be installed via Pip. """ import sys, os, subprocess, shutil, pkg_resources, json, warnings from pathlib import Path import pytest import nimporter PYTHON = 'python' if sys.platform == 'win32' else 'python3' PIP = 'pip' if shutil.which('pip') else 'pip3' @pytest.mark.integration_test def test_ensure_nimporter_installed(): "Make sure that Nimporter is installed before running integration tests." libs = {lib.key.lower() for lib in pkg_resources.working_set} assert 'nimporter' in libs, ( f'Nimporter is not installed. Please install via:' f'`{PIP} install .` before running the integration tests.' ) @pytest.mark.integration_test def test_create_sdist(): "Test the successful creation of a source distribution." with nimporter.cd('tests/proj1'): subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait() dist = Path('dist') egg = Path('project1.egg-info') try: assert dist.exists() assert egg.exists() targets = list(dist.glob('project1*')) assert len(targets) == 1 assert targets[0].exists() # Make sure the appropriate compiler is being used for extension in Path('nim-extensions').iterdir(): (nim_build_data_file,) = extension.glob('*json') nim_build_data = json.loads(nim_build_data_file.read_text()) expected = nimporter.NimCompiler.get_compatible_compiler() installed_ccs = nimporter.NimCompiler.get_installed_compilers() if not expected: warnings.warn( f'No compatible C compiler installed: {installed_ccs}' ) else: cc_path = installed_ccs[expected] actual = nim_build_data['linkcmd'].split()[0].strip() if not actual.startswith(cc_path.stem): warnings.warn( f'Nim used a different C compiler than what Python ' f'expects. Python uses {cc_path.stem} and Nim used ' f'{actual}' ) finally: shutil.rmtree(str(dist.absolute())) shutil.rmtree(str(egg.absolute())) @pytest.mark.integration_test def test_create_bdist(): "Test the successful create of a wheel." with nimporter.cd('tests/proj1'): subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait() dist = Path('dist') build = Path('build') egg = Path('project1.egg-info') try: assert dist.exists() assert build.exists() assert egg.exists() targets = list(Path('dist').glob('project1*.whl')) assert len(targets) == 1 assert targets[0].exists() # Make sure the appropriate compiler is being used for extension in Path('nim-extensions').iterdir(): (nim_build_data_file,) = extension.glob('*json') nim_build_data = json.loads(nim_build_data_file.read_text()) expected = nimporter.NimCompiler.get_compatible_compiler() installed_ccs = nimporter.NimCompiler.get_installed_compilers() if not expected: warnings.warn( f'No compatible C compiler installed: {installed_ccs}' ) else: cc_path = installed_ccs[expected] actual = nim_build_data['linkcmd'].split()[0].strip() if not actual.startswith(cc_path.stem): warnings.warn( f'Nim used a different C compiler than what Python ' f'expects. 
Python uses {cc_path.stem} and Nim used ' f'{actual}' ) finally: shutil.rmtree(str(dist.absolute())) shutil.rmtree(str(build.absolute())) shutil.rmtree(str(egg.absolute())) @pytest.mark.slow_integration_test def test_install_sdist(): "Make sure that the project can be installed by Pip" with nimporter.cd('tests/proj1'): subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait() dist = Path('dist') egg = Path('project1.egg-info') try: assert dist.exists() assert egg.exists() targets = list(dist.glob('project1*')) assert len(targets) == 1 (target,) = targets assert target.exists() subprocess.Popen(f'{PIP} install {target}'.split()).wait() finally: shutil.rmtree(str(dist.absolute())) shutil.rmtree(str(egg.absolute())) # Make sure that `tests/proj1` is not imported as a SimpleNamespace and that # the installed library in `site-packages` is used. with nimporter.cd('../..'): try: import proj1 assert proj1 import proj1.performance assert proj1.performance import proj1.lib1 assert proj1.lib1 assert proj1.foo assert proj1.bar assert proj1.baz assert proj1.baz() == 1 except Exception as e: warnings.warn(str(e)) # Cannot delete a DLL in use by another process on Windows if sys.platform != 'win32': subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait() @pytest.mark.slow_integration_test def test_install_bdist(): "Make sure that the wheel can be installed by Pip" with nimporter.cd('tests/proj1'): subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait() dist = Path('dist') build = Path('build') egg = Path('project1.egg-info') try: assert dist.exists() assert build.exists() assert egg.exists() targets = list(Path('dist').glob('project1*.whl')) assert len(targets) == 1 wheel = targets[0] assert wheel.exists() subprocess.Popen(f'{PIP} install {wheel}'.split()).wait() finally: shutil.rmtree(str(dist.absolute())) shutil.rmtree(str(build.absolute())) shutil.rmtree(str(egg.absolute())) # Make sure that `tests/proj1` is not imported as a SimpleNamespace and that # the installed library in `site-packages` is used. with nimporter.cd('../..'): try: import proj1 assert proj1 import proj1.performance assert proj1.performance import proj1.lib1 assert proj1.lib1 assert proj1.foo assert proj1.bar assert proj1.baz assert proj1.baz() == 1 except Exception as e: warnings.warn(str(e)) # Cannot delete a DLL in use by another process on Windows if sys.platform != 'win32': subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
38.854054
80
0.564969
0
0
0
0
6,856
0.953812
0
0
1,872
0.260434
feae2347f1d740037425173028bb1b3d8af9f2a3
153
py
Python
hotpot_sample_dict.py
bvanaken/pytorch-pretrained-BERT
71c1660fb082fa5ebde4afd8c7db2bc96b80bb59
[ "Apache-2.0" ]
1
2022-02-06T15:59:12.000Z
2022-02-06T15:59:12.000Z
hotpot_sample_dict.py
bvanaken/pytorch-pretrained-BERT
71c1660fb082fa5ebde4afd8c7db2bc96b80bb59
[ "Apache-2.0" ]
null
null
null
hotpot_sample_dict.py
bvanaken/pytorch-pretrained-BERT
71c1660fb082fa5ebde4afd8c7db2bc96b80bb59
[ "Apache-2.0" ]
null
null
null
samples = { "2_brother_plays": { "question_parts": [range(1, 13), range(13, 17)], "sp_parts": [range(20, 43), range(50, 60)] } }
21.857143
56
0.51634
0
0
0
0
0
0
0
0
43
0.281046
feb04d32f16beda0e1b583eb23a6f47a91df44ef
695
py
Python
src/applications/blog/migrations/0003_post_author.py
alexander-sidorov/tms-z43
61ecd204f5de4e97ff0300f6ef91c36c2bcda31c
[ "MIT" ]
2
2020-12-17T20:19:21.000Z
2020-12-22T12:46:43.000Z
src/applications/blog/migrations/0003_post_author.py
alexander-sidorov/tms-z43
61ecd204f5de4e97ff0300f6ef91c36c2bcda31c
[ "MIT" ]
4
2021-04-20T08:40:30.000Z
2022-02-10T07:50:30.000Z
src/applications/blog/migrations/0003_post_author.py
alexander-sidorov/tms-z43
61ecd204f5de4e97ff0300f6ef91c36c2bcda31c
[ "MIT" ]
1
2021-02-10T06:42:19.000Z
2021-02-10T06:42:19.000Z
# Generated by Django 3.1.7 on 2021-03-24 17:41 import django.db.models.deletion from django.conf import settings from django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("blog", "0002_auto_20210323_1834"), ] operations = [ migrations.AddField( model_name="post", name="author", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), ]
24.821429
66
0.604317
515
0.741007
0
0
0
0
0
0
92
0.132374
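The migration in the record above adds a nullable `author` foreign key to a `Post` model. As a sketch, this is the model-side declaration such a migration corresponds to (model and field names come from the migration; everything else is assumed):

```python
from django.conf import settings
from django.db import models

class Post(models.Model):
    # Field introduced by migration 0003_post_author: optional link to the auth user model.
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
```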
feb0e950cc084ec84da234840633db92453d5121
16,227
py
Python
sdk/python/pulumi_aws/cloudformation/stack_set.py
mdop-wh/pulumi-aws
05bb32e9d694dde1c3b76d440fd2cd0344d23376
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/cloudformation/stack_set.py
mdop-wh/pulumi-aws
05bb32e9d694dde1c3b76d440fd2cd0344d23376
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/cloudformation/stack_set.py
mdop-wh/pulumi-aws
05bb32e9d694dde1c3b76d440fd2cd0344d23376
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Dict, List, Mapping, Optional, Tuple, Union from .. import _utilities, _tables __all__ = ['StackSet'] class StackSet(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, administration_role_arn: Optional[pulumi.Input[str]] = None, capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None, description: Optional[pulumi.Input[str]] = None, execution_role_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, template_body: Optional[pulumi.Input[str]] = None, template_url: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Manages a CloudFormation StackSet. StackSets allow CloudFormation templates to be easily deployed across multiple accounts and regions via StackSet Instances (`cloudformation.StackSetInstance` resource). Additional information about StackSets can be found in the [AWS CloudFormation User Guide](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html). > **NOTE:** All template parameters, including those with a `Default`, must be configured or ignored with the `lifecycle` configuration block `ignore_changes` argument. > **NOTE:** All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. ## Example Usage ```python import pulumi import pulumi_aws as aws a_ws_cloud_formation_stack_set_administration_role_assume_role_policy = aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs( actions=["sts:AssumeRole"], effect="Allow", principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs( identifiers=["cloudformation.amazonaws.com"], type="Service", )], )]) a_ws_cloud_formation_stack_set_administration_role = aws.iam.Role("aWSCloudFormationStackSetAdministrationRole", assume_role_policy=a_ws_cloud_formation_stack_set_administration_role_assume_role_policy.json) example = aws.cloudformation.StackSet("example", administration_role_arn=a_ws_cloud_formation_stack_set_administration_role.arn, parameters={ "VPCCidr": "10.0.0.0/16", }, template_body=\"\"\"{ "Parameters" : { "VPCCidr" : { "Type" : "String", "Default" : "10.0.0.0/16", "Description" : "Enter the CIDR block for the VPC. Default is 10.0.0.0/16." 
} }, "Resources" : { "myVpc": { "Type" : "AWS::EC2::VPC", "Properties" : { "CidrBlock" : { "Ref" : "VPCCidr" }, "Tags" : [ {"Key": "Name", "Value": "Primary_CF_VPC"} ] } } } } \"\"\") a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document = example.execution_role_name.apply(lambda execution_role_name: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs( actions=["sts:AssumeRole"], effect="Allow", resources=[f"arn:aws:iam::*:role/{execution_role_name}"], )])) a_ws_cloud_formation_stack_set_administration_role_execution_policy_role_policy = aws.iam.RolePolicy("aWSCloudFormationStackSetAdministrationRoleExecutionPolicyRolePolicy", policy=a_ws_cloud_formation_stack_set_administration_role_execution_policy_policy_document.json, role=a_ws_cloud_formation_stack_set_administration_role.name) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account. :param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`. :param pulumi.Input[str] description: Description of the StackSet. :param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`. :param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified. :param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`. :param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`. 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if administration_role_arn is None: raise TypeError("Missing required property 'administration_role_arn'") __props__['administration_role_arn'] = administration_role_arn __props__['capabilities'] = capabilities __props__['description'] = description __props__['execution_role_name'] = execution_role_name __props__['name'] = name __props__['parameters'] = parameters __props__['tags'] = tags __props__['template_body'] = template_body __props__['template_url'] = template_url __props__['arn'] = None __props__['stack_set_id'] = None super(StackSet, __self__).__init__( 'aws:cloudformation/stackSet:StackSet', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, administration_role_arn: Optional[pulumi.Input[str]] = None, arn: Optional[pulumi.Input[str]] = None, capabilities: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None, description: Optional[pulumi.Input[str]] = None, execution_role_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, stack_set_id: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, template_body: Optional[pulumi.Input[str]] = None, template_url: Optional[pulumi.Input[str]] = None) -> 'StackSet': """ Get an existing StackSet resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] administration_role_arn: Amazon Resource Number (ARN) of the IAM Role in the administrator account. :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the StackSet. :param pulumi.Input[List[pulumi.Input[str]]] capabilities: A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`. :param pulumi.Input[str] description: Description of the StackSet. :param pulumi.Input[str] execution_role_name: Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`. :param pulumi.Input[str] name: Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Key-value map of input parameters for the StackSet template. 
All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. :param pulumi.Input[str] stack_set_id: Unique identifier of the StackSet. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified. :param pulumi.Input[str] template_body: String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`. :param pulumi.Input[str] template_url: String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["administration_role_arn"] = administration_role_arn __props__["arn"] = arn __props__["capabilities"] = capabilities __props__["description"] = description __props__["execution_role_name"] = execution_role_name __props__["name"] = name __props__["parameters"] = parameters __props__["stack_set_id"] = stack_set_id __props__["tags"] = tags __props__["template_body"] = template_body __props__["template_url"] = template_url return StackSet(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="administrationRoleArn") def administration_role_arn(self) -> pulumi.Output[str]: """ Amazon Resource Number (ARN) of the IAM Role in the administrator account. """ return pulumi.get(self, "administration_role_arn") @property @pulumi.getter def arn(self) -> pulumi.Output[str]: """ Amazon Resource Name (ARN) of the StackSet. """ return pulumi.get(self, "arn") @property @pulumi.getter def capabilities(self) -> pulumi.Output[Optional[List[str]]]: """ A list of capabilities. Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_AUTO_EXPAND`. """ return pulumi.get(self, "capabilities") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ Description of the StackSet. """ return pulumi.get(self, "description") @property @pulumi.getter(name="executionRoleName") def execution_role_name(self) -> pulumi.Output[Optional[str]]: """ Name of the IAM Role in all target accounts for StackSet operations. Defaults to `AWSCloudFormationStackSetExecutionRole`. """ return pulumi.get(self, "execution_role_name") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the StackSet. The name must be unique in the region where you create your StackSet. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters. """ return pulumi.get(self, "name") @property @pulumi.getter def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Key-value map of input parameters for the StackSet template. All template parameters, including those with a `Default`, must be configured or ignored with `lifecycle` configuration block `ignore_changes` argument. All `NoEcho` template parameters must be ignored with the `lifecycle` configuration block `ignore_changes` argument. 
""" return pulumi.get(self, "parameters") @property @pulumi.getter(name="stackSetId") def stack_set_id(self) -> pulumi.Output[str]: """ Unique identifier of the StackSet. """ return pulumi.get(self, "stack_set_id") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Key-value map of tags to associate with this StackSet and the Stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the Stacks. A maximum number of 50 tags can be specified. """ return pulumi.get(self, "tags") @property @pulumi.getter(name="templateBody") def template_body(self) -> pulumi.Output[str]: """ String containing the CloudFormation template body. Maximum size: 51,200 bytes. Conflicts with `template_url`. """ return pulumi.get(self, "template_body") @property @pulumi.getter(name="templateUrl") def template_url(self) -> pulumi.Output[Optional[str]]: """ String containing the location of a file containing the CloudFormation template body. The URL must point to a template that is located in an Amazon S3 bucket. Maximum location file size: 460,800 bytes. Conflicts with `template_body`. """ return pulumi.get(self, "template_url") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
57.747331
403
0.680841
15,859
0.977322
0
0
7,675
0.472977
0
0
10,666
0.6573
feb1798a65bfb807865b5bcdd876a894d5048086
319
py
Python
code/config/imports.py
farioso-fernando/cover-meu-beat
b15a9c0c97086e51e42cee4dd40e7d0650130d0e
[ "MIT" ]
null
null
null
code/config/imports.py
farioso-fernando/cover-meu-beat
b15a9c0c97086e51e42cee4dd40e7d0650130d0e
[ "MIT" ]
null
null
null
code/config/imports.py
farioso-fernando/cover-meu-beat
b15a9c0c97086e51e42cee4dd40e7d0650130d0e
[ "MIT" ]
null
null
null
from kivy.uix.screenmanager import ScreenManager from kivy.uix.boxlayout import BoxLayout from kivy.lang.builder import Builder from kivy.animation import Animation from kivy.core.window import Window from kivymd.app import MDApp import kivymd import kivy print( ) def version(): kivy.require('2.0.0') print( )
16.789474
48
0.789969
0
0
0
0
0
0
0
0
7
0.021944
feb1c1e0c98bd37c082895d1888d0fe15b8aaccf
19,367
py
Python
claripy/vsa/valueset.py
kwalberg/claripy
b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90
[ "BSD-2-Clause" ]
null
null
null
claripy/vsa/valueset.py
kwalberg/claripy
b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90
[ "BSD-2-Clause" ]
null
null
null
claripy/vsa/valueset.py
kwalberg/claripy
b5cfa0a355eaa3cd5403e1d81f0b80bb3db20c90
[ "BSD-2-Clause" ]
null
null
null
import functools import itertools import numbers from ..backend_object import BackendObject from ..annotation import Annotation def normalize_types_two_args(f): @functools.wraps(f) def normalizer(self, region, o): """ Convert any object to an object that we can process. """ if isinstance(o, Base): raise ClaripyValueError("BoolResult can't handle AST objects directly") if not isinstance(o, StridedInterval): raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o)) return f(self, region, o) return normalizer def normalize_types_one_arg(f): @functools.wraps(f) def normalizer(self, o): """ Convert any object to an object that we can process. """ if isinstance(o, Base): raise ClaripyValueError("BoolResult can't handle AST objects directly") return f(self, o) return normalizer vs_id_ctr = itertools.count() class RegionAnnotation(Annotation): """ Use RegionAnnotation to annotate ASTs. Normally, an AST annotated by RegionAnnotations is treated as a ValueSet. Note that Annotation objects are immutable. Do not change properties of an Annotation object without creating a new one. """ def __init__(self, region_id, region_base_addr, offset): self.region_id = region_id self.region_base_addr = region_base_addr self.offset = offset # Do necessary conversion here if isinstance(self.region_base_addr, Base): self.region_base_addr = self.region_base_addr._model_vsa if isinstance(self.offset, Base): self.offset = self.offset._model_vsa @property def eliminatable(self): """ A Region annotation is not eliminatable in simplifications. :return: False :rtype: bool """ return False @property def relocatable(self): """ A Region annotation is not relocatable in simplifications. :return: False :rtype: bool """ return False # # Public methods # def relocate(self, src, dst): """ Override Annotation.relocate(). :param src: The old AST :param dst: The new AST, as the result of a simplification :return: The new annotation that should be applied on the new AST """ raise ClaripyVSAError('RegionAnnotation is not relocatable') # # Overriding base methods # def __hash__(self): return hash((self.region_id, self.region_base_addr, hash(self.offset))) def __repr__(self): return "<RegionAnnotation %s:%#08x>" % (self.region_id, self.offset) class ValueSet(BackendObject): """ ValueSet is a mapping between memory regions and corresponding offsets. """ def __init__(self, name=None, region=None, region_base_addr=None, bits=None, val=None): """ Constructor. :param str name: Name of this ValueSet object. Only for debugging purposes. :param str region: Region ID. :param int region_base_addr: Base address of the region. :param int bits: Size of the ValueSet. :param val: an initial offset """ self._name = 'VS_%d' % next(vs_id_ctr) if name is None else name if bits is None: raise ClaripyVSAError('bits must be specified when creating a ValueSet.') self._bits = bits self._si = StridedInterval.empty(bits) self._regions = {} self._region_base_addrs = {} self._reversed = False # Shortcuts for initialization # May not be useful though... 
if region is not None and region_base_addr is not None and val is not None: if isinstance(region_base_addr, numbers.Number): # Convert it to a StridedInterval region_base_addr = StridedInterval(bits=self._bits, stride=1, lower_bound=region_base_addr, upper_bound=region_base_addr) if isinstance(val, numbers.Number): val = StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val) if isinstance(val, StridedInterval): self._set_si(region, region_base_addr, val) else: raise ClaripyVSAError("Unsupported type '%s' for argument 'val'" % type(val)) else: if region is not None or val is not None: raise ClaripyVSAError("You must specify 'region' and 'val' at the same time.") # # Properties # @property def name(self): return self._name @property def bits(self): return self._bits @property def regions(self): return self._regions @property def reversed(self): return self._reversed @property def unique(self): return len(self.regions) == 1 and self.regions.values()[0].unique @property def cardinality(self): card = 0 for region in self._regions: card += self._regions[region].cardinality return card @property def is_empty(self): return len(self._regions) == 0 @property def valueset(self): return self # # Private methods # def _set_si(self, region, region_base_addr, si): if isinstance(si, numbers.Number): si = StridedInterval(bits=self.bits, stride=0, lower_bound=si, upper_bound=si) if isinstance(region_base_addr, numbers.Number): region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr, upper_bound=region_base_addr ) if not isinstance(si, StridedInterval): raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si)) self._regions[region] = si self._region_base_addrs[region] = region_base_addr self._si = self._si.union(region_base_addr + si) def _merge_si(self, region, region_base_addr, si): if isinstance(region_base_addr, numbers.Number): region_base_addr = StridedInterval(bits=self.bits, stride=0, lower_bound=region_base_addr, upper_bound=region_base_addr ) if region not in self._regions: self._set_si(region, region_base_addr, si) else: self._regions[region] = self._regions[region].union(si) self._region_base_addrs[region] = self._region_base_addrs[region].union(region_base_addr) self._si = self._si.union(region_base_addr + si) # # Public methods # @staticmethod def empty(bits): return ValueSet(bits=bits) def items(self): return self._regions.items() def size(self): return len(self) def copy(self): """ Make a copy of self and return. :return: A new ValueSet object. :rtype: ValueSet """ vs = ValueSet(bits=self.bits) vs._regions = self._regions.copy() vs._region_base_addrs = self._region_base_addrs.copy() vs._reversed = self._reversed vs._si = self._si.copy() return vs def get_si(self, region): if region in self._regions: return self._regions[region] # TODO: Should we return a None, or an empty SI instead? return None def stridedinterval(self): return self._si def apply_annotation(self, annotation): """ Apply a new annotation onto self, and return a new ValueSet object. :param RegionAnnotation annotation: The annotation to apply. 
:return: A new ValueSet object :rtype: ValueSet """ vs = self.copy() vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset) return vs def __repr__(self): s = "" for region, si in self._regions.items(): s = "%s: %s" % (region, si) return "(" + s + ")" def __len__(self): return self._bits def __hash__(self): return hash(tuple((r, hash(self._regions[r])) for r in self._regions)) # # Arithmetic operations # @normalize_types_one_arg def __add__(self, other): """ Binary operation: addition Note that even if "other" is a ValueSet object. we still treat it as a StridedInterval. Adding two ValueSets together does not make sense (which is essentially adding two pointers together). :param StridedInterval other: The other operand. :return: A new ValueSet object :rtype: ValueSet """ new_vs = ValueSet(bits=self.bits) # Call __add__ on self._si new_vs._si = self._si.__add__(other) for region in self._regions: new_vs._regions[region] = self._regions[region] + other return new_vs @normalize_types_one_arg def __radd__(self, other): return self.__add__(other) @normalize_types_one_arg def __sub__(self, other): """ Binary operation: subtraction :param other: The other operand :return: A StridedInterval or a ValueSet. """ deltas = [ ] # TODO: Handle more cases if isinstance(other, ValueSet): # A subtraction between two ValueSets produces a StridedInterval if self.regions.keys() == other.regions.keys(): for region in self._regions: deltas.append(self._regions[region] - other._regions[region]) else: # TODO: raise the proper exception here raise NotImplementedError() delta = StridedInterval.empty(self.bits) for d in deltas: delta = delta.union(d) return delta else: # A subtraction between a ValueSet and a StridedInterval produces another ValueSet new_vs = self.copy() # Call __sub__ on the base class new_vs._si = self._si.__sub__(other) for region, si in new_vs._regions.items(): new_vs._regions[region] = si - other return new_vs @normalize_types_one_arg def __and__(self, other): """ Binary operation: and Note that even if `other` is a ValueSet object, it will be treated as a StridedInterval as well. Doing & between two pointers that are not the same do not make sense. 
:param other: The other operand :return: A ValueSet as the result :rtype: ValueSet """ if type(other) is ValueSet: # The only case where calling & between two points makes sense if self.identical(other): return self.copy() if BoolResult.is_true(other == 0): # Corner case: a & 0 = 0 return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0) if BoolResult.is_true(other < 0x100): # Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not # We return a StridedInterval instead ret = None for region, si in self._regions.items(): r = si.__and__(other) ret = r if ret is None else ret.union(r) return ret else: # We should return a ValueSet here new_vs = self.copy() for region, si in self._regions.items(): r = si.__and__(other) new_vs._regions[region] = r return new_vs def __eq__(self, other): """ Binary operation: == :param other: The other operand :return: True/False/Maybe """ if isinstance(other, ValueSet): same = False different = False for region, si in other.regions.items(): if region in self.regions: comp_ret = self.regions[region] == si if BoolResult.has_true(comp_ret): same = True if BoolResult.has_false(comp_ret): different = True else: different = True if same and not different: return TrueResult() if same and different: return MaybeResult() return FalseResult() elif isinstance(other, StridedInterval): if 'global' in self.regions: return self.regions['global'] == other else: return FalseResult() else: return FalseResult() def __ne__(self, other): """ Binary operation: == :param other: The other operand :return: True/False/Maybe """ return ~ (self == other) # # Backend operations # def eval(self, n, signed=False): if signed: # How are you going to deal with a negative pointer? raise ClaripyVSAOperationError('`signed` cannot be True when calling ValueSet.eval().') results = [] for _, si in self._regions.items(): if len(results) < n: results.extend(si.eval(n)) return results @property def min(self): """ The minimum integer value of a value-set. It is only defined when there is exactly one region. :return: A integer that represents the minimum integer value of this value-set. :rtype: int """ if len(self.regions) != 1: raise ClaripyVSAOperationError("'min()' onlly works on single-region value-sets.") return self.get_si(next(iter(self.regions))).min @property def max(self): """ The maximum integer value of a value-set. It is only defined when there is exactly one region. :return: A integer that represents the maximum integer value of this value-set. :rtype: int """ if len(self.regions) != 1: raise ClaripyVSAOperationError("'max()' onlly works on single-region value-sets.") return self.get_si(next(iter(self.regions))).max def reverse(self): # TODO: obviously valueset.reverse is not properly implemented. I'm disabling the old annoying output line for # TODO: now. I will implement the proper reversing support soon. vs = self.copy() vs._reversed = not vs._reversed return vs def extract(self, high_bit, low_bit): """ Operation extract - A cheap hack is implemented: a copy of self is returned if (high_bit - low_bit + 1 == self.bits), which is a ValueSet instance. Otherwise a StridedInterval is returned. 
:param high_bit: :param low_bit: :return: A ValueSet or a StridedInterval """ if high_bit - low_bit + 1 == self.bits: return self.copy() if ('global' in self._regions and len(self._regions.keys()) > 1) or \ len(self._regions.keys()) > 0: si_ret = StridedInterval.top(high_bit - low_bit + 1) else: if 'global' in self._regions: si = self._regions['global'] si_ret = si.extract(high_bit, low_bit) else: si_ret = StridedInterval.empty(high_bit - low_bit + 1) return si_ret def concat(self, b): new_vs = ValueSet(bits=self.bits + b.bits) # TODO: This logic is obviously flawed. Correct it later :-( if isinstance(b, StridedInterval): for region, si in self._regions.items(): new_vs._set_si(region, self._region_base_addrs[region], si.concat(b)) elif isinstance(b, ValueSet): for region, si in self._regions.items(): new_vs._set_si(region, self._region_base_addrs[region], si.concat(b.get_si(region))) else: raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b))) return new_vs @normalize_types_one_arg def union(self, b): merged_vs = self.copy() if type(b) is ValueSet: for region, si in b.regions.items(): if region not in merged_vs._regions: merged_vs._regions[region] = si else: merged_vs._regions[region] = merged_vs._regions[region].union(si) merged_vs._si = merged_vs._si.union(b._si) else: for region, si in merged_vs._regions.items(): merged_vs._regions[region] = merged_vs._regions[region].union(b) merged_vs._si = merged_vs._si.union(b) return merged_vs @normalize_types_one_arg def widen(self, b): merged_vs = self.copy() if isinstance(b, ValueSet): for region, si in b.regions.items(): if region not in merged_vs.regions: merged_vs.regions[region] = si else: merged_vs.regions[region] = merged_vs.regions[region].widen(si) merged_vs._si = merged_vs._si.widen(b._si) else: for region in merged_vs._regions: merged_vs._regions[region] = merged_vs._regions[region].widen(b) merged_vs._si = merged_vs._si.widen(b) return merged_vs @normalize_types_one_arg def intersection(self, b): vs = self.copy() if isinstance(b, ValueSet): for region, si in b.regions.items(): if region not in vs.regions: pass else: vs.regions[region] = vs.regions[region].intersection(si) if vs.regions[region].is_empty: del vs.regions[region] vs._si = vs._si.intersection(b._si) else: for region in self._regions: vs.regions[region] = vs.regions[region].intersection(b) if vs.regions[region].is_empty: del vs.regions[region] vs._si = vs._si.intersection(b) return vs def identical(self, o): """ Used to make exact comparisons between two ValueSets. :param o: The other ValueSet to compare with. :return: True if they are exactly same, False otherwise. """ if self._reversed != o._reversed: return False for region, si in self.regions.items(): if region in o.regions: o_si = o.regions[region] if not si.identical(o_si): return False else: return False return True from ..ast.base import Base from .strided_interval import StridedInterval from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult from .errors import ClaripyVSAOperationError, ClaripyVSAError from ..errors import ClaripyValueError
29.795385
120
0.58357
18,128
0.936025
0
0
8,201
0.423452
0
0
5,494
0.283678
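The `ValueSet` in the record above maps region identifiers to strided intervals and unions them region by region. A much-simplified sketch of that region-wise union idea, using plain Python sets in place of `StridedInterval` (illustrative only, not claripy's API):

```python
def union_valuesets(a: dict, b: dict) -> dict:
    """Region-wise union of two {region_id: set_of_offsets} mappings."""
    merged = {region: offsets.copy() for region, offsets in a.items()}
    for region, offsets in b.items():
        merged.setdefault(region, set()).update(offsets)
    return merged

stack_vs = {"stack": {0x10, 0x18}}
heap_vs = {"heap": {0x0}, "stack": {0x20}}
# stack maps to {16, 24, 32}; heap maps to {0}
print(union_valuesets(stack_vs, heap_vs))
```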
feb21c64003d71c234c911e57ed8a4baa217c7cb
2,663
py
Python
fardaastationapi.py
sina-cb/fardaastationapi
0e27afe05195f346e17fd52e1c30b853c954a3b0
[ "Apache-2.0" ]
null
null
null
fardaastationapi.py
sina-cb/fardaastationapi
0e27afe05195f346e17fd52e1c30b853c954a3b0
[ "Apache-2.0" ]
1
2017-12-21T19:54:36.000Z
2018-01-08T02:05:11.000Z
fardaastationapi.py
sina-cb/fardaastationapi
0e27afe05195f346e17fd52e1c30b853c954a3b0
[ "Apache-2.0" ]
null
null
null
import logging from episodes import find_updates, db, count_all from logging import error as logi from flask import Flask, jsonify, request def create_app(config, debug=False, testing=False, config_overrides=None): app = Flask(__name__) app.config.from_object(config) app.config['JSON_AS_ASCII'] = False app.debug = debug app.testing = testing if config_overrides: app.config.update(config_overrides) # Configure logging if not app.testing: logging.basicConfig(level=logging.INFO) @app.before_request def before_request(): db.connect() @app.after_request def after_request(response): db.close() return response @app.route('/get_new_episodes') def get_new_episodes(): appengine_request = request.headers.get('X-Appengine-Cron') if appengine_request == 'true': from scraper import update_episodes update_episodes() return '<h1>Success</h1>' else: return '<h1>This is a crobjob and all the requests should come from appengine.</h1>' @app.route('/get_updates') def get_update(): timestamp = request.args.get('timestamp', '') if timestamp == '': logi('Default timestamp') timestamp = 0 else: timestamp = long(timestamp) result = find_updates(timestamp) return jsonify(result) @app.route('/') def welcome(): message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>', '<p>To get information about the latest episodes of Fardaa Station (by ' 'RadioFarda.com) please send a GET request to ' 'http://fardastationapi.appspot.com/get_updates URL.</p>', '<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the ' 'episodes before that timestamp. Example: ' 'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>', '<h1>Current number of episodes: {}</h1>'.format(count_all())) return message # Add an error handler. This is useful for debugging the live application, # however, you should disable the output of the exception for production # applications. @app.errorhandler(500) def server_error(e): return """ An internal error occurred: <pre>{}</pre> See logs for full stacktrace. """.format(e), 500 return app
33.708861
119
0.592189
0
0
0
0
1,900
0.713481
0
0
962
0.361247
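The file in the record above uses the Flask application-factory pattern with per-request database connect/close hooks. A stripped-down sketch of that structure (the `db` object here is a stand-in for the peewee-style database used in the original):

```python
from types import SimpleNamespace
from flask import Flask, jsonify

# Stand-in for the database object wired up in the original file.
db = SimpleNamespace(connect=lambda: None, close=lambda: None)

def create_app(config_object=None, debug=False):
    app = Flask(__name__)
    if config_object is not None:
        app.config.from_object(config_object)
    app.debug = debug

    @app.before_request
    def _connect():
        db.connect()      # open a connection for this request

    @app.after_request
    def _close(response):
        db.close()        # always release it afterwards
        return response

    @app.route("/health")
    def health():
        return jsonify(status="ok")

    return app
```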
feb27ff41ef1690499bd0cbcb5cc15ed8e07d63d
868
py
Python
pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py
iTeam-co/pytglib
e5e75e0a85f89b77762209b32a61b0a883c0ae61
[ "MIT" ]
6
2019-10-30T08:57:27.000Z
2021-02-08T14:17:43.000Z
pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py
iTeam-co/python-telegram
e5e75e0a85f89b77762209b32a61b0a883c0ae61
[ "MIT" ]
1
2021-08-19T05:44:10.000Z
2021-08-19T07:14:56.000Z
pytglib/api/types/can_transfer_ownership_result_password_too_fresh.py
iTeam-co/python-telegram
e5e75e0a85f89b77762209b32a61b0a883c0ae61
[ "MIT" ]
5
2019-12-04T05:30:39.000Z
2021-05-21T18:23:32.000Z
from ..utils import Object class CanTransferOwnershipResultPasswordTooFresh(Object): """ The 2-step verification was enabled recently, user needs to wait Attributes: ID (:obj:`str`): ``CanTransferOwnershipResultPasswordTooFresh`` Args: retry_after (:obj:`int`): Time left before the session can be used to transfer ownership of a chat, in seconds Returns: CanTransferOwnershipResult Raises: :class:`telegram.Error` """ ID = "canTransferOwnershipResultPasswordTooFresh" def __init__(self, retry_after, **kwargs): self.retry_after = retry_after # int @staticmethod def read(q: dict, *args) -> "CanTransferOwnershipResultPasswordTooFresh": retry_after = q.get('retry_after') return CanTransferOwnershipResultPasswordTooFresh(retry_after)
26.30303
96
0.68318
836
0.963134
0
0
205
0.236175
0
0
512
0.589862
feb49cfe9fd1f9a9e260952a3552e9f39bc9e707
12,199
py
Python
catapult.py
spraakbanken/sparv-catapult
03273985ceea6feef47a56084c595580d0338f7d
[ "MIT" ]
null
null
null
catapult.py
spraakbanken/sparv-catapult
03273985ceea6feef47a56084c595580d0338f7d
[ "MIT" ]
2
2021-12-13T19:47:29.000Z
2021-12-15T16:14:50.000Z
catapult.py
spraakbanken/sparv-catapult
03273985ceea6feef47a56084c595580d0338f7d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # catapult: runs python scripts in already running processes to eliminate the # python interpreter startup time. # # The lexicon for sparv.saldo.annotate and sparv.saldo.compound can be pre-loaded and # shared between processes. See the variable annotators in handle and start. # # Run scripts in the catapult with the c program catalaunch. from builtins import range, object from multiprocessing import Process, cpu_count from decorator import decorator import logging import os import re import runpy import socket import sys import traceback import sparv.util as util RECV_LEN = 4096 # Important to preload all modules otherwise processes will need to do # it upon request, introducing new delays. # # These imports uses the __all__ variables in the __init__ files. from sparv.util import * from sparv import * logging.basicConfig(format="%(process)d %(asctime)-15s %(message)s") log = logging.getLogger(__name__) log.setLevel(logging.INFO) """ Splits at every space that is not preceded by a backslash. """ splitter = re.compile('(?<!\\\\) ') def set_last_argument(*values): """ Decorates a function f, setting its last argument(s) to the given value(s). Used for setting the saldo lexicons to sparv.saldo.annotate and sparv.saldo.compound, and the process "dictionary" to sparv.malt.maltparse. The decorator module is used to give the same signature and docstring to the function, which is exploited in sparv.util.run. """ @decorator def inner(f, *args, **kwargs): args = list(args) for v in values: args.pop() for v in values: args.append(v) f(*args, **kwargs) return inner def handle(client_sock, verbose, annotators): """ Handle a client: parse the arguments, change to the relevant directory, then run the script. Stdout and stderr are directed to /dev/null or to the client socket. """ def chunk_send(msg): """ Sends a message chunk until it is totally received in the other end """ msg = msg.encode(util.UTF8) while len(msg) > 0: sent = client_sock.send(msg) if sent == 0: raise RuntimeError("socket connection broken") msg = msg[sent:] def set_stdout_stderr(): """ Put stdout and stderr to the client_sock, if verbose. Returns the clean-up handler. """ class Writer(object): def write(self, msg): log.debug(msg) if verbose: chunk_send(msg) def flush(self): pass orig_stds = sys.stdout, sys.stderr w = Writer() sys.stdout = w sys.stderr = w def cleanup(): """ Restores stdout and stderr """ sys.stdout = orig_stds[0] sys.stderr = orig_stds[1] client_sock.close() return cleanup # Receive data data = b"" new_data = None # Message is terminated with a lone \ while new_data is None or not new_data.endswith(b'\\'): new_data = client_sock.recv(RECV_LEN) log.debug("Received %s", new_data) data += new_data if len(new_data) == 0: log.warning("Received null!") chunk_send("Error when receiving: got an empty message") return # Drop the terminating \ data = data[0:-1] # Split arguments on spaces, and replace '\ ' to ' ' and \\ to \ args = [arg.replace('\\ ', ' ').replace('\\\\', '\\') for arg in re.split(splitter, data.decode(util.UTF8))] log.debug("Args: %s", args) ### PING? 
### if len(args) == 2 and args[1] == "PING": log.info("Ping requested") chunk_send("PONG") return # If the first argument is -m, the following argument is a module # name instead of a script name module_flag = len(args) > 2 and args[1] == '-m' if module_flag: args.pop(1) if len(args) > 1: # First argument is the pwd of the caller old_pwd = os.getcwd() pwd = args.pop(0) log.info('Running %s', args[0]) log.debug('with arguments: %s', ' '.join(args[1:])) log.debug('in directory %s', pwd) # Set stdout and stderr, which returns the cleaup function cleanup = set_stdout_stderr() # Run the command try: sys.argv = args os.chdir(pwd) if module_flag: annotator = annotators.get(args[0], None) if not annotator: # some of the annotators require two arguments annotator = annotators.get((args[0], args[1]), None) if annotator: # skip the first argument now sys.argv = args[0] sys.argv.extend(args[2:]) if annotator: util.run.main(annotator) else: runpy.run_module(args[0], run_name='__main__') else: runpy.run_path(args[0], run_name='__main__') except (ImportError, IOError): # If file does not exist, send the error message chunk_send("%s\n" % sys.exc_info()[1]) cleanup() log.exception("File does not exist") except: # Send other errors, and if verbose, send tracebacks chunk_send("%s\n" % sys.exc_info()[1]) traceback.print_exception(*sys.exc_info()) cleanup() log.exception("Unknown error") else: cleanup() os.chdir(old_pwd) # Run the cleanup function if there is one (only used with malt) annotators.get((args[0], 'cleanup'), lambda: None)() log.info('Completed %s', args[0]) else: log.info('Cannot handle %s', data) chunk_send('Cannot handle %s\n' % data) def worker(server_socket, verbose, annotators, malt_args=None, swener_args=None): """ Workers listen to the socket server, and handle incoming requests Each process starts an own maltparser process, because they are cheap and cannot serve multiple clients at the same time. 
""" if malt_args: process_dict = dict(process=None, restart=True) def start_malt(): if process_dict['process'] is None or process_dict['restart']: old_process = process_dict['process'] old_process and util.system.kill_process(old_process) malt_process = malt.maltstart(**malt_args) if verbose: log.info('(Re)started malt process: %s', malt_process) process_dict['process'] = malt_process annotators['sparv.malt'] = set_last_argument(process_dict)(malt.maltparse) elif verbose: log.info("Not restarting malt this time") start_malt() annotators['sparv.malt', 'cleanup'] = start_malt if swener_args: process_dict = dict(process=None, restart=True) def start_swener(): if process_dict['process'] is None or process_dict['restart']: old_process = process_dict['process'] old_process and util.system.kill_process(old_process) swener_process = swener.swenerstart(**swener_args) if verbose: log.info('(Re)started SweNER process: %s', swener_process) process_dict['process'] = swener_process annotators['sparv.swener'] = set_last_argument(process_dict)(swener.tag_ne) elif verbose: log.info("Not restarting SweNER this time") start_swener() annotators['sparv.swener', 'cleanup'] = start_swener if verbose: log.info("Worker running!") while True: client_sock, addr = server_socket.accept() try: handle(client_sock, verbose, annotators) except: log.exception('Error in handling code') traceback.print_exception(*sys.exc_info()) client_sock.close() def start(socket_path, processes=1, verbose='false', saldo_model=None, compound_model=None, stats_model=None, dalin_model=None, swedberg_model=None, blingbring_model=None, malt_jar=None, malt_model=None, malt_encoding=util.UTF8, sentiment_model=None, swefn_model=None, swener=False, swener_encoding=util.UTF8): """ Starts a catapult on a socket file, using a number of processes. If verbose is false, all stdout and stderr programs produce is piped to /dev/null, otherwise it is sent to the client. The computation is done by the catapult processes, however. Regardless of what verbose is, client errors should be reported both in the catapult and to the client. The saldo model and compound model can be pre-loaded and shared in memory between processes. Start processes using catalaunch. 
""" if os.path.exists(socket_path): log.error('socket %s already exists', socket_path) exit(1) verbose = verbose.lower() == 'true' log.info('Verbose: %s', verbose) # If processes does not contain an int, set it to the number of processors try: processes = int(processes) except: processes = cpu_count() # Start the socket server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_socket.bind(socket_path) server_socket.listen(processes) # The dictionary of functions with saved lexica, indexed by module name strings annotators = {} # Load Saldo and older lexicons lexicons = [m for m in [saldo_model, dalin_model, swedberg_model] if m] if lexicons: lexicon_dict = {} for lexicon in lexicons: lexicon_dict[os.path.basename(lexicon).rstrip(".pickle")] = saldo.SaldoLexicon(lexicon) annotators['sparv.saldo'] = set_last_argument(lexicon_dict)(saldo.annotate) if stats_model and compound_model: annotators['sparv.compound'] = set_last_argument( compound.SaldoCompLexicon(compound_model), compound.StatsLexicon(stats_model))(compound.annotate) elif compound_model: annotators['sparv.compound_simple'] = set_last_argument( compound_simple.SaldoLexicon(compound_model))(compound_simple.annotate) # if blingbring_model: # annotators['sparv.lexical_classes'] = set_last_argument( # util.PickledLexicon(blingbring_model))(lexical_classes.annotate_bb_words) # if swefn_model: # annotators['sparv.lexical_classes'] = set_last_argument( # util.PickledLexicon(swefn_model))(lexical_classes.annotate_swefn_words) if sentiment_model: annotators['sparv.sentiment'] = set_last_argument( util.PickledLexicon(sentiment_model))(sentiment.sentiment) # if models_1700s: # models = models_1700s.split() # lexicons = [saldo.SaldoLexicon(lex) for lex in models] # annotators[('sparv.fsv', '--annotate_fallback')] = set_last_argument(lexicons)(fsv.annotate_fallback) # annotators[('sparv.fsv', '--annotate_full')] = set_last_argument(lexicons)(fsv.annotate_full) if verbose: log.info('Loaded annotators: %s', list(annotators.keys())) if malt_jar and malt_model: malt_args = dict(maltjar=malt_jar, model=malt_model, encoding=malt_encoding, send_empty_sentence=True) else: malt_args = None if swener: swener_args = dict(stdin="", encoding=swener_encoding, verbose=True) else: swener_args = None # Start processes-1 workers workers = [Process(target=worker, args=[server_socket, verbose, annotators, malt_args]) for i in range(processes - 1)] for p in workers: p.start() # Additionally, let this thread be worker 0 worker(server_socket, verbose, annotators, malt_args, swener_args) if __name__ == '__main__': util.run.main(start)
32.617647
111
0.61792
201
0.016477
0
0
198
0.016231
0
0
4,556
0.373473
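`set_last_argument` in the record above pre-binds heavy shared objects (lexicons, process handles) as the trailing arguments of annotator functions so worker processes never reload them. A simplified stand-in using `functools` (the original uses the `decorator` package to preserve call signatures, which matters for its `util.run.main` dispatch):

```python
import functools

def set_last_argument(*preloaded):
    """Return a decorator replacing a function's last len(preloaded) arguments."""
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # Drop the caller's placeholder trailing arguments, then append the
            # preloaded objects in their place.
            args = list(args)[: len(args) - len(preloaded)]
            return f(*args, *preloaded, **kwargs)
        return inner
    return wrap

def annotate(text, lexicon):
    return [w for w in text.split() if w in lexicon]

shared_lexicon = {"alpha", "beta"}                       # loaded once, shared by all calls
annotate_shared = set_last_argument(shared_lexicon)(annotate)
print(annotate_shared("alpha gamma beta", None))         # placeholder last arg is ignored
```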
feb55dc64767ea42fd4dbdb633eb49cefc5afea8
2,445
py
Python
tests/test_sentiments.py
rajeshkumargp/TextBlob
a8709368f2a8a8ba4d87730111f8b6675d0735cd
[ "MIT" ]
6,608
2015-01-02T13:13:16.000Z
2022-03-31T13:44:41.000Z
tests/test_sentiments.py
rajeshkumargp/TextBlob
a8709368f2a8a8ba4d87730111f8b6675d0735cd
[ "MIT" ]
277
2015-01-01T15:08:55.000Z
2022-03-28T20:00:06.000Z
tests/test_sentiments.py
rajeshkumargp/TextBlob
a8709368f2a8a8ba4d87730111f8b6675d0735cd
[ "MIT" ]
1,110
2015-01-01T22:04:39.000Z
2022-03-20T20:39:26.000Z
from __future__ import unicode_literals import unittest from nose.tools import * # PEP8 asserts from nose.plugins.attrib import attr from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS class TestPatternSentiment(unittest.TestCase): def setUp(self): self.analyzer = PatternAnalyzer() def test_kind(self): assert_equal(self.analyzer.kind, CONTINUOUS) def test_analyze(self): p1 = "I feel great this morning." n1 = "This is a terrible car." p1_result = self.analyzer.analyze(p1) n1_result = self.analyzer.analyze(n1) assert_true(p1_result[0] > 0) assert_true(n1_result[0] < 0) assert_equal(p1_result.polarity, p1_result[0]) assert_equal(p1_result.subjectivity, p1_result[1]) def test_analyze_assessments(self): p1 = "I feel great this morning." n1 = "This is a terrible car." p1_result = self.analyzer.analyze(p1,keep_assessments=True) n1_result = self.analyzer.analyze(n1,keep_assessments=True) p1_assessment = p1_result.assessments[0] n1_assessment = n1_result.assessments[0] assert_true(p1_assessment[1] > 0) assert_true(n1_assessment[1] < 0) assert_equal(p1_result.polarity, p1_assessment[1]) assert_equal(p1_result.subjectivity, p1_assessment[2]) class TestNaiveBayesAnalyzer(unittest.TestCase): def setUp(self): self.analyzer = NaiveBayesAnalyzer() def test_kind(self): assert_equal(self.analyzer.kind, DISCRETE) @attr('slow') def test_analyze(self): p1 = 'I feel great this morning.' n1 = 'This is a terrible car.' p1_result = self.analyzer.analyze(p1) assert_equal(p1_result[0], 'pos') assert_equal(self.analyzer.analyze(n1)[0], 'neg') # The 2nd item should be the probability that it is positive assert_true(isinstance(p1_result[1], float)) # 3rd item is probability that it is negative assert_true(isinstance(p1_result[2], float)) assert_about_equal(p1_result[1] + p1_result[2], 1) assert_equal(p1_result.classification, p1_result[0]) assert_equal(p1_result.p_pos, p1_result[1]) assert_equal(p1_result.p_neg, p1_result[2]) def assert_about_equal(first, second, places=4): return assert_equal(round(first, places), second) if __name__ == '__main__': unittest.main()
35.434783
89
0.685481
2,062
0.843354
0
0
721
0.294888
0
0
304
0.124335
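`assert_about_equal` in the record above rounds one side before comparing, a nose-era idiom; plain `unittest` can express the same probability-sum invariant directly with `assertAlmostEqual`. A small sketch (not part of the TextBlob suite; the numbers are made up):

```python
import unittest

class TestProbabilitiesSumToOne(unittest.TestCase):
    def test_pos_and_neg_probabilities(self):
        p_pos, p_neg = 0.7312, 0.2688   # example classifier output
        # assertAlmostEqual compares after rounding to `places` decimal places.
        self.assertAlmostEqual(p_pos + p_neg, 1.0, places=4)

if __name__ == "__main__":
    unittest.main()
```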
feb57d630ade4f4d7aefdadbe2f5755982d89a54
127
py
Python
src/unicef_security/apps.py
unicef/unicef-security
cc51ba52cddb845b8174cf3dc94706f0334453b2
[ "Apache-2.0" ]
null
null
null
src/unicef_security/apps.py
unicef/unicef-security
cc51ba52cddb845b8174cf3dc94706f0334453b2
[ "Apache-2.0" ]
10
2019-04-24T14:33:49.000Z
2020-12-19T01:07:06.000Z
src/unicef_security/apps.py
unicef/unicef-security
cc51ba52cddb845b8174cf3dc94706f0334453b2
[ "Apache-2.0" ]
1
2019-04-11T15:34:18.000Z
2019-04-11T15:34:18.000Z
from django.apps import AppConfig class Config(AppConfig): name = 'unicef_security' verbose_name = "UNICEF Security"
18.142857
36
0.740157
90
0.708661
0
0
0
0
0
0
34
0.267717
feb6feac24e99949d73380d3a6510ebf108ac24b
229
py
Python
utils/pretty-tests.py
isJuhn/pcsx2_ipc
51f92d51aec05dffa82d418c97fc1d628b2ed40f
[ "MIT" ]
7
2021-07-09T20:23:19.000Z
2022-03-14T06:56:14.000Z
utils/pretty-tests.py
isJuhn/pcsx2_ipc
51f92d51aec05dffa82d418c97fc1d628b2ed40f
[ "MIT" ]
2
2021-03-07T16:14:44.000Z
2021-03-30T07:48:05.000Z
utils/pretty-tests.py
isJuhn/pcsx2_ipc
51f92d51aec05dffa82d418c97fc1d628b2ed40f
[ "MIT" ]
1
2021-03-07T15:59:31.000Z
2021-03-07T15:59:31.000Z
import json import sys f=open(sys.argv[1]) y = json.loads(f.read()) print("Tests results: " + str(y["result"])) print("Tests duration: " + str(y["duration"])) print("Tests output:\n~~~~~~~~~~~~~~~~~~~~\n" + str(y["stdout"]))
25.444444
66
0.576419
0
0
0
0
0
0
0
0
100
0.436681
feb7b66503cd218d51059640f9914912cefb66a6
14,533
py
Python
tests/scripts/thread-cert/test_network_layer.py
AdityaHPatwardhan/openthread
a201e9d5d0273bb51fa20efc8758be20a725018e
[ "BSD-3-Clause" ]
2,962
2016-05-11T15:06:06.000Z
2022-03-27T20:06:16.000Z
tests/scripts/thread-cert/test_network_layer.py
AdityaHPatwardhan/openthread
a201e9d5d0273bb51fa20efc8758be20a725018e
[ "BSD-3-Clause" ]
5,899
2016-05-11T19:21:49.000Z
2022-03-31T18:17:20.000Z
tests/scripts/thread-cert/test_network_layer.py
AdityaHPatwardhan/openthread
a201e9d5d0273bb51fa20efc8758be20a725018e
[ "BSD-3-Clause" ]
1,113
2016-05-11T15:37:42.000Z
2022-03-31T09:37:04.000Z
#!/usr/bin/env python3 # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import io import random import struct import unittest import common import network_layer def any_eid(): return bytearray([random.getrandbits(8) for _ in range(16)]) def any_mac_extended_address(): return bytearray([random.getrandbits(8) for _ in range(8)]) def any_rloc16(): return random.getrandbits(16) def any_ml_eid(): return bytearray([random.getrandbits(8) for _ in range(8)]) def any_status(): return random.getrandbits(1) def any_seconds(): return random.getrandbits(32) def any_id_sequence(): return random.getrandbits(8) def any_router_id_mask(): return random.getrandbits(64) def any_options(count=None): count = count if count is not None else random.randint(0, 255) return [random.getrandbits(8) for _ in range(count)] def any_tlv_data(length=None): _type = random.getrandbits(8) length = length if length is not None else random.getrandbits(8) value = bytearray([random.getrandbits(8) for _ in range(length)]) return bytearray([_type, length]) + value def any_tlvs_data(count=None): count = count if count is not None else random.randint(0, 16) data = bytearray() for _ in range(count): data += any_tlv_data(random.randint(1, 15)) return data class TestTargetEid(unittest.TestCase): def test_should_return_eid_value_when_eid_property_is_called(self): # GIVEN eid = any_eid() target_eid = network_layer.TargetEid(eid) # WHEN actual_eid = target_eid.eid # THEN self.assertEqual(eid, actual_eid) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN eid = any_eid() target_eid = network_layer.TargetEid(eid) # THEN self.assertEqual(target_eid, network_layer.TargetEid(eid)) class TestTargetEidFactory(unittest.TestCase): def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self): # GIVEN eid = any_eid() factory = network_layer.TargetEidFactory() # WHEN target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo()) # THEN self.assertTrue(isinstance(target_eid, network_layer.TargetEid)) 
self.assertEqual(eid, target_eid.eid) class TestMacExtendedAddress(unittest.TestCase): def test_should_return_mac_address_value_when_mac_address_property_is_called(self): # GIVEN mac_address = any_mac_extended_address() mac_extended_address = network_layer.MacExtendedAddress(mac_address) # WHEN actual_mac_address = mac_extended_address.mac_address # THEN self.assertEqual(mac_address, actual_mac_address) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN mac_address = any_mac_extended_address() mac_extended_address = network_layer.MacExtendedAddress(mac_address) # THEN self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address)) class TestMacExtendedAddressFactory(unittest.TestCase): def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self): # GIVEN mac_address = any_mac_extended_address() factory = network_layer.MacExtendedAddressFactory() # WHEN mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo()) # THEN self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress)) self.assertEqual(mac_address, mac_extended_address.mac_address) class TestRloc16(unittest.TestCase): def test_should_return_rloc16_value_when_rloc16_property_is_called(self): # GIVEN rloc16 = any_rloc16() rloc16_obj = network_layer.Rloc16(rloc16) # WHEN actual_rloc16 = rloc16_obj.rloc16 # THEN self.assertEqual(rloc16, actual_rloc16) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN rloc16 = any_rloc16() rloc16_obj = network_layer.Rloc16(rloc16) # THEN self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16)) class TestRloc16Factory(unittest.TestCase): def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self): # GIVEN rloc16 = any_rloc16() factory = network_layer.Rloc16Factory() data = bytearray(struct.pack(">H", rloc16)) # WHEN rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo()) # THEN self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16)) self.assertEqual(rloc16, rloc16_obj.rloc16) class TestMlEid(unittest.TestCase): def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self): # GIVEN ml_eid = any_ml_eid() ml_eid_obj = network_layer.MlEid(ml_eid) # WHEN actual_ml_eid = ml_eid_obj.ml_eid # THEN self.assertEqual(ml_eid, actual_ml_eid) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN ml_eid = any_ml_eid() ml_eid_obj = network_layer.MlEid(ml_eid) # THEN self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid)) class TestMlEidFactory(unittest.TestCase): def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self): # GIVEN ml_eid = any_ml_eid() factory = network_layer.MlEidFactory() # WHEN ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo()) # THEN self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid)) self.assertEqual(ml_eid, ml_eid_obj.ml_eid) class TestStatus(unittest.TestCase): def test_should_return_status_value_when_status_property_is_called(self): # GIVEN status = any_status() status_obj = network_layer.Status(status) # WHEN actual_status = status_obj.status # THEN self.assertEqual(status, actual_status) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN status = any_status() status_obj = network_layer.Status(status) # THEN self.assertEqual(status_obj, network_layer.Status(status)) class 
TestStatusFactory(unittest.TestCase): def test_should_create_Status_from_bytearray_when_parse_method_is_called(self): # GIVEN status = any_status() factory = network_layer.StatusFactory() data = bytearray([status]) # WHEN status_obj = factory.parse(io.BytesIO(data), common.MessageInfo()) # THEN self.assertTrue(isinstance(status_obj, network_layer.Status)) self.assertEqual(status, status_obj.status) class TestTimeSinceLastTransaction(unittest.TestCase): def test_should_return_seconds_value_when_seconds_property_is_called(self): # GIVEN seconds = any_seconds() time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds) # WHEN actual_seconds = time_since_last_transaction.seconds # THEN self.assertEqual(seconds, actual_seconds) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN seconds = any_seconds() time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds) # THEN self.assertEqual( time_since_last_transaction, network_layer.TimeSinceLastTransaction(seconds), ) class TestTimeSinceLastTransactionFactory(unittest.TestCase): def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self): # GIVEN seconds = any_seconds() factory = network_layer.TimeSinceLastTransactionFactory() data = bytearray(struct.pack(">L", seconds)) # WHEN time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo()) # THEN self.assertTrue(isinstance( time_since_last_transaction, network_layer.TimeSinceLastTransaction, )) self.assertEqual(seconds, time_since_last_transaction.seconds) class TestRouterMask(unittest.TestCase): def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self): # GIVEN id_sequence = any_id_sequence() router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask()) # WHEN actual_id_sequence = router_mask.id_sequence # THEN self.assertEqual(id_sequence, actual_id_sequence) def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self): # GIVEN router_id_mask = any_router_id_mask() router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask) # WHEN actual_router_id_mask = router_mask.router_id_mask # THEN self.assertEqual(router_id_mask, actual_router_id_mask) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN id_sequence = any_id_sequence() router_id_mask = any_router_id_mask() router_mask = network_layer.RouterMask(id_sequence, router_id_mask) # THEN self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask)) class TestRouterMaskFactory(unittest.TestCase): def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self): # GIVEN id_sequence = any_id_sequence() router_id_mask = any_router_id_mask() factory = network_layer.RouterMaskFactory() data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask) # WHEN router_mask = factory.parse(io.BytesIO(data), common.MessageInfo()) # THEN self.assertTrue(isinstance(router_mask, network_layer.RouterMask)) self.assertEqual(id_sequence, router_mask.id_sequence) self.assertEqual(router_id_mask, router_mask.router_id_mask) class TestNdOption(unittest.TestCase): def test_should_return_options_value_when_options_property_is_called(self): # GIVEN options = any_options() nd_option = network_layer.NdOption(options) # WHEN actual_options = nd_option.options # THEN self.assertEqual(options, actual_options) def 
test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN options = any_options() nd_option = network_layer.NdOption(options) # THEN self.assertEqual(nd_option, network_layer.NdOption(options)) class TestNdOptionFactory(unittest.TestCase): def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self): # GIVEN options = any_options() factory = network_layer.NdOptionFactory() data = bytearray(options) # WHEN nd_option = factory.parse(io.BytesIO(data), common.MessageInfo()) # THEN self.assertTrue(isinstance(nd_option, network_layer.NdOption)) self.assertEqual(options, nd_option.options) class TestThreadNetworkData(unittest.TestCase): def test_should_return_options_value_when_options_property_is_called(self): # GIVEN tlvs = any_tlvs_data() thread_network_data = network_layer.ThreadNetworkData(tlvs) # WHEN actual_tlvs = thread_network_data.tlvs # THEN self.assertEqual(tlvs, actual_tlvs) def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self): # GIVEN tlvs = any_tlvs_data() thread_network_data = network_layer.ThreadNetworkData(tlvs) # THEN self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs)) class TestThreadNetworkDataFactory(unittest.TestCase): def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self): # GIVEN tlvs = any_tlvs_data() class DummyNetworkDataTlvsFactory: def parse(self, data, message_info): return bytearray(data.read()) factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory()) # WHEN thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo()) # THEN self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData)) self.assertEqual(tlvs, thread_network_data.tlvs) if __name__ == "__main__": unittest.main()
29.538618
103
0.718021
11,562
0.795569
0
0
0
0
0
0
2,076
0.142847
feb8045cb4a0a0c1c1b374f1a7ddff3513dfcc95
7,079
py
Python
salt/modules/kernelpkg_linux_apt.py
markgras/salt
d66cd3c935533c63870b83228b978ce43e0ef70d
[ "Apache-2.0" ]
9,425
2015-01-01T05:59:24.000Z
2022-03-31T20:44:05.000Z
salt/modules/kernelpkg_linux_apt.py
markgras/salt
d66cd3c935533c63870b83228b978ce43e0ef70d
[ "Apache-2.0" ]
33,507
2015-01-01T00:19:56.000Z
2022-03-31T23:48:20.000Z
salt/modules/kernelpkg_linux_apt.py
markgras/salt
d66cd3c935533c63870b83228b978ce43e0ef70d
[ "Apache-2.0" ]
5,810
2015-01-01T19:11:45.000Z
2022-03-31T02:37:20.000Z
""" Manage Linux kernel packages on APT-based systems """ import functools import logging import re try: from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import CommandExecutionError HAS_REQUIRED_LIBS = True except ImportError: HAS_REQUIRED_LIBS = False log = logging.getLogger(__name__) __virtualname__ = "kernelpkg" def __virtual__(): """ Load this module on Debian-based systems only """ if not HAS_REQUIRED_LIBS: return (False, "Required library could not be imported") if __grains__.get("os_family", "") in ("Kali", "Debian"): return __virtualname__ elif __grains__.get("os_family", "") == "Cumulus": return __virtualname__ return (False, "Module kernelpkg_linux_apt: no APT based system detected") def active(): """ Return the version of the running kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.active """ if "pkg.normalize_name" in __salt__: return __salt__["pkg.normalize_name"](__grains__["kernelrelease"]) return __grains__["kernelrelease"] def list_installed(): """ Return a list of all installed kernels. CLI Example: .. code-block:: bash salt '*' kernelpkg.list_installed """ pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type())) pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True) if pkgs is None: pkgs = [] result = list(filter(pkg_re.match, pkgs)) if result is None: return [] prefix_len = len(_package_prefix()) + 1 return sorted( [pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version) ) def latest_available(): """ Return the version of the latest kernel from the package repositories. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_available """ result = __salt__["pkg.latest_version"]( "{}-{}".format(_package_prefix(), _kernel_type()) ) if result == "": return latest_installed() version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result) return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type()) def latest_installed(): """ Return the version of the latest installed kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_installed .. note:: This function may not return the same value as :py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel has been installed and the system has not yet been rebooted. The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function exists to detect this condition. """ pkgs = list_installed() if pkgs: return pkgs[-1] return None def needs_reboot(): """ Detect if a new kernel version has been installed but is not running. Returns True if a new kernel is installed, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.needs_reboot """ return _LooseVersion(active()) < _LooseVersion(latest_installed()) def upgrade(reboot=False, at_time=None): """ Upgrade the kernel and optionally reboot the system. reboot : False Request a reboot if a new kernel is available. at_time : immediate Schedule the reboot at some point in the future. This argument is ignored if ``reboot=False``. See :py:func:`~salt.modules.system.reboot` for more details on this argument. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade salt '*' kernelpkg.upgrade reboot=True at_time=1 .. note:: An immediate reboot often shuts down the system before the minion has a chance to return, resulting in errors. A minimal delay (1 minute) is useful to ensure the result is delivered to the master. 
""" result = __salt__["pkg.install"]( name="{}-{}".format(_package_prefix(), latest_available()) ) _needs_reboot = needs_reboot() ret = { "upgrades": result, "active": active(), "latest_installed": latest_installed(), "reboot_requested": reboot, "reboot_required": _needs_reboot, } if reboot and _needs_reboot: log.warning("Rebooting system due to kernel upgrade") __salt__["system.reboot"](at_time=at_time) return ret def upgrade_available(): """ Detect if a new kernel version is available in the repositories. Returns True if a new kernel is available, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade_available """ return _LooseVersion(latest_available()) > _LooseVersion(latest_installed()) def remove(release): """ Remove a specific version of the kernel. release The release number of an installed kernel. This must be the entire release number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`, not the package name. CLI Example: .. code-block:: bash salt '*' kernelpkg.remove 4.4.0-70-generic """ if release not in list_installed(): raise CommandExecutionError( "Kernel release '{}' is not installed".format(release) ) if release == active(): raise CommandExecutionError("Active kernel cannot be removed") target = "{}-{}".format(_package_prefix(), release) log.info("Removing kernel package %s", target) __salt__["pkg.purge"](target) return {"removed": [target]} def cleanup(keep_latest=True): """ Remove all unused kernel packages from the system. keep_latest : True In the event that the active kernel is not the latest one installed, setting this to True will retain the latest kernel package, in addition to the active one. If False, all kernel packages other than the active one will be removed. CLI Example: .. code-block:: bash salt '*' kernelpkg.cleanup """ removed = [] # Loop over all installed kernel packages for kernel in list_installed(): # Keep the active kernel package if kernel == active(): continue # Optionally keep the latest kernel package if keep_latest and kernel == latest_installed(): continue # Remove the kernel package removed.extend(remove(kernel)["removed"]) return {"removed": removed} def _package_prefix(): """ Return static string for the package prefix """ return "linux-image" def _kernel_type(): """ Parse the kernel name and return its type """ return re.match(r"^[\d.-]+-(.+)$", active()).group(1) def _cmp_version(item1, item2): """ Compare function for package version sorting """ vers1 = _LooseVersion(item1) vers2 = _LooseVersion(item2) if vers1 < vers2: return -1 if vers1 > vers2: return 1 return 0
24.49481
98
0.638932
0
0
0
0
0
0
0
0
4,036
0.570137
feb9338f0d564ca62f3ee051a6a33301b2ea1017
1,818
py
Python
main.py
david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU
971b911efee8f52c5950ba777b79e58a4f840024
[ "Apache-2.0" ]
null
null
null
main.py
david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU
971b911efee8f52c5950ba777b79e58a4f840024
[ "Apache-2.0" ]
null
null
null
main.py
david-slatinek/running-a-program-on-the-CPU-vs.-on-the-GPU
971b911efee8f52c5950ba777b79e58a4f840024
[ "Apache-2.0" ]
null
null
null
import json

import numpy as np
from numba import jit
from timeit import default_timer as timer

# Constant, used in the formula.
# Defined here to speed up the calculation, i.e. it's calculated only once
# and then placed in the formula.
SQRT_2PI = np.float32(np.sqrt(2 * np.pi))


# This function will run on the CPU.
def gaussian_cpu(values, mean, sigma):
    """Calculate values of the Gaussian function.

    :param values: list, function input parameters.
    :param mean: float, arithmetic mean.
    :param sigma: float, standard deviation.
    :return: list.
    """
    result = np.zeros_like(values)
    for index, item in enumerate(values):
        result[index] = (1 / (sigma * SQRT_2PI)) * (np.e ** (-0.5 * ((item - mean) / sigma) ** 2))
    return result


# This function will run on the GPU.
gaussian_gpu = jit(gaussian_cpu)


def write_to_file(name, values):
    """Write results to a file.

    :param name: string, file name, only prefix.
    :param values: dictionary, values to write.
    """
    with open(name + ".json", 'w') as f:
        json.dump(values, f, indent=4)


if __name__ == "__main__":
    # Randomly generated values.
    x = np.random.uniform(-3, 3, size=1000000).astype(np.float32)
    # Randomly generated mean.
    m = np.random.uniform(1, 10)
    # Randomly generated standard deviation.
    s = np.random.uniform(1, 10)
    # The number of rounds.
    n = 1
    # Used to store execution time.
    time_results = {}

    for i in range(n):
        start = timer()
        gaussian_cpu(x, m, s)
        end = timer() - start
        time_results[i] = end

    write_to_file("cpu", time_results)

    for i in range(n):
        start = timer()
        gaussian_gpu(x, m, s)
        end = timer() - start
        time_results[i] = end

    write_to_file("gpu", time_results)
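A minimal sketch of a variant that actually targets the GPU with numba's vectorize decorator (an assumption beyond the file above, which wraps the same function in the plain CPU jit); it requires a CUDA-capable device and toolkit, and the signature string mirrors the float32 inputs used above:

import math

import numpy as np
from numba import vectorize

SQRT_2PI = np.float32(np.sqrt(2 * np.pi))


@vectorize(['float32(float32, float32, float32)'], target='cuda')
def gaussian_cuda(x, mean, sigma):
    # Same formula as gaussian_cpu, evaluated element-wise on the GPU.
    return (1 / (sigma * SQRT_2PI)) * math.exp(-0.5 * ((x - mean) / sigma) ** 2)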
25.605634
98
0.633663
0
0
0
0
0
0
0
0
733
0.40319
feb98f525f627b833eb5f7cdfb89e344a5f06574
103
py
Python
src/jj_analyzer/__init__.py
ninetymiles/jj-logcat-analyzer
d4ae0fddfefc303ae9c17e6c9e08aad6a231e036
[ "Apache-1.1" ]
null
null
null
src/jj_analyzer/__init__.py
ninetymiles/jj-logcat-analyzer
d4ae0fddfefc303ae9c17e6c9e08aad6a231e036
[ "Apache-1.1" ]
null
null
null
src/jj_analyzer/__init__.py
ninetymiles/jj-logcat-analyzer
d4ae0fddfefc303ae9c17e6c9e08aad6a231e036
[ "Apache-1.1" ]
null
null
null
#! /usr/bin/python

import sys

if sys.version_info[0] == 3:
    from .__main__ import *
else:
    pass
12.875
28
0.640777
0
0
0
0
0
0
0
0
18
0.174757
227dbc607b392dad80b7a078ce5ee4e6eb5704f6
5,605
py
Python
utility_functions.py
Team-501-The-PowerKnights/Powerknights-Slack-Bot
1ce25c954aa0c089aa93a3d63bd475d585d39bb6
[ "Apache-2.0" ]
1
2019-05-03T13:20:09.000Z
2019-05-03T13:20:09.000Z
utility_functions.py
Team-501-The-PowerKnights/Powerknights-Slack-Bot
1ce25c954aa0c089aa93a3d63bd475d585d39bb6
[ "Apache-2.0" ]
8
2019-05-04T17:06:21.000Z
2020-05-29T12:37:06.000Z
utility_functions.py
Team-501-The-PowerKnights/Powerknights-Slack-Bot
1ce25c954aa0c089aa93a3d63bd475d585d39bb6
[ "Apache-2.0" ]
null
null
null
import datetime def iso_extract_info(string): """ Will get all of the info and return it as an array :param string: ISO formatted string that will be used for extraction :return: array [year, month, day, military_time_hour, minutes, hours] :note: every item is an int except for minutes :note: hours only is there is military_time_hour is greater than 12 """ elements = [] characters = list(string) year_int = int("".join(characters[0:4])) month_int = int("".join(characters[5:7])) day_int = int("".join(characters[8:10])) military_time_hours_int = int("".join(characters[11:13])) minutes_int = "".join(characters[14:16]) hours = 0 elements.append(year_int) elements.append(month_int) elements.append(day_int) elements.append(minutes_int) if military_time_hours_int > 12: hours += military_time_hours_int - 12 elements.append(hours) return elements # # Testing: # print("[year, month, day, military_time_hour, minutes, hours]") # print(iso_extract_info('2019-04-27T16:00:00-04:00')) # Doesn't use the "iso_extract_info" function def iso_format_to_regular(string): """ Will take a string that is an iso formatted string and make it look readable :param string: the iso formatted string :return: str """ characters = list(string) year_int = int("".join(characters[0:4])) month_int = int("".join(characters[5:7])) day_int = int("".join(characters[8:10])) military_time_hours_int = int("".join(characters[11:13])) minutes_int = "".join(characters[14:16]) if military_time_hours_int > 12: hours = military_time_hours_int - 12 final_string = "{month}/{day}/{year} {hour}:{minute}PM".format( month=month_int, day=day_int, year=year_int, hour=hours, minute=minutes_int) return final_string else: final_string = "{month}/{day}/{year} {hour}:{minute}AM".format( month=month_int, day=day_int, year=year_int, hour=military_time_hours_int, minute=minutes_int) return final_string # Testing: # print(iso_format_to_regular('2019-04-27T16:00:00-04:00')) # Doesn't use the "iso_extract_info" function def fix_time(strange_date): """ Will rearrange the strange date that Google gives and repalce it with the normal string. 
:param strange_date: strange time that google gives when an event is marked as "all day" :return: str """ items = strange_date.split("-") year_int = int(items[0]) month_int = int(items[1]) day_int = int(items[2]) new_str = "{month}/{day}/{year}".format( month=month_int, day=day_int, year=year_int) return new_str # Doesn't use the "iso_extract_info" function def multiday_checker_STRANGE(start_date, end_date): """ Will check if an event is more than day long :param start_date: Strange Google formatted date of the start of the event :param end_date: Strange Google formatted date of the end of the event :return: Boolean """ start_date_items = start_date.split("-") end_date_items = end_date.split("-") start_date_sum = 0 end_date_sum = 0 for string in start_date_items: number = int(string) start_date_sum += number for string in end_date_items: number = int(string) end_date_sum += number date_dif = start_date_sum - end_date_sum if date_dif > 2: return True else: return False # Testing: # print(multiday_checker_STRANGE('2019-04-21', '2019-04-22')) # Doesn't use the "iso_extract_info" function def STRANGE_string_weekday(string): """ Will take a string that is a date formatted in the Google format and find what day of the week it is :param string: Google formatted string for the date :return: string """ items = string.split("/") year_int = int(items[2]) month_int = int(items[0]) day_int = int(items[1]) datetime_instance = datetime.date(year_int, month_int, day_int) week_day_number = datetime_instance.weekday() if week_day_number == 0: return "Monday" elif week_day_number == 1: return "Tuesday" elif week_day_number == 2: return "Wendsday" elif week_day_number == 3: return "Thursday" elif week_day_number == 4: return "Friday" elif week_day_number == 5: return "Saturday" elif week_day_number == 6: return "Sunday" else: return "Error" # Testing: # print(STRANGE_string_weekday("2019-04-27")) # Doesn't use the "iso_extract_info" function def ISO_string_weekday(string): """ Will take a string that is a date formatted in the ISO format and find what day of the week it is :param string: ISO formatted string for the date :return: string """ characters = list(string) year_int = int("".join(characters[0:4])) month_int = int("".join(characters[5:7])) day_int = int("".join(characters[8:10])) datetime_instance = datetime.date(year_int, month_int, day_int) week_day_number = datetime_instance.weekday() if week_day_number == 0: return "Monday" elif week_day_number == 1: return "Tuesday" elif week_day_number == 2: return "Wendsday" elif week_day_number == 3: return "Thursday" elif week_day_number == 4: return "Friday" elif week_day_number == 5: return "Saturday" elif week_day_number == 6: return "Sunday" else: return "Error" # Testing: # print(ISO_string_weekday('2019-06-28T16:00:00-04:00'))
31.846591
106
0.662979
0
0
0
0
0
0
0
0
2,221
0.396253
228079c406da2849bf07a999b9fbe4042daf4300
1,424
py
Python
python/ch_06_Animatronic_Head.py
tallamjr/mbms
6763faa870d1a16f272b3eade70b433ed3df0e51
[ "MIT" ]
18
2018-06-07T07:11:59.000Z
2022-02-28T20:08:23.000Z
python/ch_06_Animatronic_Head.py
tallamjr/mbms
6763faa870d1a16f272b3eade70b433ed3df0e51
[ "MIT" ]
1
2020-05-20T16:24:24.000Z
2020-05-21T09:03:24.000Z
python/ch_06_Animatronic_Head.py
tallamjr/mbms
6763faa870d1a16f272b3eade70b433ed3df0e51
[ "MIT" ]
8
2019-04-10T16:04:11.000Z
2022-01-08T20:39:15.000Z
from microbit import *
import random, speech, radio

eye_angles = [50, 140, 60, 90, 140]

radio.off()

sentences = [
    "Hello my name is Mike",
    "What is your name",
    "I am looking at you",
    "Exterminate exterminate exterminate",
    "Number Five is alive",
    "I cant do that Dave",
    "daisee daisee give me your answer do"
]

lips0 = Image("00000:"
              "00000:"
              "99999:"
              "00000:"
              "00000")
lips1 = Image("00000:"
              "00900:"
              "99099:"
              "00900:"
              "00000")
lips2 = Image("00000:"
              "09990:"
              "99099:"
              "09990:"
              "00000")
lips = [lips0, lips1, lips2]

def set_servo_angle(pin, angle):
    duty = 26 + (angle * 51) / 90
    pin.write_analog(duty)

def speak(sentence):
    words = sentence.split()
    for i in range(0, len(words)):
        display.show(random.choice(lips))
        speech.say(words[i])
    display.show(lips0)

def act():
    set_servo_angle(pin2, random.choice(eye_angles))
    sleep(300)
    speak(random.choice(sentences))
    set_servo_angle(pin2, 90)

base_z = 0

while True:
    new_z = abs(accelerometer.get_z())
    if abs(new_z - base_z) > 20:
        base_z = new_z
        act()
    if random.randint(0, 1000) == 0:  # say something 1 time in 1000
        act()
    sleep(200)
21.575758
67
0.525281
0
0
0
0
0
0
0
0
328
0.230337
22807a6716e561a1f502377b8a28eba78ad26040
322
py
Python
debugtalk.py
caoyp2/HRunDemo
41810a2fd366c780ea8f2bf9b4328fdd60aba171
[ "Apache-2.0" ]
null
null
null
debugtalk.py
caoyp2/HRunDemo
41810a2fd366c780ea8f2bf9b4328fdd60aba171
[ "Apache-2.0" ]
null
null
null
debugtalk.py
caoyp2/HRunDemo
41810a2fd366c780ea8f2bf9b4328fdd60aba171
[ "Apache-2.0" ]
null
null
null
import datetime
import time


def sleep(n_secs):
    time.sleep(n_secs)


def get_timestamp():
    dtime = datetime.datetime.now()
    un_time = time.mktime(dtime.timetuple())
    return str(un_time)


def print_docId(docId):
    print(docId)


def print_phonepass(phone,password):
    print(phone + "---------" + password)
16.947368
44
0.677019
0
0
0
0
0
0
0
0
11
0.034161
228122dba71ea421f33f3e5c51b862184d5fc4c8
205
py
Python
hubcare/metrics/community_metrics/issue_template/urls.py
aleronupe/2019.1-hubcare-api
3f031eac9559a10fdcf70a88ee4c548cf93e4ac2
[ "MIT" ]
7
2019-03-31T17:58:45.000Z
2020-02-29T22:44:27.000Z
hubcare/metrics/community_metrics/issue_template/urls.py
aleronupe/2019.1-hubcare-api
3f031eac9559a10fdcf70a88ee4c548cf93e4ac2
[ "MIT" ]
90
2019-03-26T01:14:54.000Z
2021-06-10T21:30:25.000Z
hubcare/metrics/community_metrics/issue_template/urls.py
aleronupe/2019.1-hubcare-api
3f031eac9559a10fdcf70a88ee4c548cf93e4ac2
[ "MIT" ]
null
null
null
from django.urls import path
from issue_template.views import IssueTemplateView

urlpatterns = [
    path(
        '<str:owner>/<str:repo>/<str:token_auth>/',
        IssueTemplateView.as_view()
    ),
]
18.636364
51
0.668293
0
0
0
0
0
0
0
0
42
0.204878
2283023fbf32c038ed31074c2a312a5a7aa70d38
5,248
py
Python
src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py
httpsgithu/hammer
6099f4169a49f71cee2e24bb1052f273039505cd
[ "BSD-3-Clause" ]
138
2017-08-15T18:56:55.000Z
2022-03-29T05:23:37.000Z
src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py
httpsgithu/hammer
6099f4169a49f71cee2e24bb1052f273039505cd
[ "BSD-3-Clause" ]
444
2017-09-11T01:15:37.000Z
2022-03-31T17:30:33.000Z
src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py
httpsgithu/hammer
6099f4169a49f71cee2e24bb1052f273039505cd
[ "BSD-3-Clause" ]
33
2017-10-30T14:23:53.000Z
2022-03-25T01:36:13.000Z
import os, tempfile, subprocess from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters from hammer_vlsi.units import VoltageValue, TemperatureValue from hammer_tech import Library, ExtraLibrary from typing import NamedTuple, Dict, Any, List from abc import ABCMeta, abstractmethod class SKY130SRAMGenerator(HammerSRAMGeneratorTool): def tool_config_prefix(self) -> str: return "sram_generator.sky130" def version_number(self, version: str) -> int: return 0 # Run generator for a single sram and corner def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibrary: tech_cache_dir = os.path.abspath(self.technology.cache_dir) #TODO: this is really an abuse of the corner stuff if corner.type == MMMCCornerType.Setup: speed_name = "slow" speed = "SS" elif corner.type == MMMCCornerType.Hold: speed_name = "fast" speed = "FF" elif corner.type == MMMCCornerType.Extra: speed_name = "typical" speed = "TT" # Different target memories based on port count # if params.family == "1rw": # self.logger.info("Compiling 1rw memories to DFFRAM instances") # base_dir = self.get_setting("technology.sky130.dffram_lib") # fam_code = params.family # sram_name = "RAM{d}x{w}".format( # d=params.depth, # w=params.width) # #TODO: need real libs (perhaps run Liberate here?) # #For now, use the dummy lib for all corners # corner_str = "" # # lib_path = "{b}/{n}.lib".format( # b=base_dir, # n=sram_name) # if not os.path.exists(lib_path): # self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str)) # return ExtraLibrary(prefix=None, library=Library( # name=sram_name, # nldm_liberty_file=lib_path, # lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name), # #TODO: GDS not generated. Unclear which DEF to use? # #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name), # spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name), # #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. 
cells) # #Need to add std cell behavioral Verilog to sim.inputs.input_files # verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name), # corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"}, # supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"}, # provides=[{'lib_type': "sram", 'vt': params.vt}])) # elif params.family == "1rw1r": if params.family == "1rw": self.logger.info("Compiling 1rw1r memories to OpenRAM instances") base_dir = self.get_setting("technology.sky130.openram_lib") fam_code = params.family s=round(round(params.width*params.depth/8, -3)/1000) # size in kiB w=params.width d=params.depth m=8 sram_name = f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}" print(f"SRAM_NAME: {sram_name}") #TODO: Hammer SRAMParameters doesn't have this info #TODO: replace this if OpenRAM characterization done for other corners #For now, use typical lib for all corners corner_str = "TT_1p8V_25C" #corner_str = "{speed}_{volt}V_{temp}C".format( # speed = speed, # volt = str(corner.voltage.value_in_units("V")).replace(".","p"), # temp = str(int(corner.temp.value_in_units("C"))).replace(".","p")) lib_path = "{b}/{n}/{n}_{c}.lib".format( b=base_dir, n=sram_name, c=corner_str) if not os.path.exists(lib_path): self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str)) return ExtraLibrary(prefix=None, library=Library( name=sram_name, nldm_liberty_file=lib_path, lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name), gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name), spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir,n=sram_name), verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name), corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"}, supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"}, provides=[{'lib_type': "sram", 'vt': params.vt}])) else: self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family)) return ExtraLibrary(prefix=None, library=None) tool=SKY130SRAMGenerator
51.960396
126
0.582127
4,872
0.928354
0
0
0
0
0
0
2,586
0.492759
2283626d76b9fe6781848e584e29b4b24ab5e062
2,837
py
Python
Section 4/nlp-4-ngrams.py
PacktPublishing/Hands-on-NLP-with-NLTK-and-scikit-learn-
8bb2095093a822363675368a4216d30d14cac501
[ "MIT" ]
34
2018-08-14T09:59:13.000Z
2021-11-08T13:12:50.000Z
Section 4/nlp-4-ngrams.py
anapatgl/Hands-on-NLP-with-NLTK-and-scikit-learn-
8bb2095093a822363675368a4216d30d14cac501
[ "MIT" ]
1
2018-11-28T19:20:37.000Z
2018-11-28T19:20:37.000Z
Section 4/nlp-4-ngrams.py
anapatgl/Hands-on-NLP-with-NLTK-and-scikit-learn-
8bb2095093a822363675368a4216d30d14cac501
[ "MIT" ]
31
2018-08-07T07:34:33.000Z
2022-03-15T08:50:44.000Z
import collections

import nltk
import os
from sklearn import (
    datasets, model_selection, feature_extraction, linear_model, naive_bayes,
    ensemble
)


def extract_features(corpus):
    '''Extract TF-IDF features from corpus'''

    sa_stop_words = nltk.corpus.stopwords.words("english")

    # words that might invert a sentence's meaning
    white_list = [
        'what', 'but', 'if', 'because', 'as', 'until', 'against', 'up', 'down',
        'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
        'once', 'here', 'there', 'why', 'how', 'all', 'any', 'most', 'other',
        'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
        'than', 'too', 'can', 'will', 'just', 'don', 'should']

    # take these out of the standard NLTK stop word list
    sa_stop_words = [sw for sw in sa_stop_words if sw not in white_list]

    # vectorize means we turn non-numerical data into an array of numbers
    count_vectorizer = feature_extraction.text.CountVectorizer(
        lowercase=True,  # for demonstration, True by default
        tokenizer=nltk.word_tokenize,  # use the NLTK tokenizer
        min_df=2,  # minimum document frequency, i.e. the word must appear more than once.
        ngram_range=(1, 2),
        stop_words=sa_stop_words
    )
    processed_corpus = count_vectorizer.fit_transform(corpus)
    processed_corpus = feature_extraction.text.TfidfTransformer().fit_transform(
        processed_corpus)

    return processed_corpus


data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
    movie_sentiment_data.target_names))

movie_tfidf = extract_features(movie_sentiment_data.data)

X_train, X_test, y_train, y_test = model_selection.train_test_split(
    movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)

# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))

clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))

clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))

clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))

voting_model = ensemble.VotingClassifier(
    estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
    voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
    voting_model.score(X_test, y_test)))
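A short follow-on sketch (not part of the original script) that reports per-class precision and recall for the voting ensemble on the same held-out split; it reuses the variables defined above and only adds the metrics import:

from sklearn import metrics

# Per-class breakdown of the ensemble's accuracy on the test split.
predictions = voting_model.predict(X_test)
print(metrics.classification_report(
    y_test, predictions, target_names=movie_sentiment_data.target_names))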
36.844156
90
0.70638
0
0
0
0
0
0
0
0
903
0.318294
2283d1768504ac50dd9ea43fb4e940fbaf88eee6
649
py
Python
code/gcd_sequence/sol_443.py
bhavinjawade/project-euler-solutions
56bf6a282730ed4b9b875fa081cf4509d9939d98
[ "Apache-2.0" ]
2
2020-07-16T08:16:32.000Z
2020-10-01T07:16:48.000Z
code/gcd_sequence/sol_443.py
Psingh12354/project-euler-solutions
56bf6a282730ed4b9b875fa081cf4509d9939d98
[ "Apache-2.0" ]
null
null
null
code/gcd_sequence/sol_443.py
Psingh12354/project-euler-solutions
56bf6a282730ed4b9b875fa081cf4509d9939d98
[ "Apache-2.0" ]
1
2021-05-07T18:06:08.000Z
2021-05-07T18:06:08.000Z
# -*- coding: utf-8 -*-
'''
File name: code\gcd_sequence\sol_443.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''

# Solution to Project Euler Problem #443 :: GCD sequence
#
# For more information see:
# https://projecteuler.net/problem=443

# Problem Statement
'''
Let g(n) be a sequence defined as follows:
g(4) = 13,
g(n) = g(n-1) + gcd(n, g(n-1)) for n > 4.

The first few values are:
 n    :  4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20 ...
 g(n) : 13  14  16  17  18  27  28  29  30  31  32  33  34  51  54  55  60 ...

You are given that g(1 000) = 2524 and g(1 000 000) = 2624152.

Find g(10^15).
'''

# Solution

# Solution Approach
'''
'''
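Since the solution body above is left empty, a naive reference sketch of the recurrence follows; it reproduces the stated value g(1000) = 2524 but is far too slow for g(10^15), which requires skipping the long runs where gcd(n, g(n-1)) = 1 analytically:

from math import gcd


def g_bruteforce(limit):
    # g(4) = 13; each later term adds gcd(n, previous term).
    value = 13
    for n in range(5, limit + 1):
        value += gcd(n, value)
    return value


assert g_bruteforce(1000) == 2524  # matches the value given in the problem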
17.540541
62
0.644068
0
0
0
0
0
0
0
0
636
0.97546
22849e131dffff72236a4d1d46cddf477f92bab9
2,823
py
Python
src/collectors/rabbitmq/rabbitmq.py
lreed/Diamond
2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80
[ "MIT" ]
null
null
null
src/collectors/rabbitmq/rabbitmq.py
lreed/Diamond
2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80
[ "MIT" ]
null
null
null
src/collectors/rabbitmq/rabbitmq.py
lreed/Diamond
2772cdbc27a7ba3fedeb6d4241aeee9d2fcbdb80
[ "MIT" ]
null
null
null
# coding=utf-8 """ Collects data from RabbitMQ through the admin interface #### Notes * if two vhosts have the queues with the same name, the metrics will collide #### Dependencies * pyrabbit """ import diamond.collector try: from numbers import Number Number # workaround for pyflakes issue #13 import pyrabbit.api except ImportError: Number = None class RabbitMQCollector(diamond.collector.Collector): def get_default_config_help(self): config_help = super(RabbitMQCollector, self).get_default_config_help() config_help.update({ 'host': 'Hostname and port to collect from', 'user': 'Username', 'password': 'Password', 'queues': 'Queues to publish. Leave empty to publish all.', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(RabbitMQCollector, self).get_default_config() config.update({ 'path': 'rabbitmq', 'host': 'localhost:55672', 'user': 'guest', 'password': 'guest', }) return config def collect(self): if Number is None: self.log.error('Unable to import either Number or pyrabbit.api') return {} queues = [] if 'queues' in self.config: queues = self.config['queues'].split() try: client = pyrabbit.api.Client(self.config['host'], self.config['user'], self.config['password']) for queue in client.get_queues(): # skip queues we don't want to publish if queues and queue['name'] not in queues: continue for key in queue: name = '{0}.{1}'.format('queues', queue['name']) self._publish_metrics(name, [], key, queue) overview = client.get_overview() for key in overview: self._publish_metrics('', [], key, overview) except Exception, e: self.log.error('Couldnt connect to rabbitmq %s', e) return {} def _publish_metrics(self, name, prev_keys, key, data): """Recursively publish keys""" value = data[key] keys = prev_keys + [key] if isinstance(value, dict): for new_key in value: self._publish_metrics(name, keys, new_key, value) elif isinstance(value, Number): joined_keys = '.'.join(keys) if name: publish_key = '{0}.{1}'.format(name, joined_keys) else: publish_key = joined_keys self.publish(publish_key, value)
30.031915
78
0.54729
2,444
0.865746
0
0
0
0
0
0
728
0.257882
2284b104a47dc324bd27f42ce83e41850b152d6c
27,170
py
Python
nemo/collections/tts/torch/data.py
MalikIdreesHasanKhan/NeMo
984fd34921e81659c4594a22ab142311808b3bb7
[ "Apache-2.0" ]
4,145
2019-09-13T08:29:43.000Z
2022-03-31T18:31:44.000Z
nemo/collections/tts/torch/data.py
MalikIdreesHasanKhan/NeMo
984fd34921e81659c4594a22ab142311808b3bb7
[ "Apache-2.0" ]
2,031
2019-09-17T16:51:39.000Z
2022-03-31T23:52:41.000Z
nemo/collections/tts/torch/data.py
MalikIdreesHasanKhan/NeMo
984fd34921e81659c4594a22ab142311808b3bb7
[ "Apache-2.0" ]
1,041
2019-09-13T10:08:21.000Z
2022-03-30T06:37:38.000Z
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pickle from pathlib import Path from typing import Callable, Dict, List, Optional, Union import librosa import torch from nemo_text_processing.text_normalization.normalize import Normalizer from tqdm import tqdm from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer from nemo.collections.tts.torch.helpers import ( BetaBinomialInterpolator, beta_binomial_prior_distribution, general_padding, ) from nemo.collections.tts.torch.tts_data_types import ( DATA_STR2DATA_CLASS, MAIN_DATA_TYPES, VALID_SUPPLEMENTARY_DATA_TYPES, DurationPrior, Durations, Energy, LMTokens, LogMel, Pitch, SpeakerID, WithLens, ) from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer from nemo.core.classes import Dataset from nemo.utils import logging class TTSDataset(Dataset): def __init__( self, manifest_filepath: str, sample_rate: int, text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]], tokens: Optional[List[str]] = None, text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None, text_normalizer_call_args: Optional[Dict] = None, text_tokenizer_pad_id: Optional[int] = None, sup_data_types: Optional[List[str]] = None, sup_data_path: Optional[Union[Path, str]] = None, max_duration: Optional[float] = None, min_duration: Optional[float] = None, ignore_file: Optional[str] = None, trim: bool = False, n_fft=1024, win_length=None, hop_length=None, window="hann", n_mels=80, lowfreq=0, highfreq=None, **kwargs, ): """Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch). Most supplementary data types will be computed on the fly and saved in the supplementary_folder if they did not exist before. Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section). Args: manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid json. Each line should contain the following: "audio_filepath": <PATH_TO_WAV> "mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional) "duration": <Duration of audio clip in seconds> (Optional) "text": <THE_TRANSCRIPT> (Optional) sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to. text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer. tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer. text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer. text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function. 
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer. sup_data_types (Optional[List[str]]): List of supplementary data types. sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch). max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load audio to compute duration. Defaults to None which does not prune. min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load audio to compute duration. Defaults to None which does not prune. ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio files) that will be pruned prior to training. Defaults to None which does not prune. trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False. n_fft (Optional[int]): The number of fft samples. Defaults to 1024 win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft. hop_length (Optional[int]): The hope length between fft computations. Defaults to None which uses n_fft//4. window (Optional[str]): One of 'hann', 'hamming', 'blackman','bartlett', 'none'. Which corresponds to the equivalent torch window function. n_mels (Optional[int]): The number of mel filters. Defaults to 80. lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0. highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None. Keyword Args: durs_file (Optional[str]): String path to pickled durations location. durs_type (Optional[str]): Type of durations. Currently supported only "aligned-based". use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False. pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2'). pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7'). pitch_avg (Optional[float]): The mean that we use to normalize the pitch. pitch_std (Optional[float]): The std that we use to normalize the pitch. pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not. 
""" super().__init__() self.text_normalizer = text_normalizer self.text_normalizer_call = ( self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer ) self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {} self.text_tokenizer = text_tokenizer if isinstance(self.text_tokenizer, BaseTokenizer): self.text_tokenizer_pad_id = text_tokenizer.pad self.tokens = text_tokenizer.tokens else: if text_tokenizer_pad_id is None: raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer") if tokens is None: raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer") self.text_tokenizer_pad_id = text_tokenizer_pad_id self.tokens = tokens if isinstance(manifest_filepath, str): manifest_filepath = [manifest_filepath] self.manifest_filepath = manifest_filepath if sup_data_path is not None: Path(sup_data_path).mkdir(parents=True, exist_ok=True) self.sup_data_path = sup_data_path self.sup_data_types = ( [DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else [] ) self.sup_data_types_set = set(self.sup_data_types) self.data = [] audio_files = [] total_duration = 0 for manifest_file in self.manifest_filepath: with open(Path(manifest_file).expanduser(), 'r') as f: logging.info(f"Loading dataset from {manifest_file}.") for line in tqdm(f): item = json.loads(line) file_info = { "audio_filepath": item["audio_filepath"], "mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None, "duration": item["duration"] if "duration" in item else None, "text_tokens": None, "speaker_id": item["speaker"] if "speaker" in item else None, } if "text" in item: text = item["text"] if self.text_normalizer is not None: text = self.text_normalizer_call(text, **self.text_normalizer_call_args) text_tokens = self.text_tokenizer(text) file_info["raw_text"] = item["text"] file_info["text_tokens"] = text_tokens audio_files.append(file_info) if file_info["duration"] is None: logging.info( "Not all audio files have duration information. Duration logging will be disabled." ) total_duration = None if total_duration is not None: total_duration += item["duration"] logging.info(f"Loaded dataset with {len(audio_files)} files.") if total_duration is not None: logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.") if ignore_file: logging.info(f"using {ignore_file} to prune dataset.") with open(Path(ignore_file).expanduser(), "rb") as f: wavs_to_ignore = set(pickle.load(f)) pruned_duration = 0 if total_duration is not None else None pruned_items = 0 for item in audio_files: audio_path = item['audio_filepath'] audio_id = Path(audio_path).stem # Prune data according to min/max_duration & the ignore file if total_duration is not None: if (min_duration and item["duration"] < min_duration) or ( max_duration and item["duration"] > max_duration ): pruned_duration += item["duration"] pruned_items += 1 continue if ignore_file and (audio_id in wavs_to_ignore): pruned_items += 1 pruned_duration += item["duration"] wavs_to_ignore.remove(audio_id) continue self.data.append(item) logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files") if pruned_duration is not None: logging.info( f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains " f"{(total_duration - pruned_duration) / 3600:.2f} hours." 
) self.sample_rate = sample_rate self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate) self.trim = trim self.n_fft = n_fft self.n_mels = n_mels self.lowfreq = lowfreq self.highfreq = highfreq self.window = window self.win_length = win_length or self.n_fft self.hop_length = hop_length self.hop_len = self.hop_length or self.n_fft // 4 self.fb = torch.tensor( librosa.filters.mel( self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq ), dtype=torch.float, ).unsqueeze(0) window_fn = { 'hann': torch.hann_window, 'hamming': torch.hamming_window, 'blackman': torch.blackman_window, 'bartlett': torch.bartlett_window, 'none': None, }.get(self.window, None) self.stft = lambda x: torch.stft( input=x, n_fft=self.n_fft, hop_length=self.hop_len, win_length=self.win_length, window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, ) for data_type in self.sup_data_types: if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES: raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.") getattr(self, f"add_{data_type.name}")(**kwargs) def add_log_mel(self, **kwargs): pass def add_durations(self, **kwargs): durs_file = kwargs.pop('durs_file') durs_type = kwargs.pop('durs_type') audio_stem2durs = torch.load(durs_file) self.durs = [] for tag in [Path(d["audio_filepath"]).stem for d in self.data]: durs = audio_stem2durs[tag] if durs_type == "aligner-based": self.durs.append(durs) else: raise NotImplementedError( f"{durs_type} duration type is not supported. Only align-based is supported at this moment." ) def add_duration_prior(self, **kwargs): self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False) if self.use_beta_binomial_interpolator: self.beta_binomial_interpolator = BetaBinomialInterpolator() def add_pitch(self, **kwargs): self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2')) self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7')) self.pitch_avg = kwargs.pop("pitch_avg", None) self.pitch_std = kwargs.pop("pitch_std", None) self.pitch_norm = kwargs.pop("pitch_norm", False) def add_energy(self, **kwargs): pass def add_speaker_id(self, **kwargs): pass def get_spec(self, audio): with torch.cuda.amp.autocast(enabled=False): spec = self.stft(audio) if spec.dtype in [torch.cfloat, torch.cdouble]: spec = torch.view_as_real(spec) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) return spec def get_log_mel(self, audio): with torch.cuda.amp.autocast(enabled=False): spec = self.get_spec(audio) mel = torch.matmul(self.fb.to(spec.dtype), spec) log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny)) return log_mel def __getitem__(self, index): sample = self.data[index] audio_stem = Path(sample["audio_filepath"]).stem features = self.featurizer.process(sample["audio_filepath"], trim=self.trim) audio, audio_length = features, torch.tensor(features.shape[0]).long() text = torch.tensor(sample["text_tokens"]).long() text_length = torch.tensor(len(sample["text_tokens"])).long() log_mel, log_mel_length = None, None if LogMel in self.sup_data_types_set: mel_path = sample["mel_filepath"] if mel_path is not None and Path(mel_path).exists(): log_mel = torch.load(mel_path) else: mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt" if mel_path.exists(): log_mel = torch.load(mel_path) else: log_mel = self.get_log_mel(audio) torch.save(log_mel, mel_path) log_mel = log_mel.squeeze(0) log_mel_length = torch.tensor(log_mel.shape[1]).long() 
durations = None if Durations in self.sup_data_types_set: durations = self.durs[index] duration_prior = None if DurationPrior in self.sup_data_types_set: if self.use_beta_binomial_interpolator: mel_len = self.get_log_mel(audio).shape[2] duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item())) else: prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt" if prior_path.exists(): duration_prior = torch.load(prior_path) else: mel_len = self.get_log_mel(audio).shape[2] duration_prior = beta_binomial_prior_distribution(text_length, mel_len) duration_prior = torch.from_numpy(duration_prior) torch.save(duration_prior, prior_path) pitch, pitch_length = None, None if Pitch in self.sup_data_types_set: pitch_name = ( f"{audio_stem}_pitch_pyin_" f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_" f"fl{self.win_length}_hs{self.hop_len}.pt" ) pitch_path = Path(self.sup_data_path) / pitch_name if pitch_path.exists(): pitch = torch.load(pitch_path).float() else: pitch, _, _ = librosa.pyin( audio.numpy(), fmin=self.pitch_fmin, fmax=self.pitch_fmax, frame_length=self.win_length, sr=self.sample_rate, fill_na=0.0, ) pitch = torch.from_numpy(pitch).float() torch.save(pitch, pitch_path) if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm: pitch -= self.pitch_avg pitch[pitch == -self.pitch_avg] = 0.0 # Zero out values that were perviously zero pitch /= self.pitch_std pitch_length = torch.tensor(len(pitch)).long() energy, energy_length = None, None if Energy in self.sup_data_types_set: energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt" if energy_path.exists(): energy = torch.load(energy_path).float() else: spec = self.get_spec(audio) energy = torch.linalg.norm(spec.squeeze(0), axis=0).float() torch.save(energy, energy_path) energy_length = torch.tensor(len(energy)).long() speaker_id = None if SpeakerID in self.sup_data_types_set: speaker_id = torch.tensor(sample["speaker_id"]).long() return ( audio, audio_length, text, text_length, log_mel, log_mel_length, durations, duration_prior, pitch, pitch_length, energy, energy_length, speaker_id, ) def __len__(self): return len(self.data) def join_data(self, data_dict): result = [] for data_type in MAIN_DATA_TYPES + self.sup_data_types: result.append(data_dict[data_type.name]) if issubclass(data_type, WithLens): result.append(data_dict[f"{data_type.name}_lens"]) return tuple(result) def general_collate_fn(self, batch): ( _, audio_lengths, _, tokens_lengths, _, log_mel_lengths, durations_list, duration_priors_list, pitches, pitches_lengths, energies, energies_lengths, _, ) = zip(*batch) max_audio_len = max(audio_lengths).item() max_tokens_len = max(tokens_lengths).item() max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None if LogMel in self.sup_data_types_set: log_mel_pad = torch.finfo(batch[0][2].dtype).tiny duration_priors = ( torch.zeros( len(duration_priors_list), max([prior_i.shape[0] for prior_i in duration_priors_list]), max([prior_i.shape[1] for prior_i in duration_priors_list]), ) if DurationPrior in self.sup_data_types_set else [] ) audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], 
[], [], [] for i, sample_tuple in enumerate(batch): ( audio, audio_len, token, token_len, log_mel, log_mel_len, durations, duration_prior, pitch, pitch_length, energy, energy_length, speaker_id, ) = sample_tuple audio = general_padding(audio, audio_len.item(), max_audio_len) audios.append(audio) token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id) tokens.append(token) if LogMel in self.sup_data_types_set: log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad)) if Durations in self.sup_data_types_set: durations_list.append(general_padding(durations, len(durations), max_durations_len)) if DurationPrior in self.sup_data_types_set: duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior if Pitch in self.sup_data_types_set: pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len)) if Energy in self.sup_data_types_set: energies.append(general_padding(energy, energy_length.item(), max_energies_len)) if SpeakerID in self.sup_data_types_set: speaker_ids.append(speaker_id) data_dict = { "audio": torch.stack(audios), "audio_lens": torch.stack(audio_lengths), "text": torch.stack(tokens), "text_lens": torch.stack(tokens_lengths), "log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None, "log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None, "durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None, "duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None, "pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None, "pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None, "energy": torch.stack(energies) if Energy in self.sup_data_types_set else None, "energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None, "speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None, } return data_dict def _collate_fn(self, batch): data_dict = self.general_collate_fn(batch) joined_data = self.join_data(data_dict) return joined_data class MixerTTSDataset(TTSDataset): def __init__(self, **kwargs): super().__init__(**kwargs) def _albert(self): from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>') space_value = self.lm_model_tokenizer._convert_token_to_id('▁') self.id2lm_tokens = {} for i, d in enumerate(self.data): raw_text = d["raw_text"] assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance( self.text_tokenizer, EnglishCharsTokenizer ) preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text) lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False) if self.text_tokenizer.pad_with_space: lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value] self.id2lm_tokens[i] = lm_tokens_as_ids def add_lm_tokens(self, **kwargs): lm_model = kwargs.pop('lm_model') if lm_model == "albert": self._albert() else: raise NotImplementedError( f"{lm_model} lm model is not supported. Only albert is supported at this moment." 
) def __getitem__(self, index): ( audio, audio_length, text, text_length, log_mel, log_mel_length, durations, duration_prior, pitch, pitch_length, energy, energy_length, speaker_id, ) = super().__getitem__(index) lm_tokens = None if LMTokens in self.sup_data_types_set: lm_tokens = torch.tensor(self.id2lm_tokens[index]).long() return ( audio, audio_length, text, text_length, log_mel, log_mel_length, durations, duration_prior, pitch, pitch_length, energy, energy_length, speaker_id, lm_tokens, ) def _collate_fn(self, batch): batch = list(zip(*batch)) data_dict = self.general_collate_fn(list(zip(*batch[:13]))) lm_tokens_list = batch[13] if LMTokens in self.sup_data_types_set: lm_tokens = torch.full( (len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])), fill_value=self.lm_padding_value, ) for i, lm_tokens_i in enumerate(lm_tokens_list): lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i data_dict[LMTokens.name] = lm_tokens joined_data = self.join_data(data_dict) return joined_data
42.386895
147
0.61325
25,649
0.94395
0
0
0
0
0
0
7,154
0.263286
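The collate logic above repeatedly calls a general_padding helper imported earlier in the file. As a rough illustration of the behaviour those calls assume (right-pad a tensor's last dimension up to the batch maximum, using pad_value where given), here is a minimal sketch; the name general_padding_sketch, its signature, and the default pad value are assumptions for illustration, not the NeMo implementation.

import torch
import torch.nn.functional as F

def general_padding_sketch(item, item_len, max_len, pad_value=0):
    # Right-pad the last dimension from item_len up to max_len with pad_value.
    if item_len >= max_len:
        return item
    return F.pad(item, (0, max_len - item_len), value=pad_value)

tokens = torch.tensor([7, 3, 9])
print(general_padding_sketch(tokens, 3, 5, pad_value=0))  # tensor([7, 3, 9, 0, 0])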
2284c119fbaa59ef00a4dd53417eccef839221b3
1,140
py
Python
anmotordesign/server.py
MarkWengSTR/ansys-maxwell-online
f9bbc535c7637d8f34abb241acfb97d1bdbe4103
[ "MIT" ]
8
2021-01-25T11:17:32.000Z
2022-03-29T05:34:47.000Z
anmotordesign/server.py
MarkWengSTR/ansys-maxwell-online
f9bbc535c7637d8f34abb241acfb97d1bdbe4103
[ "MIT" ]
1
2021-06-14T18:40:16.000Z
2021-08-25T14:37:21.000Z
anmotordesign/server.py
MarkWengSTR/ansys-maxwell-online
f9bbc535c7637d8f34abb241acfb97d1bdbe4103
[ "MIT" ]
8
2020-09-25T15:40:07.000Z
2022-03-29T05:34:48.000Z
from flask import Flask, request, jsonify
from flask_cors import CORS
from run import run_ansys
from api.validate import spec_present, data_type_validate, spec_keys_validate, ansys_overload_check

ansys_processing_count = 0

# debug
# import ipdb; ipdb.set_trace()

app = Flask(__name__)
CORS(app)  # local development cors


@app.route('/run_simu', methods=["POST"])
def run_simulation():
    global ansys_processing_count
    ansys_processing_count += 1

    ctx = {
        "request": request.get_json(),
        "allow_run": True,
        "process": {
            "limit": 4,
            "count": ansys_processing_count,
        },
        "start_run_response": {"msg": "start run at background"},
        "error": {
            "validate": {"msg": ""}
        }
    }

    if spec_present(ctx) and \
            data_type_validate(ctx) and \
            spec_keys_validate(ctx) and \
            ansys_overload_check(ctx):
        # module-level view function: pass the local ctx, there is no `self` here
        ctx = run_ansys(ctx)
    else:
        return jsonify(ctx["error"]["validate"])

    return jsonify(ctx["response"])


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000, debug=True)
25.909091
99
0.62193
0
0
0
0
735
0.644737
0
0
242
0.212281
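For context on how the /run_simu route above is meant to be driven, a client posts a JSON body and receives either the validation error or the run response. A hedged example call follows; the payload keys are placeholders, since the real schema is enforced by the api.validate helpers, which are not part of this file.

import requests

payload = {"spec": {"motor_type": "placeholder"}}  # hypothetical keys, for illustration only
resp = requests.post("http://localhost:5000/run_simu", json=payload)
print(resp.status_code, resp.json())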
2284f5a8afa9699354bd56f97faf33c044aeae81
160
py
Python
cnn/donas_utils/dataset/__init__.py
eric8607242/darts
34c79a0956039f56a6a87bfb7f4b1ae2af615bea
[ "Apache-2.0" ]
null
null
null
cnn/donas_utils/dataset/__init__.py
eric8607242/darts
34c79a0956039f56a6a87bfb7f4b1ae2af615bea
[ "Apache-2.0" ]
null
null
null
cnn/donas_utils/dataset/__init__.py
eric8607242/darts
34c79a0956039f56a6a87bfb7f4b1ae2af615bea
[ "Apache-2.0" ]
null
null
null
from .dataset import get_cifar100, get_cifar10, get_imagenet_lmdb, get_imagenet __all__ = ["get_cifar100", "get_cifar10", "get_imagenet_lmdb", "get_imagenet"]
40
79
0.8
0
0
0
0
0
0
0
0
60
0.375
2285470cfe61c3208efb829c668012f4eb4c042d
196
py
Python
classifier/cross_validation.py
ahmdrz/spam-classifier
a9cc3916a7c22545c82f0bfae7e4b95f3b36248f
[ "MIT" ]
1
2019-08-05T12:02:53.000Z
2019-08-05T12:02:53.000Z
classifier/cross_validation.py
ahmdrz/spam-classifier
a9cc3916a7c22545c82f0bfae7e4b95f3b36248f
[ "MIT" ]
null
null
null
classifier/cross_validation.py
ahmdrz/spam-classifier
a9cc3916a7c22545c82f0bfae7e4b95f3b36248f
[ "MIT" ]
null
null
null
from sklearn.model_selection import KFold def kfold_cross_validation(data, k=10): kfold = KFold(n_splits=k) for train, test in kfold.split(data): yield data[train], data[test]
32.666667
41
0.704082
0
0
149
0.760204
0
0
0
0
0
0
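kfold_cross_validation above is a generator that yields (train, test) slices of the array it is given, so it is consumed in a loop; a small usage sketch with synthetic data:

import numpy as np

data = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
for train_split, test_split in kfold_cross_validation(data, k=5):
    print(train_split.shape, test_split.shape)  # (8, 2) (2, 2) on each of the 5 folds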
2285d8fefdc5efe988f942a7eb7b3f78ecd84063
310
py
Python
category/models.py
captainxavier/AutoBlog
44fb23628fe0210a3dcec80b91e1217d27ee9462
[ "MIT" ]
null
null
null
category/models.py
captainxavier/AutoBlog
44fb23628fe0210a3dcec80b91e1217d27ee9462
[ "MIT" ]
null
null
null
category/models.py
captainxavier/AutoBlog
44fb23628fe0210a3dcec80b91e1217d27ee9462
[ "MIT" ]
null
null
null
from django.db import models class Category(models.Model): title = models.CharField(max_length=20) class Meta: db_table = 'category' verbose_name = ("Category") verbose_name_plural = ("Categories") def __str__(self): return self.title
15.5
45
0.590323
262
0.845161
0
0
0
0
0
0
32
0.103226
228727092b8b8c1cbde1234be034bd7032daae7a
1,488
py
Python
admin_tools/urls.py
aucoeur/WeVoteServer
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
[ "MIT" ]
44
2015-11-19T04:52:39.000Z
2021-03-17T02:08:26.000Z
admin_tools/urls.py
aucoeur/WeVoteServer
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
[ "MIT" ]
748
2015-09-03T04:18:33.000Z
2022-03-10T14:08:10.000Z
admin_tools/urls.py
aucoeur/WeVoteServer
7b30bdbb59d6e0c19abc81237aa42fba7de1a432
[ "MIT" ]
145
2015-09-19T10:10:44.000Z
2022-03-04T21:01:12.000Z
# admin_tools/urls.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- from django.conf.urls import re_path from . import views urlpatterns = [ re_path(r'^$', views.admin_home_view, name='admin_home',), re_path(r'^data_cleanup/$', views.data_cleanup_view, name='data_cleanup'), re_path(r'^data_cleanup_organization_analysis/$', views.data_cleanup_organization_analysis_view, name='data_cleanup_organization_analysis'), re_path(r'^data_cleanup_organization_list_analysis/$', views.data_cleanup_organization_list_analysis_view, name='data_cleanup_organization_list_analysis'), re_path(r'^data_cleanup_position_list_analysis/$', views.data_cleanup_position_list_analysis_view, name='data_cleanup_position_list_analysis'), re_path(r'^data_cleanup_voter_hanging_data_process/$', views.data_cleanup_voter_hanging_data_process_view, name='data_cleanup_voter_hanging_data_process'), re_path(r'^data_cleanup_voter_list_analysis/$', views.data_cleanup_voter_list_analysis_view, name='data_cleanup_voter_list_analysis'), re_path(r'^data_voter_statistics/$', views.data_voter_statistics_view, name='data_voter_statistics'), re_path(r'^import_sample_data/$', views.import_sample_data_view, name='import_sample_data'), re_path(r'^statistics/$', views.statistics_summary_view, name='statistics_summary'), re_path(r'^sync_dashboard/$', views.sync_data_with_master_servers_view, name='sync_dashboard'), ]
55.111111
108
0.78293
0
0
0
0
0
0
0
0
694
0.466398
22875dd3eed7789c404cf71dae058c78660c2f50
3,414
py
Python
hippynn/graphs/nodes/base/multi.py
tautomer/hippynn
df4504a5ea4680cfc61f490984dcddeac7ed99ee
[ "BSD-3-Clause" ]
21
2021-11-17T00:56:35.000Z
2022-03-22T05:57:11.000Z
hippynn/graphs/nodes/base/multi.py
tautomer/hippynn
df4504a5ea4680cfc61f490984dcddeac7ed99ee
[ "BSD-3-Clause" ]
4
2021-12-17T16:16:53.000Z
2022-03-16T23:50:38.000Z
hippynn/graphs/nodes/base/multi.py
tautomer/hippynn
df4504a5ea4680cfc61f490984dcddeac7ed99ee
[ "BSD-3-Clause" ]
6
2021-11-30T21:09:31.000Z
2022-03-18T07:07:32.000Z
""" A base node that provides several output tensors. """ from ....layers.algebra import Idx from .base import SingleNode, Node from .. import _debprint from ...indextypes import IdxType class IndexNode(SingleNode): _input_names = ("parent",) def __init__(self, name, parents, index, index_state=None): if len(parents) != 1: raise TypeError("Index node takes exactly one parent.") par = parents[0] iname = par._output_names[index] if hasattr(par, "_output_names") else "<{index}>".format(index=index) repr_info = {"parent_name": par.name, "index": iname} module = Idx(index, repr_info=repr_info) self.index = index self._index_state = IdxType.NotFound if index_state is None else index_state super().__init__(name, parents, module=module) class MultiNode(Node): # Multinode _output_names = NotImplemented _output_index_states = NotImplemented # optional? _main_output = NotImplemented def __init__(self, name, parents, module="auto", *args, db_name=None, **kwargs): super().__init__(name, parents, *args, module=module, **kwargs) self.children = tuple( IndexNode(name + "." + cn, (self,), index=i, index_state=cidx) for i, (cn, cidx) in enumerate(zip(self._output_names, self._output_index_states)) ) self.main_output.db_name = db_name def set_dbname(self, db_name): self.main_output.set_dbname(db_name) def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Enforce _child_index_states has same length as _output_names if cls._output_index_states is not NotImplemented: if len(cls._output_index_states) != len(cls._output_names): raise AssertionError( "Lengths of _child_index_states {} doesn't match lengths of ouput_names {}".format( cls._output_index_states, cls._output_names ) ) # Enforce no name conflict between input names and output names if cls._input_names is not NotImplemented: try: assert all(o not in cls._input_names for o in cls._output_names) except AssertionError as ae: raise ValueError( "Multi-node output names {} conflict with input names {}".format( cls._output_names, cls._input_names ) ) from ae def __dir__(self): dir_ = super().__dir__() if self._output_names is not NotImplemented: dir_ = dir_ + list(self._output_names) return dir_ def __getattr__(self, item): if item in ("children", "_output_names"): # Guard against recursion raise AttributeError("Attribute {} not yet present.".format(item)) try: return super().__getattr__(item) # Defer to BaseNode first except AttributeError: pass try: return self.children[self._output_names.index(item)] except (AttributeError, ValueError): raise AttributeError("{} object has no attribute '{}'".format(self.__class__, item)) @property def main_output(self): if self._main_output is NotImplemented: return super().main_output return getattr(self, self._main_output)
38.795455
110
0.626245
3,221
0.943468
0
0
171
0.050088
0
0
576
0.168717
22881ed2f077cedcedaa10dbf83c13905a622021
113
py
Python
main_module/__init__.py
JohanNicander/python-test-architecture
2418f861cb46c3fccaa21be94ee92c5862985a15
[ "Apache-2.0" ]
null
null
null
main_module/__init__.py
JohanNicander/python-test-architecture
2418f861cb46c3fccaa21be94ee92c5862985a15
[ "Apache-2.0" ]
null
null
null
main_module/__init__.py
JohanNicander/python-test-architecture
2418f861cb46c3fccaa21be94ee92c5862985a15
[ "Apache-2.0" ]
null
null
null
from .zero import zero from main_module._unittester import UnitTester test = UnitTester(__name__) del UnitTester
22.6
46
0.840708
0
0
0
0
0
0
0
0
0
0
228856c2bad586d523ebf387bffc058ae9b589d7
4,151
py
Python
barber/cutter.py
LSSTDESC/barber
9dbe69e69a078ef3b70a316807517e2a4d4e60cd
[ "MIT" ]
null
null
null
barber/cutter.py
LSSTDESC/barber
9dbe69e69a078ef3b70a316807517e2a4d4e60cd
[ "MIT" ]
6
2020-04-28T15:20:08.000Z
2020-04-28T15:37:02.000Z
barber/cutter.py
LSSTDESC/barber
9dbe69e69a078ef3b70a316807517e2a4d4e60cd
[ "MIT" ]
null
null
null
import numpy as np import numpy.random as npr import scipy.optimize as spo import tomo_challenge.metrics as tcm # custom data type, could be replaced with/tie in to tree.py class # cut_vals is (nfeat, nbins - 1) numpy array, float # tree_ids is ((nbins,) * nfeat) numpy array, int TreePars = namedtuple('TreePars', ['cut_vals', 'tree_ids']) # should maybe put this function in a class so we can call TreePars.to_array def treepars_to_array(treepars): """ Flattens cut_vals and tree_ids for optimizer """ cuts = np.flatten(treepars.cut_vals) ids = np.flatten(treepars.tree_ids) arr = np.concatenate((cuts, ids)) return(arr) # should maybe put this function in a class so we can call TreePars.from_array def array_to_treepars(arr): """ Converts optimizer format of 1D array back into namedtuple of arrays """ flat_cuts = arr[type(arr) == float] flat_ids = arr[type(arr) == int] nbins = len(np.unique(flat_ids)) nfeat = len(flat_cuts) / (nbins - 1) # maybe do some assert checks with these just in case types have problems # cuts = arr[0:nfeat*(nbins-1)].reshape((nfeat, nbins-1)) # ids = arr[feat*(nbins-1):].reshape((nbins,) * nfeat) cuts = flat_cuts.reshape((nfeat, nbins-1)) ids = flat_ids.reshape((nbins,) * nfeat) treepars = TreePars(cuts, ids) return(treepars) def get_cuts(galaxies, ival_treepars=None, nbins=3): """ Obtains simplest possible bin definitions: cuts in the space of observables given number of bins Parameters ---------- galaxies: numpy.ndarray, float observables (magnitudes and/or colors and/or errors) to serve as features for set of galaxies shape(galaxies) = (ngals, nfeat) ival_treepars: namedtuple, numpy.ndarray, float and int, optional initial values for decision tree parameters shape(ivals.cut_vals) = (nfeat, (nbins - 1)) shape(tree_ids) = ((nbins,) * nfeat) nbins: int, optional number of bins for which to obtain cuts Returns ------- assignments: numpy.ndarray, int bin assignment for each galaxy shape(assignments) = (ngals, 1) Notes ----- `sort_gals` does the heavy lifting. `eval_metric` will call one of the metrics from [tomo_challenge](https://github.com/LSSTDESC/tomo_challenge/blob/master/tomo_challenge/metrics.py). The original idea for a general, non-cut-based optimizer was to have parameters equal to the (ngals) length array of ints representing the bin assignments, but that's not necessary for the simple cut-and-sweep barber and would probably break `spo.minimize`. """ (ngals, nfeat) = np.shape(galaxies) if ival_treepars is None: cut_ivals = np.quantile(galaxies, np.linspace(0., 1., nbins), axis=1) assert(len(np.flatten(ivals)) == nbins**nfeat) # need structure and way of making dumb version of these tree_ids = npr.random_integers(0, nbins, nbins**nfeat) assert(len(np.unique(tree_ids)) == nbins) tree_ids.reshape((nfeat, nbins)) ival_treepars = TreePars(cut_ivals, tree_ids) ivals = treepars_to_array(ival_treepars) opt_res = spo.minimize(eval_metric, ivals, args=galaxies) treepars = array_to_treepars(opt_res.x) assignments = sort_gals(galaxies, treepars) return(assignments) def sort_gals(galaxies, tree_pars): """ Divides available galaxies into subsets according to a given decision tree on their observables Parameters ---------- galaxies: nfeature x n_gal array tree: tree object Notes ----- could be based on bisect, or maybe a sklearn object? 
""" pass def eval_metric(arr, galaxies): """ Just calls a metric from tomo_challenge wrapped for the `spo.minimize` API Notes ----- Replace `tcm.metric` with actual call to one of the tomo_challenge metrics Actually, there's a problem in that the current tomo_challenge metrics require the true redshifts... """ treepars = array_to_treepars(arr) assignments = sort_gals(galaxies, treepars) metval = tcm.metric(assignments) return metval
35.478632
261
0.685859
0
0
0
0
0
0
0
0
2,538
0.611419
2288f93227622fced04679bfe49afbad16de4e0a
480
py
Python
examples/transfer/highscore.py
coding-world/matrix_max7219
3126604ee400a9ec1d25797f6957a2eae8a3f33c
[ "MIT" ]
null
null
null
examples/transfer/highscore.py
coding-world/matrix_max7219
3126604ee400a9ec1d25797f6957a2eae8a3f33c
[ "MIT" ]
null
null
null
examples/transfer/highscore.py
coding-world/matrix_max7219
3126604ee400a9ec1d25797f6957a2eae8a3f33c
[ "MIT" ]
null
null
null
import shelve regal = shelve.open('score.txt') def updateScore(neuerScore): if('score' in regal): score = regal['score'] if(neuerScore not in score): score.insert(0, neuerScore) score.sort() ranking = score.index(neuerScore) ranking = len(score)-ranking else: score = [neuerScore] ranking = 1 print(score) print(ranking) regal['score'] = score return ranking neuerScore = int(input("Neuer HighScore: \n")) updateScore(neuerScore)
20
46
0.66875
0
0
0
0
0
0
0
0
53
0.110417
22896fc7355f1baa1a7f7d9e3165cdfe2c0b6611
165
py
Python
src/node/ext/ldap/scope.py
enfold/node.ext.ldap
28127057be6ba3092389f3c920575292d43d9f94
[ "BSD-2-Clause" ]
3
2016-04-22T00:37:17.000Z
2020-04-03T07:14:54.000Z
src/node/ext/ldap/scope.py
enfold/node.ext.ldap
28127057be6ba3092389f3c920575292d43d9f94
[ "BSD-2-Clause" ]
51
2015-02-10T11:14:01.000Z
2021-05-05T11:06:59.000Z
src/node/ext/ldap/scope.py
enfold/node.ext.ldap
28127057be6ba3092389f3c920575292d43d9f94
[ "BSD-2-Clause" ]
12
2016-08-09T09:39:35.000Z
2020-04-18T14:53:56.000Z
# -*- coding: utf-8 -*- import ldap BASE = ldap.SCOPE_BASE ONELEVEL = ldap.SCOPE_ONELEVEL SUBTREE = ldap.SCOPE_SUBTREE SCOPES = [BASE, ONELEVEL, SUBTREE] del ldap
16.5
34
0.727273
0
0
0
0
0
0
0
0
23
0.139394
2289dcddf267c6a1a0e8cb907450531ad79de492
493
py
Python
urban-sound-classification/feature_merge.py
tensorflow-korea/tfk-notebooks
67831acce7f435500377bf03e6bd9d15fdd5f1bc
[ "MIT" ]
50
2016-06-18T12:52:29.000Z
2021-12-10T07:13:20.000Z
urban-sound-classification/feature_merge.py
tensorflow-korea/tfk-notebooks
67831acce7f435500377bf03e6bd9d15fdd5f1bc
[ "MIT" ]
null
null
null
urban-sound-classification/feature_merge.py
tensorflow-korea/tfk-notebooks
67831acce7f435500377bf03e6bd9d15fdd5f1bc
[ "MIT" ]
51
2016-04-30T16:38:05.000Z
2021-01-15T18:12:03.000Z
import glob import numpy as np X = np.empty((0, 193)) y = np.empty((0, 10)) groups = np.empty((0, 1)) npz_files = glob.glob('./urban_sound_?.npz') for fn in npz_files: print(fn) data = np.load(fn) X = np.append(X, data['X'], axis=0) y = np.append(y, data['y'], axis=0) groups = np.append(groups, data['groups'], axis=0) print(groups[groups>0]) print(X.shape, y.shape) for r in y: if np.sum(r) > 1.5: print(r) np.savez('urban_sound', X=X, y=y, groups=groups)
22.409091
54
0.602434
0
0
0
0
0
0
0
0
48
0.097363
228ad78fbc730707861e4c8d9c262be93d22bf72
485
py
Python
program/program/trackers/TrackerCorrelation.py
JankaSvK/thesis
c440ab8242b058f580fdf9d5a1d00708a1696561
[ "MIT" ]
1
2018-11-29T14:13:47.000Z
2018-11-29T14:13:47.000Z
program/program/trackers/TrackerCorrelation.py
JankaSvK/thesis
c440ab8242b058f580fdf9d5a1d00708a1696561
[ "MIT" ]
3
2018-04-24T18:30:00.000Z
2018-05-11T23:25:07.000Z
program/program/trackers/TrackerCorrelation.py
JankaSvK/thesis
c440ab8242b058f580fdf9d5a1d00708a1696561
[ "MIT" ]
null
null
null
import dlib class CorrelationTracker(object): def init(self, image, bbox): self.tracker = dlib.correlation_tracker() x, y, x2, y2 = bbox x2 += x y2 += y self.tracker.start_track(image, dlib.rectangle(x, y, x2, y2)) return True def update(self, image): self.tracker.update(image) out = self.tracker.get_position() return True, (out.left(), out.top(), out.right() - out.left(), out.bottom() - out.top())
28.529412
96
0.585567
470
0.969072
0
0
0
0
0
0
0
0
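A brief usage sketch of the wrapper above: init takes a frame plus an (x, y, width, height) box, and update returns a success flag with the new (x, y, width, height). The frames here are synthetic placeholders; in practice they would come from a video source.

import numpy as np

tracker = CorrelationTracker()
first_frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
tracker.init(first_frame, (100, 80, 50, 60))
next_frame = np.zeros((480, 640, 3), dtype=np.uint8)
ok, (x, y, w, h) = tracker.update(next_frame)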
228b1c94896beb15138918d15679461767abdb01
3,238
py
Python
examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py
rilango/NeMo
6f23ff725c596f25fab6043d95e7c0b4a5f56331
[ "Apache-2.0" ]
null
null
null
examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py
rilango/NeMo
6f23ff725c596f25fab6043d95e7c0b4a5f56331
[ "Apache-2.0" ]
null
null
null
examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py
rilango/NeMo
6f23ff725c596f25fab6043d95e7c0b4a5f56331
[ "Apache-2.0" ]
1
2021-12-07T08:15:36.000Z
2021-12-07T08:15:36.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from argparse import ArgumentParser import torch.multiprocessing as mp from pytorch_lightning.trainer.trainer import Trainer from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector from nemo.utils import AppState, logging def get_args(): parser = ArgumentParser() parser.add_argument( "--checkpoint_folder", type=str, default=None, required=True, help="Path to PTL checkpoints saved during training. Ex: /raid/nemo_experiments/megatron_gpt/checkpoints", ) parser.add_argument( "--checkpoint_name", type=str, default=None, required=True, help="Name of checkpoint to be used. Ex: megatron_gpt--val_loss=6.34-step=649-last.ckpt", ) parser.add_argument( "--hparams_file", type=str, default=None, required=False, help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml", ) parser.add_argument("--nemo_file_path", type=str, default=None, required=True, help="Path to output .nemo file.") parser.add_argument("--tensor_model_parallel_size", type=int, required=True, default=None) args = parser.parse_args() return args def convert(rank, world_size, args): app_state = AppState() app_state.data_parallel_rank = 0 trainer = Trainer(gpus=args.tensor_model_parallel_size) # TODO: reach out to PTL For an API-safe local rank override trainer.accelerator.training_type_plugin._local_rank = rank if args.tensor_model_parallel_size is not None and args.tensor_model_parallel_size > 1: # inject model parallel rank checkpoint_path = os.path.join(args.checkpoint_folder, f'mp_rank_{rank:02d}', args.checkpoint_name) else: checkpoint_path = os.path.join(args.checkpoint_folder, args.checkpoint_name) model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer) model._save_restore_connector = NLPSaveRestoreConnector() model.save_to(args.nemo_file_path) logging.info(f'NeMo model saved to: {args.nemo_file_path}') def main() -> None: args = get_args() world_size = args.tensor_model_parallel_size mp.spawn(convert, args=(world_size, args), nprocs=world_size, join=True) if __name__ == '__main__': main() # noqa pylint: disable=no-value-for-parameter
37.218391
218
0.734713
0
0
0
0
0
0
0
0
1,325
0.409203
228b861994dfd3c8d5b7524f5b44ae49bacc2148
6,007
py
Python
sdk/python/pulumi_aws/apigateway/api_key.py
dixler/pulumi-aws
88838ed6d412c092717a916b0b5b154f68226c3a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/apigateway/api_key.py
dixler/pulumi-aws
88838ed6d412c092717a916b0b5b154f68226c3a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/apigateway/api_key.py
dixler/pulumi-aws
88838ed6d412c092717a916b0b5b154f68226c3a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from typing import Union from .. import utilities, tables class ApiKey(pulumi.CustomResource): arn: pulumi.Output[str] """ Amazon Resource Name (ARN) """ created_date: pulumi.Output[str] """ The creation date of the API key """ description: pulumi.Output[str] """ The API key description. Defaults to "Managed by Pulumi". """ enabled: pulumi.Output[bool] """ Specifies whether the API key can be used by callers. Defaults to `true`. """ last_updated_date: pulumi.Output[str] """ The last update date of the API key """ name: pulumi.Output[str] """ The name of the API key """ tags: pulumi.Output[dict] """ Key-value mapping of resource tags """ value: pulumi.Output[str] """ The value of the API key. If not specified, it will be automatically generated by AWS on creation. """ def __init__(__self__, resource_name, opts=None, description=None, enabled=None, name=None, tags=None, value=None, __props__=None, __name__=None, __opts__=None): """ Provides an API Gateway API Key. > **NOTE:** Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now **required** to associate an API key with an API stage. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi". :param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`. :param pulumi.Input[str] name: The name of the API key :param pulumi.Input[dict] tags: Key-value mapping of resource tags :param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if description is None: description = 'Managed by Pulumi' __props__['description'] = description __props__['enabled'] = enabled __props__['name'] = name __props__['tags'] = tags __props__['value'] = value __props__['arn'] = None __props__['created_date'] = None __props__['last_updated_date'] = None super(ApiKey, __self__).__init__( 'aws:apigateway/apiKey:ApiKey', resource_name, __props__, opts) @staticmethod def get(resource_name, id, opts=None, arn=None, created_date=None, description=None, enabled=None, last_updated_date=None, name=None, tags=None, value=None): """ Get an existing ApiKey resource's state with the given name, id, and optional extra properties used to qualify the lookup. 
:param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] arn: Amazon Resource Name (ARN) :param pulumi.Input[str] created_date: The creation date of the API key :param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi". :param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`. :param pulumi.Input[str] last_updated_date: The last update date of the API key :param pulumi.Input[str] name: The name of the API key :param pulumi.Input[dict] tags: Key-value mapping of resource tags :param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["arn"] = arn __props__["created_date"] = created_date __props__["description"] = description __props__["enabled"] = enabled __props__["last_updated_date"] = last_updated_date __props__["name"] = name __props__["tags"] = tags __props__["value"] = value return ApiKey(resource_name, opts=opts, __props__=__props__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
45.507576
170
0.662227
5,697
0.948394
0
0
1,966
0.327285
0
0
3,470
0.577659
228b9e5c3d1a55dd867bb42f9e9fbbc7ed2e9fc5
10,684
py
Python
SROMPy/optimize/ObjectiveFunction.py
jwarner308/SROMPy
12007e4cd99c88446f10974a93050405c5cd925b
[ "Apache-2.0" ]
23
2018-05-13T05:13:03.000Z
2022-01-29T19:43:28.000Z
SROMPy/optimize/ObjectiveFunction.py
jwarner308/SROMPy
12007e4cd99c88446f10974a93050405c5cd925b
[ "Apache-2.0" ]
11
2018-03-28T13:13:44.000Z
2022-03-30T18:56:57.000Z
SROMPy/optimize/ObjectiveFunction.py
jwarner308/SROMPy
12007e4cd99c88446f10974a93050405c5cd925b
[ "Apache-2.0" ]
19
2018-06-01T14:49:30.000Z
2022-03-05T05:02:06.000Z
# Copyright 2018 United States Government as represented by the Administrator of # the National Aeronautics and Space Administration. No copyright is claimed in # the United States under Title 17, U.S. Code. All Other Rights Reserved. # The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed # under the Apache License, Version 2.0 (the "License"); you may not use this # file except in compliance with the License. You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import numpy as np from SROMPy.target import RandomVector from SROMPy.target.RandomEntity import RandomEntity class ObjectiveFunction: """ Defines the objective function for optimizing SROM parameters. Calculates errors between the statistics of the SROM and the target random vector being model by it. Will create objective function for optimization library (e.g. scipy) that essentially wraps this class's evaluate function """ def __init__(self, srom, target, obj_weights=None, error='mean', max_moment=5, num_cdf_grid_points=100): """ Initialize objective function. Pass in SROM & target random vector objects that have been previously initialized. Objective function calculates the errors between the statistics of this SROM and the target random vector (these objects must have compute_moments,CDF, corr_mat functions defined). inputs: -SROM - initialized SROM object -targetRV - initialized RandomVector object (either AnalyticRandomVector or SampleRandomVector) with same dimension as SROM -obj_weights - array of floats defining the relative weight of the terms in the objective function. Terms are error in moments, CDFs, and correlation matrix in that order. Default is equal weights ([1.0,1.0,1.0]) -error - string 'mean','max', or 'sse' defining how error is defined between the statistics of the SROM & target -max_moment - int, max order to evaluate moment errors up to -num_cdf_grid_points - int, # pts to evaluate CDF errors on """ self.__test_init_params(srom, target, obj_weights, error, max_moment, num_cdf_grid_points) self._SROM = srom self._target = target self._x_grid = None # Generate grids for evaluating CDFs based on target RV's range self.generate_cdf_grids(num_cdf_grid_points) self._metric = error.upper() self._max_moment = max_moment def get_moment_error(self, samples, probabilities): """ Returns moment error for given samples & probabilities """ self._SROM.set_params(samples, probabilities) return self.compute_moment_error() def get_cdf_error(self, samples, probabilities): """ Returns CDF error for given samples & probabilities """ self._SROM.set_params(samples, probabilities) return self.compute_cdf_error() def get_corr_error(self, samples, probabilities): """ Returns correlation error for given samples & probabilities """ self._SROM.set_params(samples, probabilities) return self.compute_correlation_error() def evaluate(self, samples, probabilities): """ Evaluates the objective function for the specified SROM samples & probabilities. Calculates errrors in statistics between SROM/target """ error = 0.0 # SROM is by the current values of samples/probabilities for stats. 
self._SROM.set_params(samples, probabilities) if self._weights[0] > 0.0: cdf_error = self.compute_cdf_error() error += cdf_error * self._weights[0] if self._weights[1] > 0.0: moment_error = self.compute_moment_error() error += moment_error * self._weights[1] if self._weights[2] > 0.0: corr_error = self.compute_correlation_error() error += corr_error * self._weights[2] return error def compute_moment_error(self): """ Calculate error in moments between SROM & target """ srom_moments = self._SROM.compute_moments(self._max_moment) target_moments = self._target.compute_moments(self._max_moment) # Reshape to 2D if returned as 1D for scalar RV. if len(target_moments.shape) == 1: target_moments = target_moments.reshape((self._max_moment, 1)) # Prevent divide by zero. zero_indices = np.where(np.abs(target_moments) <= 1e-12)[0] target_moments[zero_indices] = 1.0 # Squared relative difference: if self._metric == "SSE": rel_diffs = ((srom_moments-target_moments)/target_moments)**2.0 error = 0.5*np.sum(rel_diffs) # Max absolute value: elif self._metric == "MAX": diffs = np.abs(srom_moments - target_moments) error = np.max(diffs) elif self._metric == "MEAN": diffs = np.abs(srom_moments - target_moments) error = np.mean(diffs) else: raise ValueError("Invalid error metric") return error def compute_cdf_error(self): """ Calculate error in CDFs between SROM & target at pts in x_grid """ srom_cdfs = self._SROM.compute_cdf(self._x_grid) target_cdfs = self._target.compute_cdf(self._x_grid) # Check for 0 cdf values to prevent divide by zero. nonzero_indices = np.where(target_cdfs[:, 0] > 0)[0] srom_cdfs = srom_cdfs[nonzero_indices, :] target_cdfs = target_cdfs[nonzero_indices, :] if self._metric == "SSE": squared_diffs = (srom_cdfs - target_cdfs)**2.0 rel_diffs = squared_diffs / target_cdfs**2.0 error = 0.5*np.sum(rel_diffs) elif self._metric == "MAX": diffs = np.abs(srom_cdfs - target_cdfs) error = np.max(diffs) elif self._metric == "MEAN": diffs = np.abs(srom_cdfs - target_cdfs) error = np.mean(diffs) else: raise ValueError("Invalid error metric") return error def compute_correlation_error(self): """ Calculate error in correlation matrix between SROM & target """ # Neglect for 1D random variable: if self._target._dim == 1: return 0.0 srom_corr = self._SROM.compute_corr_mat() target_corr = self._target.compute_correlation_matrix() if self._metric == "SSE": squared_diffs = (srom_corr - target_corr)**2.0 rel_diffs = squared_diffs / target_corr**2.0 error = 0.5*np.sum(rel_diffs) elif self._metric == "MAX": diffs = np.abs(srom_corr - target_corr) error = np.max(diffs) elif self._metric == "MEAN": diffs = np.abs(srom_corr - target_corr) error = np.mean(diffs) else: raise ValueError("Invalid error metric") return error def generate_cdf_grids(self, num_cdf_grid_points): """ Generate numerical grids for evaluating the CDF errors based on the range of the target random vector. Create x_grid member variable with num_cdf_grid_points along each dimension of the random vector. """ self._x_grid = np.zeros((num_cdf_grid_points, self._target._dim)) for i in range(self._target._dim): grid = np.linspace(self._target.mins[i], self._target.maxs[i], num_cdf_grid_points) self._x_grid[:, i] = grid def __test_init_params(self, srom, target, obj_weights, error, max_moment, num_cdf_grid_points): """ Due to the large numbers of parameters passed into __init__() that need to be tested, the testing is done in this utility function instead of __init__(). """ # Test target. 
if not (isinstance(target, RandomEntity)): raise TypeError("target must inherit from RandomEntity.") # Test srom. from SROMPy.srom import SROM if not isinstance(srom, SROM): raise TypeError("srom must be of type SROM.") # Ensure srom and target have same dimensions if target is RandomVector. if isinstance(target, RandomVector): if target._dim != srom._dim: raise ValueError("target and srom must have same dimensions.") # Test obj_weights. if obj_weights is not None: if isinstance(obj_weights, list): obj_weights = np.array(obj_weights) if not isinstance(obj_weights, np.ndarray): raise TypeError("obj_weights must be of type ndarray or list.") if len(obj_weights.shape) != 1: raise ValueError("obj_weights must be a one dimensional array.") if obj_weights.shape[0] != 3: raise ValueError("obj_weights must have exactly 3 elements.") if np.min(obj_weights) < 0.: raise ValueError("obj_weights cannot be less than zero.") self._weights = obj_weights else: self._weights = np.ones((3,)) # Test error function name. if not isinstance(error, str): raise TypeError("error must be a string: 'MEAN', 'MAX', or 'SSE'.") if error.upper() not in ["MEAN", "MAX", "SSE"]: raise ValueError("error must be either 'mean', 'max', or 'SSE'.") # Test max_moment. if not isinstance(max_moment, int): raise TypeError("max_moment must be a positive integer.") if max_moment < 1: raise ValueError("max_moment must be a positive integer.") # Test num_cdf_grid_points. if not isinstance(num_cdf_grid_points, int): raise TypeError("cf_grid_pts must be a positive integer.") if num_cdf_grid_points < 1: raise ValueError("num_cdf_grid_points must be a positive integer.")
37.356643
80
0.624579
9,726
0.910333
0
0
0
0
0
0
4,584
0.429053
228bb0a969acb617ccc7d0b12b1281bd81283a5f
4,016
py
Python
test/utils.py
vasili-v/distcovery
e07882d55ebe2e4fd78a720764803e6b3e8cbc7d
[ "MIT" ]
null
null
null
test/utils.py
vasili-v/distcovery
e07882d55ebe2e4fd78a720764803e6b3e8cbc7d
[ "MIT" ]
null
null
null
test/utils.py
vasili-v/distcovery
e07882d55ebe2e4fd78a720764803e6b3e8cbc7d
[ "MIT" ]
null
null
null
import os import errno import sys def mock_directory_tree(tree): tree = dict([(os.path.join(*key), value) \ for key, value in tree.iteritems()]) def listdir(path): try: names = tree[path] except KeyError: raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path) if names is None: raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path) return names def isfile(path): try: item = tree[path] except KeyError: return False return item is None def isdir(path): try: item = tree[path] except KeyError: return False return item is not None return listdir, isfile, isdir class PreserveOs(object): def setUp(self): super(PreserveOs, self).setUp() self.__listdir = os.listdir self.__isfile = os.path.isfile self.__isdir = os.path.isdir def tearDown(self): os.path.isdir = self.__isdir os.path.isfile = self.__isfile os.listdir = self.__listdir super(PreserveOs, self).tearDown() def full_test_tree(self): tree = {('.',): ('__init__.py', 'test_first.py', 'test_second.py', 'test_sub_first', 't_sub_first', 'test_sub_third'), ('.', '__init__.py'): None, ('.', 'test_first.py'): None, ('.', 'test_second.py'): None, ('.', 'test_sub_first'): ('__init__.py', 'test_sub_first.py'), ('.', 'test_sub_first', '__init__.py'): None, ('.', 'test_sub_first', 'test_sub_first.py'): None, ('.', 't_sub_first'): ('__init__.py', 'test_sub_first.py'), ('.', 't_sub_first', '__init__.py'): None, ('.', 't_sub_first', 'test_sub_first.py'): None, ('.', 'test_sub_second'): ('test_sub_first.py',), ('.', 'test_sub_second', 'test_sub_first.py'): None, ('.', 'test_sub_third'): ('__init__.py', 'test_sub_first.py', 'test_sub_second'), ('.', 'test_sub_third', '__init__.py'): None, ('.', 'test_sub_third', 'test_sub_first.py'): None, ('.', 'test_sub_third', 'test_sub_second'): \ ('__init__.py', 'test_sub_first.py', 't_sub_second.py'), ('.', 'test_sub_third', 'test_sub_second', '__init__.py'): None, ('.', 'test_sub_third', 'test_sub_second', 'test_sub_first.py'): None, ('.', 'test_sub_third', 'test_sub_second', 't_sub_second.py'): None} os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree) self.expected_content = {'first': 'test_first', 'second': 'test_second', 'sub_first': 'test_sub_first', 'sub_first.sub_first': \ 'test_sub_first.test_sub_first', 'sub_third': 'test_sub_third', 'sub_third.sub_first': \ 'test_sub_third.test_sub_first', 'sub_third.sub_second': \ 'test_sub_third.test_sub_second', 'sub_third.sub_second.sub_first': \ 'test_sub_third.test_sub_second.' \ 'test_sub_first'} class ImportTrash(object): def setUp(self): self.modules_trash = [] self.meta_path_trash = [] def tearDown(self): for item in self.meta_path_trash: if item in sys.meta_path: sys.meta_path.remove(item) for name in self.modules_trash: if name in sys.modules: del sys.modules[name]
36.844037
80
0.493775
3,221
0.802042
0
0
0
0
0
0
1,169
0.291086
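mock_directory_tree above turns a dict keyed by path tuples into fake listdir/isfile/isdir callables; a minimal usage sketch follows (note the module targets Python 2, since it relies on dict.iteritems):

import os

tree = {
    ('.',): ('test_pkg',),
    ('.', 'test_pkg'): ('__init__.py',),
    ('.', 'test_pkg', '__init__.py'): None,
}
listdir, isfile, isdir = mock_directory_tree(tree)
print(listdir('.'))                                          # ('test_pkg',)
print(isdir(os.path.join('.', 'test_pkg')))                  # True
print(isfile(os.path.join('.', 'test_pkg', '__init__.py')))  # True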
228d76877f0d9f67ffc6dc7483c7c0a95962b0f9
864
py
Python
var/spack/repos/builtin/packages/perl-ipc-run/package.py
adrianjhpc/spack
0a9e4fcee57911f2db586aa50c8873d9cca8de92
[ "ECL-2.0", "Apache-2.0", "MIT" ]
2
2020-10-15T01:08:42.000Z
2021-10-18T01:28:18.000Z
var/spack/repos/builtin/packages/perl-ipc-run/package.py
adrianjhpc/spack
0a9e4fcee57911f2db586aa50c8873d9cca8de92
[ "ECL-2.0", "Apache-2.0", "MIT" ]
2
2019-07-30T10:12:28.000Z
2019-12-17T09:02:27.000Z
var/spack/repos/builtin/packages/perl-ipc-run/package.py
adrianjhpc/spack
0a9e4fcee57911f2db586aa50c8873d9cca8de92
[ "ECL-2.0", "Apache-2.0", "MIT" ]
5
2019-07-30T09:42:14.000Z
2021-01-25T05:39:20.000Z
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PerlIpcRun(PerlPackage): """IPC::Run allows you to run and interact with child processes using files, pipes, and pseudo-ttys. Both system()-style and scripted usages are supported and may be mixed. Likewise, functional and OO API styles are both supported and may be mixed.""" homepage = "https://metacpan.org/pod/IPC::Run" url = "https://cpan.metacpan.org/authors/id/T/TO/TODDR/IPC-Run-20180523.0.tar.gz" version('20180523.0', sha256='3850d7edf8a4671391c6e99bb770698e1c45da55b323b31c76310913349b6c2f') depends_on('perl-io-tty', type=('build', 'run')) depends_on('perl-readonly', type='build')
39.272727
100
0.730324
643
0.744213
0
0
0
0
0
0
691
0.799769
228d8328feac3519c1eb966b9a43a964120c8c6c
1,369
py
Python
tests/test_parser_create_site_users.py
WillAyd/tabcmd
1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15
[ "MIT" ]
null
null
null
tests/test_parser_create_site_users.py
WillAyd/tabcmd
1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15
[ "MIT" ]
null
null
null
tests/test_parser_create_site_users.py
WillAyd/tabcmd
1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15
[ "MIT" ]
null
null
null
import sys import unittest try: from unittest import mock except ImportError: import mock import argparse from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser from .common_setup import * commandname = 'createsiteusers' class CreateSiteUsersParserTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser_under_test, manager, mock_command = initialize_test_pieces(commandname) CreateSiteUsersParser.create_site_user_parser(manager, mock_command) def test_create_site_users_parser_users_file(self): with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file: mock_args = [commandname, "users.csv"] args = self.parser_under_test.parse_args(mock_args) open_file.assert_called_with('users.csv', 'r', -1, None, None) def test_create_site_user_parser_missing_arguments(self): mock_args = [commandname] with self.assertRaises(SystemExit): args = self.parser_under_test.parse_args(mock_args) def test_create_site_user_parser_role(self): with mock.patch('builtins.open', mock.mock_open(read_data='test')): mock_args = [commandname, "users.csv", '--site', 'site-name'] args = self.parser_under_test.parse_args(mock_args) assert args.site == 'site-name', args
37
90
0.720964
1,117
0.815924
0
0
205
0.149744
0
0
125
0.091308
228e4efae17879a415faffa2bdf7cfbc08f32c9f
1,078
py
Python
secretsmanager_env.py
iarlyy/secretsmanager-env
3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73
[ "MIT" ]
1
2020-02-13T17:11:29.000Z
2020-02-13T17:11:29.000Z
secretsmanager_env.py
iarlyy/secretsmanager-env
3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73
[ "MIT" ]
null
null
null
secretsmanager_env.py
iarlyy/secretsmanager-env
3a34a4e9561e4651fa2975ff6f32b00ef0c0ca73
[ "MIT" ]
null
null
null
#!/usr/bin/env python
import argparse
import json
import os

import boto3

parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
Output following the defined format. Options are:
  dotenv - dotenv style [default]
  export - shell export style
  stdout - secret plain value style'''
)

parser.add_argument(
    '--output',
    default='dotenv',
    choices=['stdout', 'dotenv', 'export'],
)

args = parser.parse_args()

try:
    secret_id = os.environ.get("ENV_SECRET_NAME")
    secretsmanager = boto3.client('secretsmanager')
    secret_values = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])
except:
    print('Error getting secret')
    raise

if args.output == 'export':
    prefix = 'export '
else:
    prefix = ''

if args.output != 'stdout':
    for envvar in secret_values:
        print(prefix+envvar+"=$'"+secret_values[envvar].replace('\\n', '\n')+"'")
else:
    # secret_values is a dict, so dump it directly; str.replace does not apply to it
    print(json.dumps(secret_values, indent=2, sort_keys=True))
24.5
99
0.670686
0
0
0
0
0
0
0
0
354
0.328386
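To make the dotenv formatting above concrete, here is the same loop run offline on a made-up secret (no AWS call involved); the key/value pairs are invented for illustration.

sample_secret = {"DB_HOST": "db.local", "DB_PORT": "5432"}
for envvar in sample_secret:
    print(envvar + "=$'" + sample_secret[envvar].replace('\\n', '\n') + "'")
# DB_HOST=$'db.local'
# DB_PORT=$'5432'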
228e74b0f9248fe2ef101b86260ca316c5578c5c
1,730
py
Python
109.py
juandarr/ProjectEuler
951705ac62f550d7fbecdc3f35ab8c38b53b9225
[ "MIT" ]
null
null
null
109.py
juandarr/ProjectEuler
951705ac62f550d7fbecdc3f35ab8c38b53b9225
[ "MIT" ]
null
null
null
109.py
juandarr/ProjectEuler
951705ac62f550d7fbecdc3f35ab8c38b53b9225
[ "MIT" ]
null
null
null
""" Finds the number of distinct ways a player can checkout a score less than 100 Author: Juan Rios """ import math def checkout_solutions(checkout,sequence,idx_sq,d): ''' returns the number of solution for a given checkout value ''' counter = 0 for double in d: if double>checkout: break res = checkout-double if res==0: counter +=1 continue if res<=60: if res in idx_sq: index = idx_sq[res] else: index = len(sequence)-1 while res>sequence[index]: index -=1 else: index = len(sequence)-1 for idx in range(index,-1,-1): a = sequence[idx] if a==res: counter+=1 continue for idx2 in range(idx,-1,-1): if a+sequence[idx2]==res: counter +=1 elif a+sequence[idx2]<res: break return counter def darts_checkout(limit_value): s = [i for i in range(1,21)]+[25] d = [2*i for i in range(1,21)]+[50] t = [3*i for i in range(1,21)] sequence = sorted(s+d+t) idx_sq = {} for idx in range(len(sequence)-1): if sequence[idx]!=sequence[idx+1]: idx_sq[sequence[idx]]=idx idx_sq[sequence[-1]]=len(sequence)-1 n = limit_value total = 0 for checkout in range(1,limit_value+1): total += checkout_solutions(checkout,sequence,idx_sq,d) return total if __name__ == "__main__": limit_value=99 print('The number of distinct ways a player can checkout a score less than 100 is {0}'.format(darts_checkout(limit_value)))
28.360656
128
0.540462
0
0
0
0
0
0
0
0
266
0.153757
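The Project Euler 109 statement gives one concrete figure that can serve as a sanity check for checkout_solutions/darts_checkout above: a score of 6 has exactly eleven distinct checkouts. Under that assumption, the difference of two cumulative totals isolates a single score:

ways_for_6 = darts_checkout(6) - darts_checkout(5)
print(ways_for_6)  # expected 11, per the problem statement's example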
228e9262ba137f922fefb676a2a9e3eabc4bf87c
804
py
Python
src/tevatron/tevax/loss.py
vjeronymo2/tevatron
7235b0823b5c3cdf1c8ce8f67cb5f1209218086a
[ "Apache-2.0" ]
95
2021-09-16T00:35:17.000Z
2022-03-31T04:59:05.000Z
src/tevatron/tevax/loss.py
vjeronymo2/tevatron
7235b0823b5c3cdf1c8ce8f67cb5f1209218086a
[ "Apache-2.0" ]
16
2021-10-05T12:29:33.000Z
2022-03-31T17:59:20.000Z
src/tevatron/tevax/loss.py
vjeronymo2/tevatron
7235b0823b5c3cdf1c8ce8f67cb5f1209218086a
[ "Apache-2.0" ]
15
2021-09-19T02:20:03.000Z
2022-03-10T03:00:23.000Z
import jax.numpy as jnp from jax import lax import optax import chex def _onehot(labels: chex.Array, num_classes: int) -> chex.Array: x = labels[..., None] == jnp.arange(num_classes).reshape((1,) * labels.ndim + (-1,)) x = lax.select(x, jnp.ones(x.shape), jnp.zeros(x.shape)) return x.astype(jnp.float32) def p_contrastive_loss(ss: chex.Array, tt: chex.Array, axis: str = 'device') -> chex.Array: per_shard_targets = tt.shape[0] per_sample_targets = int(tt.shape[0] / ss.shape[0]) labels = jnp.arange(0, per_shard_targets, per_sample_targets) + per_shard_targets * lax.axis_index(axis) tt = lax.all_gather(tt, axis).reshape((-1, ss.shape[-1])) scores = jnp.dot(ss, jnp.transpose(tt)) return optax.softmax_cross_entropy(scores, _onehot(labels, scores.shape[-1]))
36.545455
108
0.690299
0
0
0
0
0
0
0
0
8
0.00995
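_onehot above can be sanity-checked on its own, whereas p_contrastive_loss is only meaningful inside a jax.pmap with axis_name='device' (it uses lax.axis_index and lax.all_gather). A quick check of the one-hot helper:

import jax.numpy as jnp

labels = jnp.array([0, 2])
print(_onehot(labels, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]]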
228eb608e052e061a5945151be48c2a98a56d133
1,758
py
Python
setup.py
kinnala/gammy
85237d424001f77f296d724c95c8dec5803a8e1e
[ "MIT" ]
null
null
null
setup.py
kinnala/gammy
85237d424001f77f296d724c95c8dec5803a8e1e
[ "MIT" ]
null
null
null
setup.py
kinnala/gammy
85237d424001f77f296d724c95c8dec5803a8e1e
[ "MIT" ]
null
null
null
import os from setuptools import setup, find_packages import versioneer if __name__ == "__main__": def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() meta = {} base_dir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp: exec(fp.read(), meta) setup( name = "gammy", version = versioneer.get_version(), author = meta["__author__"], author_email = meta["__contact__"], description = "Generalized additive models with a Bayesian twist", url = "https://github.com/malmgrek/Gammy", cmdclass = versioneer.get_cmdclass(), packages = find_packages(), install_requires = [ "attrs", "bayespy", "h5py", "matplotlib", "numpy", "scipy" ], extras_require = { "dev": [ "versioneer", "pytest", "hypothesis", ], }, keywords = [ "Statistical modeling", "Bayesian statistics", "Machine learning", ], classifiers = [ "Programming Language :: Python :: 3 :: Only", "Development Status :: 1 - Planning", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: {0}".format(meta["__license__"]), "Operating System :: OS Independent", "Topic :: Scientific/Engineering", ], long_description = read('README.md'), long_description_content_type = "text/markdown", )
30.842105
75
0.513083
0
0
0
0
0
0
0
0
575
0.327076
228f917fd03d25566ca49e7918c233c48b585119
88
py
Python
fast-ml/main.py
gabrielstork/fast-ml
ce93c1263970ce7b958e1c3e932c70909bcc0e31
[ "Apache-2.0" ]
1
2021-07-26T15:37:30.000Z
2021-07-26T15:37:30.000Z
fast-ml/main.py
gabrielstork/fast-ml
ce93c1263970ce7b958e1c3e932c70909bcc0e31
[ "Apache-2.0" ]
null
null
null
fast-ml/main.py
gabrielstork/fast-ml
ce93c1263970ce7b958e1c3e932c70909bcc0e31
[ "Apache-2.0" ]
null
null
null
import root if __name__ == '__main__': window = root.Root() window.mainloop()
12.571429
26
0.636364
0
0
0
0
0
0
0
0
10
0.113636
2290a77719ce3ea48bd13dc7fb8b6642fe413085
144
py
Python
application/recommendations/__init__.py
QualiChain/qualichain_backend
cc6dbf1ae5d09e8d01cccde94326563b25d28b58
[ "MIT" ]
null
null
null
application/recommendations/__init__.py
QualiChain/qualichain_backend
cc6dbf1ae5d09e8d01cccde94326563b25d28b58
[ "MIT" ]
null
null
null
application/recommendations/__init__.py
QualiChain/qualichain_backend
cc6dbf1ae5d09e8d01cccde94326563b25d28b58
[ "MIT" ]
null
null
null
from flask import Blueprint recommendation_blueprint = Blueprint('recommendations', __name__) from application.recommendations import routes
20.571429
65
0.847222
0
0
0
0
0
0
0
0
17
0.118056
2290bfd1c4b65da8f41f786b9bf73bcded25e4b1
4,203
py
Python
predictors/scene_predictor.py
XenonLamb/higan
6e7b47f91df23d8d6075d95921e664c9fa4f1306
[ "MIT" ]
83
2020-03-11T21:20:59.000Z
2022-03-17T10:08:27.000Z
predictors/scene_predictor.py
XenonLamb/higan
6e7b47f91df23d8d6075d95921e664c9fa4f1306
[ "MIT" ]
8
2020-04-16T14:37:42.000Z
2021-09-20T20:18:06.000Z
predictors/scene_predictor.py
billzhonggz/higan
168f24f7e3969bc8dc580e2c997463e76644c17f
[ "MIT" ]
19
2020-04-13T02:55:51.000Z
2022-01-28T06:37:25.000Z
# python 3.7 """Predicts the scene category, attribute.""" import numpy as np from PIL import Image import torch import torch.nn.functional as F import torchvision.transforms as transforms from .base_predictor import BasePredictor from .scene_wideresnet import resnet18 __all__ = ['ScenePredictor'] NUM_CATEGORIES = 365 NUM_ATTRIBUTES = 102 FEATURE_DIM = 512 class ScenePredictor(BasePredictor): """Defines the predictor class for scene analysis.""" def __init__(self): super().__init__('scene') def build(self): self.net = resnet18(num_classes=NUM_CATEGORIES) def load(self): # Load category labels. self.check_attr('category_anno_path') self.category_name_to_idx = {} self.category_idx_to_name = {} with open(self.category_anno_path, 'r') as f: for line in f: name, idx = line.strip().split(' ') name = name[3:].replace('/', '__') idx = int(idx) self.category_name_to_idx[name] = idx self.category_idx_to_name[idx] = name assert len(self.category_name_to_idx) == NUM_CATEGORIES assert len(self.category_idx_to_name) == NUM_CATEGORIES # Load attribute labels. self.check_attr('attribute_anno_path') self.attribute_name_to_idx = {} self.attribute_idx_to_name = {} with open(self.attribute_anno_path, 'r') as f: for idx, line in enumerate(f): name = line.strip().replace(' ', '_') self.attribute_name_to_idx[name] = idx self.attribute_idx_to_name[idx] = name assert len(self.attribute_name_to_idx) == NUM_ATTRIBUTES assert len(self.attribute_idx_to_name) == NUM_ATTRIBUTES # Transform for input images. self.transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) # Load pre-trained weights for category prediction. checkpoint = torch.load(self.weight_path, map_location=lambda storage, loc: storage) state_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()} self.net.load_state_dict(state_dict) fc_weight = list(self.net.parameters())[-2].data.numpy() fc_weight[fc_weight < 0] = 0 # Load additional weights for attribute prediction. self.check_attr('attribute_additional_weight_path') self.attribute_weight = np.load(self.attribute_additional_weight_path) assert self.attribute_weight.shape == (NUM_ATTRIBUTES, FEATURE_DIM) def _predict(self, images): if not isinstance(images, np.ndarray): raise ValueError(f'Images should be with type `numpy.ndarray`!') if images.dtype != np.uint8: raise ValueError(f'Images should be with dtype `numpy.uint8`!') if not (len(images.shape) == 4 and 0 < images.shape[0] <= self.batch_size and images.shape[3] == self.image_channels): raise ValueError(f'Images should be with shape [batch_size, height ' f'width, channel], where `batch_size` no larger than ' f'{self.batch_size}, and `channel` equals to ' f'{self.image_channels}!\n' f'But {images.shape} received!') xs = [self.transform(Image.fromarray(img)).unsqueeze(0) for img in images] xs = torch.cat(xs, dim=0).to(self.run_device) logits, features = self.net(xs) category_scores = self.get_value(F.softmax(logits, dim=1)) features = self.get_value(features).squeeze(axis=(2, 3)) attribute_scores = features.dot(self.attribute_weight.T) assert (len(category_scores.shape) == 2 and category_scores.shape[1] == NUM_CATEGORIES) assert (len(attribute_scores.shape) == 2 and attribute_scores.shape[1] == NUM_ATTRIBUTES) results = { 'category': category_scores, 'attribute': attribute_scores, } if self.use_cuda: torch.cuda.empty_cache() return results def predict(self, images, **kwargs): return self.batch_run(images, self._predict)
36.232759
79
0.647395
3,817
0.908161
0
0
0
0
0
0
758
0.180347
22915424775bb0c1cd95df8d2deeb30cca4451ba
1,845
py
Python
python_test.py
jackKiZhu/mypython
43eac97bec07338ed3b8b9473d4e4fae26f7140c
[ "MIT" ]
null
null
null
python_test.py
jackKiZhu/mypython
43eac97bec07338ed3b8b9473d4e4fae26f7140c
[ "MIT" ]
null
null
null
python_test.py
jackKiZhu/mypython
43eac97bec07338ed3b8b9473d4e4fae26f7140c
[ "MIT" ]
null
null
null
from flask import Flask, render_template, request from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:[email protected]:3306/python_github" app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True db = SQLAlchemy(app) class User(db.Model): id = db.Column(db.Integer, primary_key=True) user_name = db.Column(db.String(64), unique=True) user_password = db.Column(db.String(32)) def __repr__(self): return "用户id:%s 用户名:%s" % (self.id, self.user_name) @app.route("/", methods=["post", "get"]) def index(): index_meg = "" if request.method == "POST": user_name = request.form.get("user_name", "") user_pwd = request.form.get("user_pwd", "") if not all([user_name, user_pwd]): index_meg = "请正确输入信息" else: print(request.get_data()) user_name_is_exits = User.query.filter(User.user_name == user_name).first() if user_name_is_exits: index_meg = "用户名已存在" else: user_obj = User(user_name=user_name, user_password=user_pwd) db.session.add(user_obj) db.session.commit() index_meg = "注册成功" print("注册成功") # user_name = request.args.get("user_name", "") # user_pwd = request.args.get("user_pwd", "") # user_is_login = User.query.filter_by(user_name=user_name, user_password=user_pwd).first() # if user_is_login: # index_meg = "登陆成功" # print("登陆成功") # return render_template("login_ok.html", index_meg=index_meg) # else: # # index_meg = "登陆失败" # print("登陆失败") return render_template("index.html", index_meg=index_meg) if __name__ == "__main__": db.drop_all() db.create_all() app.run(debug=True)
32.368421
95
0.614634
264
0.136858
0
0
1,287
0.667185
0
0
667
0.345775
2291547d5512bbb1bda47b665f654ae2a6cde5f2
652
py
Python
src/etc/gec/3.py
iml1111/algorithm-study
f21f6f9f43235248f3496f034a899f2314ab6fcc
[ "MIT" ]
null
null
null
src/etc/gec/3.py
iml1111/algorithm-study
f21f6f9f43235248f3496f034a899f2314ab6fcc
[ "MIT" ]
null
null
null
src/etc/gec/3.py
iml1111/algorithm-study
f21f6f9f43235248f3496f034a899f2314ab6fcc
[ "MIT" ]
null
null
null
from collections import deque def solution(N, bus_stop): answer = [[1300 for _ in range(N)] for _ in range(N)] bus_stop = [(x-1, y-1) for x,y in bus_stop] q = deque(bus_stop) for x,y in bus_stop: answer[x][y] = 0 while q: x, y = q.popleft() for nx, ny in ((x-1, y), (x+1, y), (x, y+1), (x, y-1)): if ( 0 <= nx < N and 0 <= ny < N and answer[nx][ny] > answer[x][y] ): answer[nx][ny] = answer[x][y] + 1 q.append((nx, ny)) return answer if __name__ == '__main__': print(solution( 3, [[1,2],[3,3]], ))
27.166667
63
0.45092
0
0
0
0
0
0
0
0
10
0.015337
2293c25414f578bb3829ecd6692177ce5d098784
1,218
py
Python
python/tree/0103_binary_tree_zigzag_level_order_traversal.py
linshaoyong/leetcode
ea052fad68a2fe0cbfa5469398508ec2b776654f
[ "MIT" ]
6
2019-07-15T13:23:57.000Z
2020-01-22T03:12:01.000Z
python/tree/0103_binary_tree_zigzag_level_order_traversal.py
linshaoyong/leetcode
ea052fad68a2fe0cbfa5469398508ec2b776654f
[ "MIT" ]
null
null
null
python/tree/0103_binary_tree_zigzag_level_order_traversal.py
linshaoyong/leetcode
ea052fad68a2fe0cbfa5469398508ec2b776654f
[ "MIT" ]
1
2019-07-24T02:15:31.000Z
2019-07-24T02:15:31.000Z
class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution(object): def zigzagLevelOrder(self, root): """ :type root: TreeNode :rtype: List[List[int]] """ if not root: return [] a = [root] b = [] c = [] r = [[root.val]] i = 1 while True: for n in a: if n.left: b.append(n.left) c.append(n.left.val) if n.right: b.append(n.right) c.append(n.right.val) if not b: break else: a = b if i & 1 == 1: c.reverse() r.append(c) b = [] c = [] i += 1 return r def test_zigzag_level_order(): a = TreeNode(3) b = TreeNode(9) c = TreeNode(20) d = TreeNode(15) e = TreeNode(7) a.left = b a.right = c c.left = d c.right = e assert Solution().zigzagLevelOrder(a) == [ [3], [20, 9], [15, 7] ]
21
46
0.374384
917
0.752874
0
0
0
0
0
0
76
0.062397
22941cdcf437ea8fe9f771e15f228dacff7fbb5f
5,452
py
Python
plaso/parsers/winreg_plugins/usbstor.py
berggren/plaso
2658c80c5076f97a9a27272e73997bde8c39e875
[ "Apache-2.0" ]
2
2020-02-09T01:11:08.000Z
2021-09-17T04:16:31.000Z
plaso/parsers/winreg_plugins/usbstor.py
berggren/plaso
2658c80c5076f97a9a27272e73997bde8c39e875
[ "Apache-2.0" ]
null
null
null
plaso/parsers/winreg_plugins/usbstor.py
berggren/plaso
2658c80c5076f97a9a27272e73997bde8c39e875
[ "Apache-2.0" ]
1
2021-03-17T09:47:01.000Z
2021-03-17T09:47:01.000Z
# -*- coding: utf-8 -*- """File containing a Windows Registry plugin to parse the USBStor key.""" from __future__ import unicode_literals from plaso.containers import events from plaso.containers import time_events from plaso.lib import definitions from plaso.parsers import logger from plaso.parsers import winreg from plaso.parsers.winreg_plugins import interface class USBStorEventData(events.EventData): """USBStor event data attribute container. Attributes: device_type (str): type of USB device. display_name (str): display name of the USB device. key_path (str): Windows Registry key path. parent_id_prefix (str): parent identifier prefix of the USB device. product (str): product of the USB device. serial (str): serial number of the USB device. revision (str): revision number of the USB device. subkey_name (str): name of the Windows Registry subkey. vendor (str): vendor of the USB device. """ DATA_TYPE = 'windows:registry:usbstor' def __init__(self): """Initializes event data.""" super(USBStorEventData, self).__init__(data_type=self.DATA_TYPE) self.device_type = None self.display_name = None self.key_path = None self.parent_id_prefix = None self.product = None self.revision = None self.serial = None # TODO: rename subkey_name to something that closer matches its purpose. self.subkey_name = None self.vendor = None class USBStorPlugin(interface.WindowsRegistryPlugin): """USBStor key plugin. Also see: http://www.forensicswiki.org/wiki/USB_History_Viewing """ NAME = 'windows_usbstor_devices' DESCRIPTION = 'Parser for USB Plug And Play Manager USBStor Registry Key.' FILTERS = frozenset([ interface.WindowsRegistryKeyPathFilter( 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')]) def ExtractEvents(self, parser_mediator, registry_key, **kwargs): """Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. """ for subkey in registry_key.GetSubkeys(): subkey_name = subkey.name name_values = subkey_name.split('&') number_of_name_values = len(name_values) # Normally we expect 4 fields here however that is not always the case. if number_of_name_values != 4: logger.warning( 'Expected 4 &-separated values in: {0:s}'.format(subkey_name)) event_data = USBStorEventData() event_data.key_path = registry_key.path event_data.subkey_name = subkey_name if number_of_name_values >= 1: event_data.device_type = name_values[0] if number_of_name_values >= 2: event_data.vendor = name_values[1] if number_of_name_values >= 3: event_data.product = name_values[2] if number_of_name_values >= 4: event_data.revision = name_values[3] if subkey.number_of_subkeys == 0: # Time last USB device of this class was first inserted. event = time_events.DateTimeValuesEvent( subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) continue for device_key in subkey.GetSubkeys(): event_data.serial = device_key.name friendly_name_value = device_key.GetValueByName('FriendlyName') if friendly_name_value: event_data.display_name = friendly_name_value.GetDataAsObject() # ParentIdPrefix applies to Windows XP Only. parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix') if parent_id_prefix_value: event_data.parent_id_prefix = parent_id_prefix_value.GetDataAsObject() # Time last USB device of this class was first inserted. 
event = time_events.DateTimeValuesEvent( subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) # Win7 - Last Connection. # Vista/XP - Time of an insert. event = time_events.DateTimeValuesEvent( device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) device_parameter_key = device_key.GetSubkeyByName('Device Parameters') if device_parameter_key: event = time_events.DateTimeValuesEvent( device_parameter_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) log_configuration_key = device_key.GetSubkeyByName('LogConf') if log_configuration_key: event = time_events.DateTimeValuesEvent( log_configuration_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) properties_key = device_key.GetSubkeyByName('Properties') if properties_key: event = time_events.DateTimeValuesEvent( properties_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
37.6
80
0.716985
5,021
0.920946
0
0
0
0
0
0
1,674
0.307043
2298b7f13b630423d0c12d2422ae336ad2ea8774
71
py
Python
damn_vulnerable_python/evil.py
CodyKochmann/damn_vulnerable_python
8a90ee3b70dddae96f9f0a8500ed9ba5693f3082
[ "MIT" ]
1
2018-05-22T03:27:54.000Z
2018-05-22T03:27:54.000Z
damn_vulnerable_python/evil.py
CodyKochmann/damn_vulnerable_python
8a90ee3b70dddae96f9f0a8500ed9ba5693f3082
[ "MIT" ]
2
2018-05-22T02:04:39.000Z
2018-05-22T12:46:31.000Z
damn_vulnerable_python/evil.py
CodyKochmann/damn_vulnerable_python
8a90ee3b70dddae96f9f0a8500ed9ba5693f3082
[ "MIT" ]
null
null
null
''' static analyzers are annoying so let's rename eval ''' evil = eval
17.75
57
0.704225
0
0
0
0
0
0
0
0
57
0.802817
229d03edb58694ea053e0d0cf56108a3ca34b32c
17,257
py
Python
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py
MIMUW-RL/spp-rl
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
[ "MIT" ]
7
2020-06-15T12:25:53.000Z
2021-11-03T01:08:47.000Z
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py
MIMUW-RL/spp-rl
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
[ "MIT" ]
null
null
null
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py
MIMUW-RL/spp-rl
86b96cdd220cc4eae86f7cfd26924c69b498dcc6
[ "MIT" ]
1
2020-12-21T11:21:22.000Z
2020-12-21T11:21:22.000Z
import numpy as np import torch from torch.nn import functional as F from rltoolkit.acm.off_policy import AcMOffPolicy from rltoolkit.algorithms import DDPG from rltoolkit.algorithms.ddpg.models import Actor, Critic class DDPG_AcM(AcMOffPolicy, DDPG): def __init__( self, unbiased_update: bool = False, custom_loss: float = 0.0, custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001, refill_buffer: bool = False, lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False, cw_cl_targets: list = None, custom_loss_target_decay: int = None, custom_loss_target_dfactor: float = None, *args, **kwargs, ): f"""DDPG with AcM class Args: unbiased_update (bool, optional): Use next_obs as action for update. Defaults to { False }. refill_buffer (bool, optional): if buffer should be refilled with new observations, when its full Defaults to {False} """ super().__init__(*args, **kwargs) self.unbiased_update = unbiased_update self.actor = Actor( self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim ) if not self.acm_critic: self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim) self.custom_loss = custom_loss custom_loss_scaled = np.log(np.exp(custom_loss) - 1) self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim) self.custom_loss_param.requires_grad = lagrangian_custom_loss self.custom_loss_target = custom_loss_target self.cw_cl_targets = cw_cl_targets if lagrangian_custom_loss and cw_cl_targets: self.custom_loss_target = cw_cl_targets self.lagrangian_custom_loss = lagrangian_custom_loss self.custom_loss_lr = custom_loss_lr self.separate_custom_loss = separate_custom_loss self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr) self.refill_buffer = refill_buffer self.custom_loss_target_decay = custom_loss_target_decay self.custom_loss_target_dfactor = custom_loss_target_dfactor if self.custom_loss: self.loss["ddpg"] = 0.0 self.loss["dist"] = 0.0 if lagrangian_custom_loss: if self.separate_custom_loss: self.distances = [] for i in range(self.actor_output_dim): self.loss[f"custom_loss_param/{i}"] = 0.0 else: self.loss["custom_loss_param"] = 0.0 new_hparams = { "hparams/unbiased_update": self.unbiased_update, "hparams/custom_loss": self.custom_loss, "hparams/lagrangian_cl": self.lagrangian_custom_loss, "hparams/custom_loss_target_decay": self.custom_loss_target_decay, "hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor, } if self.lagrangian_custom_loss: if self.cw_cl_targets is None: new_hparams["hparams/cl_target"] = self.custom_loss_target new_hparams["hparams/cl_lr"] = self.custom_loss_lr self.hparams_acm.update(new_hparams) self.hparams.update(self.hparams_acm) def noise_action(self, obs, act_noise, deterministic=False): action, _ = self._actor.act(obs, deterministic) noise = act_noise * torch.randn(self.actor_output_dim, device=self.device) action += noise * self.actor_ac_lim action = np.clip( action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu() ) action = action.to(self.device) if self.denormalize_actor_out: action = self.replay_buffer.denormalize(action, self.acm_ob_idx) return action def custom_loss_target_decay_condition(self): return( self.custom_loss_target_decay is not None and self.custom_loss_target_dfactor is not None and self.iterations > 0 and self.stats_logger.frames % self.custom_loss_target_decay == 0 ) def acm_update_condition(self): return ( self.iteration > 0 and self.acm_epochs > 0 
and self.stats_logger.frames % self.acm_update_freq == 0 ) def make_unbiased_update(self): if self.update_condition(): for _ in range(self.grad_steps): batch = self.replay_buffer.sample_batch( self.update_batch_size, self.device ) obs, next_obs, _, reward, done, acm_action = batch self.update( obs=obs, next_obs=next_obs, action=next_obs, reward=reward, done=done, acm_action=acm_action, ) def make_update(self): if self.unbiased_update: self.make_unbiased_update() else: super().make_update() if self.custom_loss_target_decay_condition(): self.custom_loss_target *= self.custom_loss_target_dfactor print(f"CUSTOM LOSS TARTGET DECAY, CURRENT VALUE {self.custom_loss_target}") if self.acm_update_condition(): if self.acm_update_batches: self.update_acm_batches(self.acm_update_batches) else: self.update_acm(self.acm_epochs) def collect_params_dict(self): params_dict = super().collect_params_dict() params_dict["acm"] = self.acm.state_dict() return params_dict def apply_params_dict(self, params_dict): super().apply_params_dict(params_dict) self.acm.load_state_dict(params_dict["acm"]) def save_model(self, save_path=None): save_path = DDPG.save_model(self, save_path) torch.save(self.acm.state_dict(), save_path + "_acm_model.pt") def compute_qfunc_targ( self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor ): """Compute targets for Q-functions Args: reward (torch.Tensor): batch of rewards next_obs (torch.Tensor): batch of next observations done (torch.Tensor): batch of done Returns: torch.Tensor: Q-function targets for the batch """ with torch.no_grad(): next_action, _ = self.actor_targ(next_obs) next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx) if self.acm_critic: acm_obs = torch.cat([next_obs, next_action], axis=1) next_action = self.acm(acm_obs) q_target = self.critic_targ(next_obs, next_action) qfunc_target = reward + self.gamma * (1 - done) * q_target return qfunc_target def add_custom_loss(self, loss, action, denorm_action, next_obs): if self.custom_loss: self.loss["ddpg"] = loss.item() if self.norm_closs: next_obs = self.replay_buffer.normalize(next_obs, force=True) else: action = denorm_action if not self.separate_custom_loss: loss_dist = F.mse_loss(action, self.cut_obs(next_obs)) self.loss["dist"] = loss_dist.item() if self.lagrangian_custom_loss: loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target) else: loss += self.custom_loss * loss_dist if self.custom_loss_target_decay is not None: self.loss["custom_loss_target"] = self.custom_loss_target else: distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0) if self.cw_cl_targets is None: loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target)) else: loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target))) self.loss["dist"] = distances.detach() if self.debug_mode: for j in range(distances.shape[0]): self.loss[f"dist/cw/{j}"] = distances[j] return loss def compute_pi_loss(self, obs, next_obs): action, _ = self._actor(obs) denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx) if self.acm_critic: acm_obs = torch.cat([obs, denorm_action], axis=1) critic_action = self.acm(acm_obs) else: critic_action = denorm_action loss = -self._critic(obs, critic_action).mean() return self.add_custom_loss(loss, action, denorm_action, next_obs) def update_custom_loss_param_loss(self): if not self.lagrangian_custom_loss: return dist_loss = self.loss["dist"] if 
self.cw_cl_targets is None: loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target) else: loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target)) if self.separate_custom_loss: for i in range(len(loss)): self.loss[f"custom_loss_param/{i}"] = loss[i].item() self.loss["dist"] = torch.mean(self.loss["dist"]).item() loss = torch.sum(loss) else: self.loss["custom_loss_param"] = loss.item() self.custom_loss_optimizer.zero_grad() loss.backward() self.custom_loss_optimizer.step() def copy_offline_dataset(self, dataset, size): """copies the provided offlineRL dataset into the replay buffer. for the moment assumes D4RL dataset format (a dictionary) and copies elements one-by-one """ i = 0 traj = 0 while i < size: traj += 1 done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i]) obs = torch.tensor(dataset['observations'][i]) prev_idx = self.replay_buffer.add_obs(obs) i += 1 ep_len = 0 while(not done and i < size): nextobs = torch.tensor(dataset['observations'][i]) rew = torch.tensor( dataset['rewards'][i] ) done = torch.tensor( dataset['timeouts'][i] or dataset['terminals'][i] ) action = torch.tensor( dataset['actions'][i] ) end = torch.tensor( dataset['terminals'][i] ) next_idx = self.replay_buffer.add_obs(nextobs) self.replay_buffer.add_timestep( prev_idx, next_idx, nextobs, rew, done, end ) self.replay_buffer.add_acm_action(action) prev_idx = next_idx i += 1 ep_len += 1 print(f"copied offline dataset with {i} samples, contains {traj} trajectories") #sets the internal variables according to the provided offline dataset self.acm_pre_train_samples = i self.buffer_size = i self.max_frames = i self.iterations = i / self.steps_per_epoch #updates std/dev/min/max parameters of the dataset self.update_obs_mean_std(self.replay_buffer) def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs): """SPP variant of rollouts and collect samples if there is enough samples in replay buffer use existing samples to perform actor/critic update otherwise generate new samples till steps_per_epoch number of steps will be added to the replay buffer Args: steps_per_epoch (int): number of samples to collect and train *args, **kwargs: arguments for make_update """ collected = 0 while collected < steps_per_epoch: # important part, # when the replay buffer is filled stop generating new frames, just use the existing buffer # such that the number of used experience in learning is counted correctly if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer: self.stats_logger.frames += 1 collected += 1 self.make_update(*args, **kwargs) continue self.stats_logger.rollouts += 1 obs = self.env.reset() # end - end of the episode from perspective of the simulation # done - end of the episode from perspective of the model end = False obs = self.process_obs(obs) prev_idx = self.replay_buffer.add_obs(obs) ep_len = 0 while not end: obs = self.replay_buffer.normalize(obs) if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames): action = self.initial_act(obs) else: action = self.noise_action(obs, self.act_noise) action_proc = self.process_action(action, obs) prev_obs = obs obs, rew, done, _ = self.env.step(action_proc) ep_len += 1 end = True if ep_len == self.max_ep_len else done done = False if ep_len == self.max_ep_len else done obs = self.process_obs(obs) if self.next_obs_diff is not None: obs = 
self.compute_next_obs_diff(prev_obs, obs) next_idx = self.replay_buffer.add_obs(obs) self.replay_buffer.add_timestep( prev_idx, next_idx, action, rew, done, end ) prev_idx = next_idx self.stats_logger.frames += 1 collected += 1 self.make_update(*args, **kwargs) def update( self, obs: torch.Tensor, next_obs: torch.Tensor, action: torch.Tensor, reward: torch.Tensor, done: torch.Tensor, acm_action: torch.Tensor, ): """DDPG update step Args: obs (torch.Tensor): observations tensor next_obs (torch.Tensor): next observations tensor action (torch.Tensor): actions tensor reward (torch.Tensor): rewards tensor done (torch.Tensor): dones tensor acm_action (torch.Tensor): tensor of acm actions """ for param in self.acm.parameters(): param.requires_grad = False if self.acm_critic: action = acm_action y = self.compute_qfunc_targ(reward, next_obs, done) # Update Q-function by one step y_q = self._critic(obs, action) loss_q = F.mse_loss(y_q, y) self.loss["critic"] = loss_q.item() self.critic_optimizer.zero_grad() loss_q.backward() self.critic_optimizer.step() # Update policy by one step self._critic.eval() loss = self.compute_pi_loss(obs, next_obs) self.loss["actor"] = loss.item() self.actor_optimizer.zero_grad() loss.backward() self.actor_optimizer.step() #update temperature of Lagrangian optimization obj self.update_custom_loss_param_loss() # Update target networks self.update_target_nets() self._critic.train() for param in self.acm.parameters(): param.requires_grad = True def add_tensorboard_logs(self, buffer, done): super().add_tensorboard_logs(buffer, done) if self.lagrangian_custom_loss: self.tensorboard_writer.log_custom_loss_param( self.iteration, self.custom_loss_param) if __name__ == "__main__": #with torch.cuda.device(0): model = DDPG_AcM( # unbiased_update=True, # custom_loss=True, # acm_update_batches=50, # denormalize_actor_out=True, env_name="Pendulum-v0", buffer_size=50000, act_noise=0.05, iterations=100, gamma=0.99, steps_per_epoch=200, stats_freq=5, test_episodes=3, custom_loss=1, lagrangian_custom_loss=False, # tensorboard_dir="logs_ddpg", # tensorboard_comment="", acm_update_freq=200, acm_epochs=1, acm_pre_train_epochs=10, acm_pre_train_samples=10000, use_gpu=False, render=False, ) model.pre_train() model.train()
39.042986
157
0.589963
16,275
0.943096
0
0
0
0
0
0
2,994
0.173495
229f21bdd7be594d33b1093f3cb181d2690aa326
3,714
py
Python
pyroute/poi_osm.py
ftrimble/route-grower
d4343ecc9b13a3e1701c8460c8a1792d08b74567
[ "Apache-2.0" ]
null
null
null
pyroute/poi_osm.py
ftrimble/route-grower
d4343ecc9b13a3e1701c8460c8a1792d08b74567
[ "Apache-2.0" ]
null
null
null
pyroute/poi_osm.py
ftrimble/route-grower
d4343ecc9b13a3e1701c8460c8a1792d08b74567
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python #---------------------------------------------------------------- # OSM POI handler for pyroute # #------------------------------------------------------ # Copyright 2007, Oliver White # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #------------------------------------------------------ from xml.sax import make_parser, handler from poi_base import * import os from xml.sax._exceptions import SAXParseException import urllib class osmPoiModule(poiModule, handler.ContentHandler): def __init__(self, modules): poiModule.__init__(self, modules) self.draw = False self.loadPOIs("all", "amenity|shop=*") def loadPOIs(self, name, search): filename = os.path.join(os.path.dirname(__file__), "data", "poi_%s.osm" % name) url = "http://www.informationfreeway.org/api/0.5/node[%s][%s]" %(search, self.bbox()) if(not os.path.exists(filename)): print "Downloading POIs from OSM" urllib.urlretrieve(url, filename) self.load(filename, os.path.join(os.path.dirname(__file__), "Setup", "poi.txt")) def bbox(self): # TODO: based on location! return "bbox=-6,48,2.5,61" def load(self, filename, listfile): self.filters = [] print "Loading POIs from %s" % listfile f = open(listfile,"r") try: for line in f: if(len(line) > 1): text = line.rstrip() name, filter = text.split('|') group = poiGroup(name) self.groups.append(group) self.filters.append({'name':name,'filter':filter,'group':group}) finally: f.close() if(not os.path.exists(filename)): print "Can't load %s"%filename return elif not os.path.getsize(filename): print "%s is empty"%filename self.inNode = False parser = make_parser() parser.setContentHandler(self) try: parser.parse(filename) except SAXParseException: print "Error while parsing file" #TODO: what should now happens? def startElement(self, name, attrs): if name == "node": self.currentNode = { \ 'lat': float(attrs.get('lat')), 'lon': float(attrs.get('lon'))} self.inNode = True if name == "tag" and self.inNode: self.currentNode[attrs.get('k')] = attrs.get('v') def endElement(self, name): if(name == "node"): self.storeNode(self.currentNode) self.inNode = False def passesFilter(self,n,f): parts = f.split(';') matched = True for part in parts: k,v = part.split('=',1) if(n.get(k,'') != v): matched = False return(matched) def storeNode(self, n): for f in self.filters: if(self.passesFilter(n,f['filter'])): x = poi(n['lat'], n['lon']) x.title = n.get('amenity','') + ': ' + n.get('name', '?') #print "%s matches %s" % (x.title, f['name']) f['group'].items.append(x) def save(self): # Default filename if none was loaded if(self.filename == None): self.filename = os.path.join(os.path.dirname(__file__), "data", "poi.osm") self.saveAs(self.filename) def saveAs(self,filename): if(filename == None): return pass if __name__ == "__main__": nodes = osmPoiModule(None) nodes.sort({'valid':True,'lat':51.3,'lon':-0.2}) #nodes.report()
29.244094
74
0.630856
2,556
0.688207
0
0
0
0
0
0
1,431
0.385299
22a0ba4419e5d5479b0eea3b85e6ded458dffecb
13,025
py
Python
pelutils/logger.py
peleiden/pelutils
9860734c0e06481aa58a9f767a4cfb5129cb48ec
[ "BSD-3-Clause" ]
3
2021-02-28T13:03:12.000Z
2022-01-01T09:53:33.000Z
pelutils/logger.py
peleiden/pelutils
9860734c0e06481aa58a9f767a4cfb5129cb48ec
[ "BSD-3-Clause" ]
72
2020-10-13T09:20:01.000Z
2022-02-26T09:12:21.000Z
pelutils/logger.py
peleiden/pelutils
9860734c0e06481aa58a9f767a4cfb5129cb48ec
[ "BSD-3-Clause" ]
null
null
null
from __future__ import annotations import os import traceback as tb from collections import defaultdict from enum import IntEnum from functools import update_wrapper from itertools import chain from typing import Any, Callable, DefaultDict, Generator, Iterable, Optional from pelutils import get_timestamp, get_repo from .format import RichString class LogLevels(IntEnum): """ Logging levels by priority. Don't set any to 0, as falsiness is used in the code """ SECTION = 6 CRITICAL = 5 ERROR = 4 WARNING = 3 INFO = 2 DEBUG = 1 _STDERR_LEVELS = { LogLevels.CRITICAL, LogLevels.ERROR, LogLevels.WARNING } # https://rich.readthedocs.io/en/stable/appendix/colors.html _TIMESTAMP_COLOR = "#72b9e0" _LEVEL_FORMAT = { LogLevels.SECTION: "bright_yellow", LogLevels.CRITICAL: "red1", LogLevels.ERROR: "red3", LogLevels.WARNING: "gold3", LogLevels.INFO: "chartreuse3", LogLevels.DEBUG: "deep_sky_blue1", } class _LevelManager: """ Used for disabling logging below a certain level Example: with log.level(Levels.WARNING): log.error("This will be logged") log.info("This will not be logged") """ level: LogLevels is_active = False def with_level(self, level: LogLevels | int) -> _LevelManager: self.level = level return self def __enter__(self): self.is_active = True def __exit__(self, *args): self.is_active = False del self.level # Prevent silent failures by having level accidentally set class _LogErrors: """ Used for catching exceptions with logger and logging them before reraising them """ def __init__(self, log): self._log = log def __enter__(self): pass def __exit__(self, et, ev, tb_): if et and self._log._collect: self._log.log_collected() if et: self._log._throw(ev, tb_) class LoggingException(RuntimeError): pass class _Logger: """ A simple logger which creates a log file and pushes strings both to stdout and the log file Sections, verbosity and error logging is supported """ _loggers: DefaultDict[str, dict[str, Any]] _selected_logger: str _maxlen = max(len(l.name) for l in LogLevels) _spacing = 4 * " " _yes = { "j", "y" } _no = { "n" } @property def _logger(self) -> dict: return self._loggers[self._selected_logger] @property def _fpath(self) -> str: return self._logger["fpath"] @property def _default_sep(self) -> str: return self._logger["default_sep"] @property def _include_micros(self) -> bool: return self._logger["include_micros"] @property def _print_level(self) -> LogLevels: return self._logger["print_level"] @property def _level_mgr(self) -> _LevelManager: return self._logger["level_mgr"] @property def _level(self) -> LogLevels: return self._level_mgr.level def __init__(self): self._log_errors = _LogErrors(self) self._collect = False self._collected_log: list[RichString] = list() self._collected_print: list[RichString] = list() self._loggers = defaultdict(dict) self.clean() self.configure(logger_name="print_only", print_level=LogLevels.DEBUG) def configure( self, fpath: Optional[str] = None, # Path to place logger. Any missing directories are created title: Optional[str] = None, # Title on first line of logfile default_seperator = "\n", include_micros = False, # Include microseconds in timestamps log_commit = False, # Log commit of git repository logger_name = "default", # Name of logger append = False, # Set to True to append to old log file instead of overwriting it print_level = LogLevels.INFO, # Highest level that will be printed. All will be logged. None for no print ): """ Configure a logger. 
If not called, the logger will act like a print statement """ if logger_name in self._loggers: raise LoggingException("Logger '%s' already exists. Did you call log.configure(...) twice?" % logger_name) if self._collect: raise LoggingException("Cannot configure a new logger while using collect_logs") self._selected_logger = logger_name self._loggers[logger_name]["fpath"] = os.path.realpath(fpath) if fpath else None self._loggers[logger_name]["default_sep"] = default_seperator self._loggers[logger_name]["include_micros"] = include_micros self._loggers[logger_name]["level_mgr"] = _LevelManager() self._loggers[logger_name]["print_level"] = print_level or len(LogLevels) + 1 if fpath is not None: dirs = os.path.split(fpath)[0] if dirs: os.makedirs(dirs, exist_ok=True) exists = os.path.exists(fpath) with open(fpath, "a" if append else "w", encoding="utf-8") as logfile: logfile.write("\n\n" if append and exists else "") if title is not None: self.section(title + "\n") if log_commit: repo, commit = get_repo() if repo is not None: self.debug( "Executing in repository %s" % repo, "Commit: %s\n" % commit, ) else: self.debug("Unable to find repository that code was executed in") def set_logger(self, logger: str): if logger not in self._loggers: raise LoggingException("Logger '%s' does not exist. Available loggers: %s" % (logger, list(self._loggers))) if self._collect: raise LoggingException("Cannot configure a new logger while using collect_logs") self._selected_logger = logger def level(self, level: LogLevels): """ Log only at given level and above. Use with a with block """ return self._level_mgr.with_level(level) @property def no_log(self): """ Disable logging inside a with block """ return self._level_mgr.with_level(max(LogLevels)+1) @property def log_errors(self): return self._log_errors def __call__(self, *tolog, with_info=True, sep=None, with_print=None, level: LogLevels=LogLevels.INFO): self._log(*tolog, level=level, with_info=with_info, sep=sep, with_print=with_print) def _write_to_log(self, content: RichString): if self._fpath is not None: with open(self._fpath, "a", encoding="utf-8") as logfile: logfile.write(f"{content}\n") @staticmethod def _format(s: str, format: str) -> str: return f"[{format}]{s}[/]" def _log(self, *tolog, level=LogLevels.INFO, with_info=True, sep=None, with_print=None): if not self._loggers: return if self._level_mgr.is_active and level < self._level_mgr.level: return sep = sep or self._default_sep with_print = level >= self._print_level if with_print is None else with_print time = get_timestamp() tolog = sep.join([str(x) for x in tolog]) time_spaces = len(time) * " " level_format = level.name + (self._maxlen - len(level.name)) * " " space = self._spacing + self._maxlen * " " + self._spacing logs = tolog.split("\n") rs = RichString(stderr=level in _STDERR_LEVELS) # Send warning if with_info and tolog: rs.add_string( f"{time}{self._spacing}{level_format}{self._spacing}", self._format(time, _TIMESTAMP_COLOR) +\ self._spacing +\ self._format(level_format, _LEVEL_FORMAT[level]) +\ self._spacing, ) rs.add_string(logs[0]) else: rs.add_string(f"{time_spaces}{space}{logs[0]}".rstrip()) for i in range(1, len(logs)): s = f"\n{time_spaces}{space}{logs[i]}".rstrip() rs.add_string( s if s.strip() else "\n" ) if not self._collect: self._write_to_log(rs) if with_print: rs.print() else: self._collected_log.append(rs) if with_print: self._collected_print.append(rs) def _format_tb(self, error: Exception, tb_) -> list[str]: stack = list(chain.from_iterable([elem.split("\n") for elem in 
tb.format_tb(tb_)])) stack = [line for line in stack if line.strip()] return [ "ERROR: %s thrown with stacktrace" % type(error).__name__, *stack, "%s: %s" % (type(error).__name__, error), ] def _throw(self, error: Exception, tb_=None): stack = list() has_cause = error.__cause__ is not None cur_error = error.__context__ while cur_error: stack += self._format_tb(cur_error, cur_error.__traceback__) if has_cause: stack += ["", "The above exception was the direct cause of the following exception:", ""] else: stack += ["", "During handling of the above exception, another exception occurred:", ""] has_cause = cur_error.__cause__ is not None cur_error = cur_error.__context__ stack += self._format_tb(error, tb_) self.critical(*stack, with_print=False) raise error def _input(self, prompt: str) -> str: self.info("Prompt: '%s'" % prompt, with_print=False) response = input(prompt) self.info("Input: '%s'" % response, with_print=False) return response def input(self, prompt: str | Iterable[str] = "") -> str | Generator[str]: """ Get user input and log both prompt an input If prompt is an iterable, a generator of user inputs will be returned """ self._log("Waiting for user input", with_print=False) if isinstance(prompt, str): return self._input(prompt) else: return (self._input(p) for p in prompt) @classmethod def bool_input(cls, inp: str, default=True) -> bool: """ Parse a yes/no user input """ inp = inp.lower() if default: return inp[0] not in cls._no if inp else True else: return inp[0] in cls._yes if inp else False def _reset_collected(self): self._collected_log = list() self._collected_print = list() def set_collect_mode(self, collect: bool): self._collect = collect if not collect: self._reset_collected() def log_collected(self): if self._collected_log: logs = "\n".join(str(log) for log in self._collected_log) self._write_to_log(logs) if self._collected_print: RichString.multiprint(self._collected_print) def clean(self): """ Resets the loggers and removes all existing logger configurations """ self._loggers = defaultdict(dict) self._selected_logger = "default" def section(self, *tolog, with_info=True, sep=None, with_print=None, newline=True): if newline: self._log("") self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.SECTION) def critical(self, *tolog, with_info=True, sep=None, with_print=None): self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.CRITICAL) def error(self, *tolog, with_info=True, sep=None, with_print=None): self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.ERROR) def warning(self, *tolog, with_info=True, sep=None, with_print=None): self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.WARNING) def info(self, *tolog, with_info=True, sep=None, with_print=None): self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.INFO) def debug(self, *tolog, with_info=True, sep=None, with_print=None): self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.DEBUG) log = _Logger() class collect_logs: """ Wrap functions with this class to have them output all their output at once Useful with multiprocessing, e.g. ``` with mp.Pool() as p: p.map(collect_logs(fun), ...) 
``` Loggers cannot be changed or configured during this """ def __init__(self, fun: Callable): self.fun = fun update_wrapper(self, fun) def __call__(self, *args, **kwargs): log.set_collect_mode(True) return_value = self.fun(*args, **kwargs) log.log_collected() log.set_collect_mode(False) return return_value
36.080332
123
0.61666
12,233
0.939194
0
0
1,203
0.092361
0
0
2,716
0.208522
22a11f4324f76cab0ee6ba121cab810e162f6104
10,942
py
Python
tests/test_metrics.py
aaxelb/django-elasticsearch-metrics
8a02ffc57f57257843834d4f84c41480f4e27fbd
[ "MIT" ]
5
2018-08-21T19:48:39.000Z
2021-04-01T22:11:31.000Z
tests/test_metrics.py
aaxelb/django-elasticsearch-metrics
8a02ffc57f57257843834d4f84c41480f4e27fbd
[ "MIT" ]
18
2018-07-26T16:04:53.000Z
2018-08-30T19:31:30.000Z
tests/test_metrics.py
aaxelb/django-elasticsearch-metrics
8a02ffc57f57257843834d4f84c41480f4e27fbd
[ "MIT" ]
5
2019-04-01T17:47:08.000Z
2022-01-28T17:23:11.000Z
import mock import pytest import datetime as dt from django.utils import timezone from elasticsearch_metrics import metrics from elasticsearch_dsl import IndexTemplate from elasticsearch_metrics import signals from elasticsearch_metrics.exceptions import ( IndexTemplateNotFoundError, IndexTemplateOutOfSyncError, ) from tests.dummyapp.metrics import ( DummyMetric, DummyMetricWithExplicitTemplateName, DummyMetricWithExplicitTemplatePattern, ) class PreprintView(metrics.Metric): provider_id = metrics.Keyword(index=True) user_id = metrics.Keyword(index=True) preprint_id = metrics.Keyword(index=True) class Index: settings = {"refresh_interval": "-1"} class Meta: app_label = "dummyapp" template_name = "osf_metrics_preprintviews" template = "osf_metrics_preprintviews-*" class TestGetIndexName: def test_get_index_name(self): date = dt.date(2020, 2, 14) assert ( PreprintView.get_index_name(date=date) == "osf_metrics_preprintviews_2020.02.14" ) def test_get_index_name_respects_date_format_setting(self, settings): settings.ELASTICSEARCH_METRICS_DATE_FORMAT = "%Y-%m-%d" date = dt.date(2020, 2, 14) assert ( PreprintView.get_index_name(date=date) == "osf_metrics_preprintviews_2020-02-14" ) def test_get_index_name_gets_index_for_today_by_default(self): today = timezone.now().date() today_formatted = today.strftime("%Y.%m.%d") assert PreprintView.get_index_name() == "osf_metrics_preprintviews_{}".format( today_formatted ) class TestGetIndexTemplate: def test_get_index_template_returns_template_with_correct_name_and_pattern(self): template = PreprintView.get_index_template() assert isinstance(template, IndexTemplate) assert template._template_name == "osf_metrics_preprintviews" assert "osf_metrics_preprintviews-*" in template.to_dict()["index_patterns"] def test_get_index_template_respects_index_settings(self): template = PreprintView.get_index_template() assert template._index.to_dict()["settings"] == {"refresh_interval": "-1"} def test_get_index_template_creates_template_with_mapping(self): template = PreprintView.get_index_template() mappings = template.to_dict()["mappings"] assert mappings["doc"]["_source"]["enabled"] is False properties = mappings["doc"]["properties"] assert "timestamp" in properties assert properties["timestamp"] == {"doc_values": True, "type": "date"} assert properties["provider_id"] == {"type": "keyword", "index": True} assert properties["user_id"] == {"type": "keyword", "index": True} assert properties["preprint_id"] == {"type": "keyword", "index": True} # regression test def test_mappings_are_not_shared(self): template1 = DummyMetric.get_index_template() template2 = DummyMetricWithExplicitTemplateName.get_index_template() assert "my_int" in template1.to_dict()["mappings"]["doc"]["properties"] assert "my_keyword" not in template1.to_dict()["mappings"]["doc"]["properties"] assert "my_int" not in template2.to_dict()["mappings"]["doc"]["properties"] assert "my_keyword" in template2.to_dict()["mappings"]["doc"]["properties"] def test_declaring_metric_with_no_app_label_or_template_name_errors(self): with pytest.raises(RuntimeError): class BadMetric(metrics.Metric): pass with pytest.raises(RuntimeError): class MyMetric(metrics.Metric): class Meta: template_name = "osf_metrics_preprintviews" def test_get_index_template_default_template_name(self): template = DummyMetric.get_index_template() assert isinstance(template, IndexTemplate) assert template._template_name == "dummyapp_dummymetric" assert "dummyapp_dummymetric_*" in template.to_dict()["index_patterns"] def 
test_get_index_template_uses_app_label_in_class_meta(self): class MyMetric(metrics.Metric): class Meta: app_label = "myapp" template = MyMetric.get_index_template() assert template._template_name == "myapp_mymetric" def test_template_name_defined_with_no_template_falls_back_to_default_template( self ): template = DummyMetricWithExplicitTemplateName.get_index_template() # template name specified in class Meta assert template._template_name == "dummymetric" # template is not specified, so it's generated assert ( "dummyapp_dummymetricwithexplicittemplatename_*" in template.to_dict()["index_patterns"] ) def test_template_defined_with_no_template_name_falls_back_to_default_name(self): template = DummyMetricWithExplicitTemplatePattern.get_index_template() # template name specified in class Meta assert ( template._template_name == "dummyapp_dummymetricwithexplicittemplatepattern" ) # template is not specified, so it's generated assert "dummymetric-*" in template.to_dict()["index_patterns"] def test_inheritance(self): class MyBaseMetric(metrics.Metric): user_id = metrics.Keyword(index=True) class Index: settings = {"number_of_shards": 2} class Meta: abstract = True class ConcreteMetric(MyBaseMetric): class Meta: app_label = "dummyapp" template = ConcreteMetric.get_index_template() assert template._template_name == "dummyapp_concretemetric" assert template._index.to_dict()["settings"] == {"number_of_shards": 2} def test_source_may_be_enabled(self): class MyMetric(metrics.Metric): class Meta: app_label = "dummyapp" template_name = "mymetric" template = "mymetric-*" source = metrics.MetaField(enabled=True) template = MyMetric.get_index_template() template_dict = template.to_dict() doc = template_dict["mappings"]["doc"] assert doc["_source"]["enabled"] is True class TestRecord: def test_calls_save(self, mock_save): timestamp = dt.datetime(2017, 8, 21) p = PreprintView.record(timestamp=timestamp, provider_id="abc12") assert mock_save.call_count == 1 assert p.timestamp == timestamp assert p.provider_id == "abc12" @mock.patch.object(timezone, "now") def test_defaults_timestamp_to_now(self, mock_now, mock_save): fake_now = dt.datetime(2016, 8, 21) mock_now.return_value = fake_now p = PreprintView.record(provider_id="abc12") assert mock_save.call_count == 1 assert p.timestamp == fake_now class TestSignals: @mock.patch.object(PreprintView, "get_index_template") def test_create_metric_sends_signals(self, mock_get_index_template): mock_pre_index_template_listener = mock.Mock() mock_post_index_template_listener = mock.Mock() signals.pre_index_template_create.connect(mock_pre_index_template_listener) signals.post_index_template_create.connect(mock_post_index_template_listener) PreprintView.sync_index_template() assert mock_pre_index_template_listener.call_count == 1 assert mock_post_index_template_listener.call_count == 1 pre_call_kwargs = mock_pre_index_template_listener.call_args[1] assert "index_template" in pre_call_kwargs assert "using" in pre_call_kwargs post_call_kwargs = mock_pre_index_template_listener.call_args[1] assert "index_template" in post_call_kwargs assert "using" in post_call_kwargs def test_save_sends_signals(self, mock_save): mock_pre_save_listener = mock.Mock() mock_post_save_listener = mock.Mock() signals.pre_save.connect(mock_pre_save_listener, sender=PreprintView) signals.post_save.connect(mock_post_save_listener, sender=PreprintView) provider_id = "12345" user_id = "abcde" preprint_id = "zyxwv" doc = PreprintView( provider_id=provider_id, user_id=user_id, preprint_id=preprint_id ) 
doc.save() assert mock_pre_save_listener.call_count == 1 pre_save_kwargs = mock_pre_save_listener.call_args[1] assert isinstance(pre_save_kwargs["instance"], PreprintView) assert "index" in pre_save_kwargs assert "using" in pre_save_kwargs assert pre_save_kwargs["sender"] is PreprintView assert mock_post_save_listener.call_count == 1 post_save_kwargs = mock_pre_save_listener.call_args[1] assert isinstance(post_save_kwargs["instance"], PreprintView) assert "index" in post_save_kwargs assert "using" in post_save_kwargs assert post_save_kwargs["sender"] is PreprintView @pytest.mark.es class TestIntegration: def test_init(self, client): PreprintView.init() name = PreprintView.get_index_name() mapping = client.indices.get_mapping(index=name) properties = mapping[name]["mappings"]["doc"]["properties"] assert properties["timestamp"] == {"type": "date"} assert properties["provider_id"] == {"type": "keyword"} assert properties["user_id"] == {"type": "keyword"} assert properties["preprint_id"] == {"type": "keyword"} def test_create_document(self, client): provider_id = "12345" user_id = "abcde" preprint_id = "zyxwv" doc = PreprintView( provider_id=provider_id, user_id=user_id, preprint_id=preprint_id ) doc.save() document = PreprintView.get(id=doc.meta.id, index=PreprintView.get_index_name()) # TODO flesh out this test more. Try to query ES? assert document is not None def test_check_index_template(self): with pytest.raises(IndexTemplateNotFoundError): assert PreprintView.check_index_template() is False PreprintView.sync_index_template() assert PreprintView.check_index_template() is True # When settings change, template is out of sync PreprintView._index.settings( **{"refresh_interval": "1s", "number_of_shards": 1, "number_of_replicas": 2} ) with pytest.raises(IndexTemplateOutOfSyncError) as excinfo: assert PreprintView.check_index_template() is False error = excinfo.value assert error.settings_in_sync is False assert error.mappings_in_sync is True assert error.patterns_in_sync is True PreprintView.sync_index_template() assert PreprintView.check_index_template() is True
39.501805
88
0.683787
10,442
0.954305
0
0
3,063
0.279931
0
0
1,750
0.159934
22a124507f9c19ec78061c640c8a18dd5ea530ee
180
py
Python
6 kyu/SumFibs.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
6
2020-09-03T09:32:25.000Z
2020-12-07T04:10:01.000Z
6 kyu/SumFibs.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
1
2021-12-13T15:30:21.000Z
2021-12-13T15:30:21.000Z
6 kyu/SumFibs.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
null
null
null
from functools import lru_cache @lru_cache def fib(n): return n if n<2 else fib(n-1)+fib(n-2) def sum_fibs(n): return sum(j for j in (fib(i) for i in range(n+1)) if j%2==0)
30
65
0.661111
0
0
0
0
65
0.361111
0
0
0
0
22a1b8da531316fb6c21092916dd14f6945d1c1d
1,924
py
Python
tests/unit/test_iris_helpers.py
jvegreg/ESMValCore
03eb1c942bf1dc3be98cb30c3592b42e82a94f16
[ "Apache-2.0" ]
null
null
null
tests/unit/test_iris_helpers.py
jvegreg/ESMValCore
03eb1c942bf1dc3be98cb30c3592b42e82a94f16
[ "Apache-2.0" ]
2
2022-03-02T16:16:06.000Z
2022-03-10T12:58:49.000Z
tests/unit/test_iris_helpers.py
valeriupredoi/ESMValCore
b46b948c47d8579d997b28501f8588f5531aa354
[ "Apache-2.0" ]
null
null
null
"""Tests for :mod:`esmvalcore.iris_helpers`.""" import datetime import iris import numpy as np import pytest from cf_units import Unit from esmvalcore.iris_helpers import date2num, var_name_constraint @pytest.fixture def cubes(): """Test cubes.""" cubes = iris.cube.CubeList([ iris.cube.Cube(0.0, var_name='a', long_name='a'), iris.cube.Cube(0.0, var_name='a', long_name='b'), iris.cube.Cube(0.0, var_name='c', long_name='d'), ]) return cubes @pytest.fixture def units(): return Unit('days since 0001-01-01', calendar='proleptic_gregorian') @pytest.mark.parametrize("date, dtype, expected", [ (datetime.datetime(1, 1, 1), np.float64, 0.0), (datetime.datetime(1, 1, 1), int, 0.0), (datetime.datetime(1, 1, 2, 12), np.float64, 1.5), ]) def test_date2num_scalar(date, dtype, expected, units): num = date2num(date, units, dtype=dtype) assert num == expected assert num.dtype == dtype def test_var_name_constraint(cubes): """Test :func:`esmvalcore.iris_helpers.var_name_constraint`.""" out_cubes = cubes.extract(var_name_constraint('a')) assert out_cubes == iris.cube.CubeList([ iris.cube.Cube(0.0, var_name='a', long_name='a'), iris.cube.Cube(0.0, var_name='a', long_name='b'), ]) out_cubes = cubes.extract(var_name_constraint('b')) assert out_cubes == iris.cube.CubeList([]) out_cubes = cubes.extract(var_name_constraint('c')) assert out_cubes == iris.cube.CubeList([ iris.cube.Cube(0.0, var_name='c', long_name='d'), ]) with pytest.raises(iris.exceptions.ConstraintMismatchError): cubes.extract_cube(var_name_constraint('a')) with pytest.raises(iris.exceptions.ConstraintMismatchError): cubes.extract_cube(var_name_constraint('b')) out_cube = cubes.extract_cube(var_name_constraint('c')) assert out_cube == iris.cube.Cube(0.0, var_name='c', long_name='d')
33.172414
72
0.677755
0
0
0
0
744
0.386694
0
0
254
0.132017
22a26cac9546e3d04238eea2e14e595751d5270c
11,429
py
Python
geo_regions.py
saeed-moghimi-noaa/Maxelev_plot
5bb701d8cb7d64db4c89ea9d7993a8269e57e504
[ "CC0-1.0" ]
null
null
null
geo_regions.py
saeed-moghimi-noaa/Maxelev_plot
5bb701d8cb7d64db4c89ea9d7993a8269e57e504
[ "CC0-1.0" ]
null
null
null
geo_regions.py
saeed-moghimi-noaa/Maxelev_plot
5bb701d8cb7d64db4c89ea9d7993a8269e57e504
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Geo regions for map plot """ __author__ = "Saeed Moghimi" __copyright__ = "Copyright 2017, UCAR/NOAA" __license__ = "GPL" __version__ = "1.0" __email__ = "[email protected]" import matplotlib.pyplot as plt from collections import defaultdict defs = defaultdict(dict) defs['elev']['var'] = 'elev' defs['elev']['vmin'] = -1 defs['elev']['vmax'] = 1 defs['elev']['label'] = 'Elev. [m]' defs['elev']['format']= '%3.1g' defs['elev']['cmap'] = plt.cm.jet_r def get_region_extent(region = 'hsofs_region'): if region == 'hsofs_region': defs['lim']['xmin'] = -99.0 defs['lim']['xmax'] = -52.8 defs['lim']['ymin'] = 5.0 defs['lim']['ymax'] = 46.3 ##IKE elif region == 'caribbean': defs['lim']['xmin'] = -78. defs['lim']['xmax'] = -74. defs['lim']['ymin'] = 20. defs['lim']['ymax'] = 24. defs['lim']['xmin'] = -82. defs['lim']['xmax'] = -71. defs['lim']['ymin'] = 18. defs['lim']['ymax'] = 26. elif region == 'ike_region': defs['lim']['xmin'] = -98.5 defs['lim']['xmax'] = -84.5 defs['lim']['ymin'] = 24. defs['lim']['ymax'] = 31.5 elif region == 'caribbean_bigger': defs['lim']['xmin'] = -78.0 defs['lim']['xmax'] = -58 defs['lim']['ymin'] = 10.0 defs['lim']['ymax'] = 28. elif region == 'ike_local': defs['lim']['xmin'] = -96 defs['lim']['xmax'] = -92 defs['lim']['ymin'] = 28.5 defs['lim']['ymax'] = 30.6 elif region == 'ike_wave': defs['lim']['xmin'] = -95.63 defs['lim']['xmax'] = -88.0 defs['lim']['ymin'] = 28.37 defs['lim']['ymax'] = 30.50 elif region == 'ike_hwm': defs['lim']['xmin'] = -96.15 defs['lim']['xmax'] = -88.5 defs['lim']['ymin'] = 28.45 defs['lim']['ymax'] = 30.7 elif region == 'ike_galv_bay': defs['lim']['xmin'] = -95.92 defs['lim']['xmax'] = -94.81 defs['lim']['ymin'] = 29.37 defs['lim']['ymax'] = 29.96 elif region == 'ike_galv_nwm': defs['lim']['xmin'] = -95.4 defs['lim']['xmax'] = -94.2 defs['lim']['ymin'] = 28.66 defs['lim']['ymax'] = 30.4 elif region == 'ike_wav_break': defs['lim']['xmin'] = -95 defs['lim']['xmax'] = -94.5 defs['lim']['ymin'] = 28.7 + 0.6 defs['lim']['ymax'] = 30.4 - 0.6 elif region == 'ike_f63_timeseries': defs['lim']['xmin'] = -94.2579 - 0.1 defs['lim']['xmax'] = -94.2579 + 0.1 defs['lim']['ymin'] = 29.88642 - 0.1 defs['lim']['ymax'] = 29.88642 + 0.1 elif region == 'ike_f63_timeseries_det': defs['lim']['xmin'] = -94.2300 defs['lim']['xmax'] = -94.1866 defs['lim']['ymin'] = 29.82030 defs['lim']['ymax'] = 29.84397+0.05 elif region == 'ike_cpl_paper': defs['lim']['xmin'] = -95.127481 defs['lim']['xmax'] = -93.233053 defs['lim']['ymin'] = 29.198490 defs['lim']['ymax'] = 30.132224 ##IRMA elif region == 'carib_irma': defs['lim']['xmin'] = -84.0 defs['lim']['xmax'] = -60. defs['lim']['ymin'] = 15.0 defs['lim']['ymax'] = 29. elif region == 'burbuda': defs['lim']['xmin'] = -65.0 defs['lim']['xmax'] = -60. defs['lim']['ymin'] = 15.0 defs['lim']['ymax'] = 19. 
elif region == 'burbuda_zoom': defs['lim']['xmin'] = -63.8 defs['lim']['xmax'] = -60.8 defs['lim']['ymin'] = 16.8 defs['lim']['ymax'] = 18.65 elif region == 'puertorico': defs['lim']['xmin'] = -67.35 defs['lim']['xmax'] = -66.531 defs['lim']['ymin'] = 18.321 defs['lim']['ymax'] = 18.674 elif region == 'puertorico_shore': defs['lim']['xmin'] = -67.284 defs['lim']['xmax'] = -66.350 defs['lim']['ymin'] = 18.360 defs['lim']['ymax'] = 18.890 elif region == 'key_west': defs['lim']['xmin'] = -82.7 defs['lim']['xmax'] = -74.5 defs['lim']['ymin'] = 21.3 defs['lim']['ymax'] = 27.2 elif region == 'key_west_zoom': defs['lim']['xmin'] = -82.2 defs['lim']['xmax'] = -79.4 defs['lim']['ymin'] = 24.1 defs['lim']['ymax'] = 26.1 elif region == 'cuba_zoom': defs['lim']['xmin'] = -82. defs['lim']['xmax'] = -77. defs['lim']['ymin'] = 21.5 defs['lim']['ymax'] = 23.5 elif region == 'key_west_timeseries': defs['lim']['xmin'] = -84.62 defs['lim']['xmax'] = -79.2 defs['lim']['ymin'] = 23.6 defs['lim']['ymax'] = 30.0 elif region == 'pr_timeseries': defs['lim']['xmin'] = -68 defs['lim']['xmax'] = -64 defs['lim']['ymin'] = 17.3 defs['lim']['ymax'] = 19.2 elif region == 'key_west_anim': defs['lim']['xmin'] = -85.5 defs['lim']['xmax'] = -74.5 defs['lim']['ymin'] = 21.0 defs['lim']['ymax'] = 31.5 ## ISABEL elif region == 'isa_region': defs['lim']['xmin'] = -80.2 defs['lim']['xmax'] = -71.6 defs['lim']['ymin'] = 31.9 defs['lim']['ymax'] = 41.9 elif region == 'isa_local': defs['lim']['xmin'] = -77.5 defs['lim']['xmax'] = -74 defs['lim']['ymin'] = 34.5 defs['lim']['ymax'] = 40.0 defs['lim']['xmin'] = -78.5 defs['lim']['xmax'] = -74 defs['lim']['ymin'] = 33.5 defs['lim']['ymax'] = 39.5 elif region == 'isa_hwm': defs['lim']['xmin'] = -76.01 defs['lim']['xmax'] = -75.93 defs['lim']['ymin'] = 36.74 defs['lim']['ymax'] = 36.93 elif region == 'isa_landfall': defs['lim']['xmin'] = -77.8 defs['lim']['xmax'] = -75.2 defs['lim']['ymin'] = 34.2 defs['lim']['ymax'] = 37.5 elif region == 'isa_landfall_zoom': defs['lim']['xmin'] = -77.8 defs['lim']['xmax'] = -75.2 defs['lim']['ymin'] = 34.2 defs['lim']['ymax'] = 36.0 ## SANDY elif region == 'san_track': defs['lim']['xmin'] = -82.0 defs['lim']['xmax'] = -67.0 defs['lim']['ymin'] = 23.0 defs['lim']['ymax'] = 43.6 elif region == 'san_area': defs['lim']['xmin'] = -77.0 defs['lim']['xmax'] = -70.0 defs['lim']['ymin'] = 37.0 defs['lim']['ymax'] = 42.0 elif region == 'san_track': defs['lim']['xmin'] = -82.0 defs['lim']['xmax'] = -67.0 defs['lim']['ymin'] = 23.0 defs['lim']['ymax'] = 43.6 elif region == 'san_area': defs['lim']['xmin'] = -77.0 defs['lim']['xmax'] = -70.0 defs['lim']['ymin'] = 37.0 defs['lim']['ymax'] = 42.0 elif region == 'san_area2': defs['lim']['xmin'] = -75.9 defs['lim']['xmax'] = -73.3 defs['lim']['ymin'] = 38.5 defs['lim']['ymax'] = 41.3 elif region == 'san_newyork': defs['lim']['xmin'] = -74.5 defs['lim']['xmax'] = -73.55 defs['lim']['ymin'] = 40.35 defs['lim']['ymax'] = 41.2 elif region == 'san_delaware': defs['lim']['xmin'] = -75.87 defs['lim']['xmax'] = -74.31 defs['lim']['ymin'] = 38.26 defs['lim']['ymax'] = 40.51 elif region == 'san_jamaica_bay': defs['lim']['xmin'] = -73.963520 defs['lim']['xmax'] = -73.731455 defs['lim']['ymin'] = 40.518074 defs['lim']['ymax'] = 40.699618 elif region == 'irn_region': defs['lim']['xmin'] = -78.41 defs['lim']['xmax'] = -73.48 defs['lim']['ymin'] = 33.55 defs['lim']['ymax'] = 41.31 elif region == 'irn_hwm': defs['lim']['xmin'] = -78.64 defs['lim']['xmax'] = -69.54 defs['lim']['ymin'] = 33.80 defs['lim']['ymax'] = 41.82 ## ANDREW elif 
region == 'and_region': defs['lim']['xmin'] = -98.5 defs['lim']['xmax'] = -77.5 defs['lim']['ymin'] = 23. defs['lim']['ymax'] = 32. elif region == 'and_fl_lu': defs['lim']['xmin'] = -98.5 defs['lim']['xmax'] = -76.5 defs['lim']['ymin'] = 21. defs['lim']['ymax'] = 32. elif region == 'and_local_lu': defs['lim']['xmin'] = -95 defs['lim']['xmax'] = -86 defs['lim']['ymin'] = 28. defs['lim']['ymax'] = 32 elif region == 'and_local_fl': defs['lim']['xmin'] = -86 defs['lim']['xmax'] = -79.5 defs['lim']['ymin'] = 24. defs['lim']['ymax'] = 34 elif region == 'and_local_lu_landfall': defs['lim']['xmin'] = -92.4 defs['lim']['xmax'] = -87.5 defs['lim']['ymin'] = 28. defs['lim']['ymax'] = 31. elif region == 'and_local_fl_landfall': defs['lim']['xmin'] = -80.0 defs['lim']['xmax'] = -80.5 defs['lim']['ymin'] = 25.34 defs['lim']['ymax'] = 25.8 ## operational upgrade # NYC area: -74.027725,40.596099 elif region == 'NYC_area': defs['lim']['xmin'] = -74.027725 - 0.25 defs['lim']['xmax'] = -74.027725 + 0.25 defs['lim']['ymin'] = 40.596099 - 0.2 defs['lim']['ymax'] = 40.596099 + 0.2 # Tampa area: -82.455511,27.921438 elif region == 'Tampa_area': defs['lim']['xmin'] = -82.455511 - 0.25 defs['lim']['xmax'] = -82.455511 + 0.25 defs['lim']['ymin'] = 27.921438 - 0.2 defs['lim']['ymax'] = 27.921438 + 0.2 # Marshall Islands: 169.107299,7.906637 elif region == 'Marshall': defs['lim']['xmin'] = 169.107299 - 0.25 defs['lim']['xmax'] = 169.107299 + 0.25 defs['lim']['ymin'] = 7.906637 - 0.2 defs['lim']['ymax'] = 7.906637 + 0.2 # Palau: 134.461436,7.436438 elif region == 'Palau': defs['lim']['xmin'] = 134.461436 - 0.25 defs['lim']['xmax'] = 134.461436 + 0.25 defs['lim']['ymin'] = 7.436438 - 0.2 defs['lim']['ymax'] = 7.436438 + 0.2 elif region == 'NYC_Area_m': defs['lim']['xmin'] = -73.55 defs['lim']['xmax'] = -74.26 defs['lim']['ymin'] = 40.55 defs['lim']['ymax'] = 40.91 elif region == 'Tampa_Area_m': defs['lim']['xmin'] = -82.37 defs['lim']['xmax'] = -82.75 defs['lim']['ymin'] = 27.63 defs['lim']['ymax'] = 28.05 elif region == 'Marshall_Islands_m': defs['lim']['xmin'] = 164.92 defs['lim']['xmax'] = 173.45 defs['lim']['ymin'] = 5.10 defs['lim']['ymax'] = 11.90 elif region == 'Palau_m': defs['lim']['xmin'] = 134.01 defs['lim']['xmax'] = 134.78 defs['lim']['ymin'] = 6.78 defs['lim']['ymax'] = 8.52 elif region == 'Port_Arthur_m': defs['lim']['xmin'] = -93.60 defs['lim']['xmax'] = -94.24 defs['lim']['ymin'] = 29.62 defs['lim']['ymax'] = 30.14 return defs['lim']
34.116418
52
0.441683
0
0
0
0
0
0
0
0
3,649
0.319276
22a33ada09a97d4c429f1c99f360e9ceb37d5903
771
py
Python
figures/plot_log_figure_paper.py
davidADSP/deepAI_paper
f612e80aa0e8507444228940c54554a83bc16119
[ "MIT" ]
21
2017-09-09T18:41:40.000Z
2022-03-16T06:50:00.000Z
figures/plot_log_figure_paper.py
davidADSP/deepAI_paper
f612e80aa0e8507444228940c54554a83bc16119
[ "MIT" ]
null
null
null
figures/plot_log_figure_paper.py
davidADSP/deepAI_paper
f612e80aa0e8507444228940c54554a83bc16119
[ "MIT" ]
6
2017-09-09T18:41:53.000Z
2022-02-25T08:11:40.000Z
import numpy
import matplotlib.pyplot as plt

fig_convergence = plt.figure(1,figsize=(12,6))

x = numpy.loadtxt('log_deepAI_paper_nonlin_action_long.txt')

plt.subplot(122)
plt.plot(x[:,0])
plt.xlim([0,500])
plt.ylim([-10,200])
plt.xlabel('Steps')
plt.ylabel('Free Action')
plt.axvline(x=230.0,linestyle=':')
plt.axvline(x=250.0,linestyle=':')
plt.axvline(x=270.0,linestyle=':')

ax = plt.subplot(121)
plt.plot(x[:,0])
plt.ylim([-10,200])
ax.axvspan(0, 500, alpha=0.3, color='red')
plt.xlim([0,30000])
plt.xlabel('Steps')
plt.ylabel('Free Action')

fig_convergence.subplots_adjust(left=0.07, bottom=0.1, right=0.95, top=0.95,
                                wspace=0.2, hspace=0.15)
fig_convergence.savefig('fig_convergence.pdf')

plt.show()
24.09375
76
0.657588
0
0
0
0
0
0
0
0
116
0.150454
22a452c901b5e5a2bc4953164caa1bd099196d19
2,938
py
Python
setup.py
matiasgrana/nagios_sql
7858b852cf539da418a1a289e8c06e386b62287a
[ "MIT" ]
null
null
null
setup.py
matiasgrana/nagios_sql
7858b852cf539da418a1a289e8c06e386b62287a
[ "MIT" ]
4
2017-08-08T13:42:39.000Z
2019-11-25T10:29:29.000Z
setup.py
matiasgrana/nagios_sql
7858b852cf539da418a1a289e8c06e386b62287a
[ "MIT" ]
4
2019-01-28T13:58:09.000Z
2019-11-29T14:01:07.000Z
#! python3
# Help from: http://www.scotttorborg.com/python-packaging/minimal.html
# https://docs.python.org/3/distutils/commandref.html#sdist-cmd
# https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# https://docs.python.org/3.4/tutorial/modules.html
# Install it with python setup.py install
# Or use: python setup.py develop (changes to the source files will be
# immediately available)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
from setuptools import setup, find_packages
import os
from os import path
import rstcheck

exec(open('src/version.py').read())
# __version__ comes when execution src/version.py
version = __version__

here = path.abspath(path.dirname(__file__))

with open(os.path.join(here, 'requirements.txt')) as f:
    requires = [x.strip() for x in f if x.strip()]


def check_readme(file='README.rst'):
    """
    Checks readme rst file, to ensure it will upload to pypi and be formatted correctly.
    :param file:
    :return:
    """
    # Get the long description from the relevant file
    with open(file, encoding='utf-8') as f:
        readme_content = f.read()

    errors = list(rstcheck.check(readme_content))
    if errors:
        msg = 'There are errors in {}, errors \n {}'.format(file, errors[0].message)
        raise SystemExit(msg)
    else:
        msg = 'No errors in {}'.format(file)
        print(msg)


readme_file = path.join(here, 'README.rst')
# Get the long description from the relevant file
with open(readme_file, encoding='utf-8') as f:
    long_description = f.read()

check_readme(readme_file)

# Define setuptools specifications
setup(name='nagios_sql',
      version=version,
      description='Nagios plugin with sqlchecks',
      long_description=long_description,  # this is the file README.rst
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3 :: Only',
          'Programming Language :: SQL',
          'Topic :: System :: Monitoring',
          'Topic :: Database :: Database Engines/Servers',
          'Topic :: System :: Systems Administration'
      ],
      url='https://github.com/pablodav/nagios_sql',
      author='Pablo Estigarribia',
      author_email='[email protected]',
      license='MIT',
      packages=find_packages(),
      #include_package_data=True,
      #package_data={
      #    'data': 'src/data/*',
      #},
      #data_files=[('VERSION', ['src/VERSION'])],
      entry_points={
          'console_scripts': [
              'nagios_sql = src.nagios_sql:main'
          ]
      },
      install_requires=requires,
      tests_require=['pytest', 'pytest-cov'],
      zip_safe=False)
32.285714
84
0.636147
0
0
0
0
0
0
0
0
1,614
0.549353
22a4a9fee06a32718975fa561659e922ae3f756e
1,838
py
Python
textnn/utils/test/test_progress_iterator.py
tongr/TextNN
a0294a197d3be284177214e8f019e9fed13dff1a
[ "Apache-2.0" ]
1
2019-03-08T12:12:45.000Z
2019-03-08T12:12:45.000Z
textnn/utils/test/test_progress_iterator.py
tongr/TextNN
a0294a197d3be284177214e8f019e9fed13dff1a
[ "Apache-2.0" ]
16
2019-02-14T11:51:30.000Z
2019-06-11T08:25:53.000Z
textnn/utils/test/test_progress_iterator.py
tongr/TextNN
a0294a197d3be284177214e8f019e9fed13dff1a
[ "Apache-2.0" ]
null
null
null
import io
import sys

from textnn.utils import ProgressIterator


#inspired by https://stackoverflow.com/a/34738440
def capture_sysout(cmd):
    capturedOutput = io.StringIO()          # Create StringIO object
    sys.stdout = capturedOutput             # and redirect stdout.
    cmd()                                   # Call function.
    sys.stdout = sys.__stdout__             # Reset redirect.
    return capturedOutput.getvalue()        # Now works as before.


def test_progress_iterator():
    def progress_generator():
        sum(ProgressIterator([1, 2, 3], interval=0, description=""))
    report = capture_sysout(cmd=progress_generator)
    lines = report.strip().split("\n")

    # expected result (with changing numbers):
    # 1/3 [=========>....................] - ETA: 7s
    # 2/3 [===================>..........] - ETA: 1s
    # 3/3 [==============================] - 4s 1s/step
    assert lines[0].startswith("1/3")
    assert "ETA: " in lines[0]
    assert lines[1].startswith("2/3")
    assert "ETA: " in lines[1]
    assert lines[2].startswith("3/3")
    assert lines[2].endswith("s/step")


def test_progress_iterator_with_statement():
    def progress_generator():
        with ProgressIterator([1,2,3], interval=0, description="") as it:
            sum(it)
    report = capture_sysout(cmd=progress_generator)
    lines = report.strip().split("\n")

    # expected result (with changing numbers):
    # 1/3 [=========>....................] - ETA: 7s
    # 2/3 [===================>..........] - ETA: 1s
    # 3/3 [==============================] - 4s 1s/step
    assert lines[0].startswith("1/3")
    assert "ETA: " in lines[0]
    assert lines[1].startswith("2/3")
    assert "ETA: " in lines[1]
    assert lines[2].startswith("3/3")
    assert lines[2].endswith("s/step")
34.679245
76
0.541349
0
0
0
0
0
0
0
0
615
0.334603
22a5a69bd0005b87e47d0ff6d4ecd35b5d2cdf15
159
py
Python
reach.py
NIKH0610/class5-homework
d4cfb1b28656a37002dff6b1b20bae1253b2ae80
[ "MIT" ]
null
null
null
reach.py
NIKH0610/class5-homework
d4cfb1b28656a37002dff6b1b20bae1253b2ae80
[ "MIT" ]
null
null
null
reach.py
NIKH0610/class5-homework
d4cfb1b28656a37002dff6b1b20bae1253b2ae80
[ "MIT" ]
null
null
null
import os

import numpy as np
import pandas as pd

# The original path mixed a '~/' prefix with a Windows absolute path and used
# unescaped backslashes (a SyntaxError in Python 3); a raw string keeps the
# same location while making the statement valid.
housing_df = pd.read_csv(filepath_or_buffer=r'C:\Users\nikhi\NIKH0610\class5-homework\toys-datasets\boston')
31.8
109
0.805031
0
0
0
0
0
0
0
0
64
0.402516
22a5b5de1219dd90ee90a5e573d5793e913c42ca
379
py
Python
queries/general_queries.py
souparvo/airflow-plugins
0ca7fa634335145b69671054680d5d67de329644
[ "BSD-3-Clause" ]
null
null
null
queries/general_queries.py
souparvo/airflow-plugins
0ca7fa634335145b69671054680d5d67de329644
[ "BSD-3-Clause" ]
null
null
null
queries/general_queries.py
souparvo/airflow-plugins
0ca7fa634335145b69671054680d5d67de329644
[ "BSD-3-Clause" ]
null
null
null
def insert_metatable():
    """SQL query to insert records from table insert into a table on a DB
    """
    return """
    INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }}
    VALUES ('{{ params.schema }}', '{{ params.table }}', {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }}, current_timestamp(), '{{ params.type }}');
    """
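A hedged rendering sketch, assuming insert_metatable() from the file above is importable and jinja2 is installed: Airflow normally supplies params and ti at runtime, so the params dict and the FakeTI class below are made-up placeholders used only to show what the rendered SQL looks like.

from jinja2 import Template

class FakeTI:
    # Stand-in for Airflow's task instance; returns a fake Hive count result.
    def xcom_pull(self, key=None, task_ids=None):
        return [[42]]

rendered = Template(insert_metatable()).render(
    params={'target_schema': 'meta', 'target_table': 'load_log',
            'schema': 'stage', 'table': 'orders',
            'type': 'full', 'count_inserts': 'count_rows_task'},
    ti=FakeTI(),
)
print(rendered)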
42.111111
165
0.62533
0
0
0
0
0
0
0
0
337
0.889182
22a5f31f1b502fe38b7dada2cca91916da3eb320
24,973
py
Python
pyvisa_py/highlevel.py
Handfeger/pyvisa-py
fcfb45895cd44dd922985c3a9d8f3372c8318d63
[ "MIT" ]
1
2019-03-25T20:26:16.000Z
2019-03-25T20:26:16.000Z
pyvisa_py/highlevel.py
Handfeger/pyvisa-py
fcfb45895cd44dd922985c3a9d8f3372c8318d63
[ "MIT" ]
null
null
null
pyvisa_py/highlevel.py
Handfeger/pyvisa-py
fcfb45895cd44dd922985c3a9d8f3372c8318d63
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Highlevel wrapper of the VISA Library. :copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details. :license: MIT, see LICENSE for more details. """ import random from collections import OrderedDict from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast from pyvisa import constants, highlevel, rname from pyvisa.constants import StatusCode from pyvisa.typing import VISAEventContext, VISARMSession, VISASession from pyvisa.util import LibraryPath from . import sessions from .common import logger class PyVisaLibrary(highlevel.VisaLibraryBase): """A pure Python backend for PyVISA. The object is basically a dispatcher with some common functions implemented. When a new resource object is requested to pyvisa, the library creates a Session object (that knows how to perform low-level communication operations) associated with a session handle (a number, usually refered just as session). A call to a library function is handled by PyVisaLibrary if it involves a resource agnostic function or dispatched to the correct session object (obtained from the session id). Importantly, the user is unaware of this. PyVisaLibrary behaves for the user just as NIVisaLibrary. """ #: Live session object identified by a randon session ID sessions: Dict[int, sessions.Session] # Try to import packages implementing lower level functionality. try: from .serial import SerialSession logger.debug("SerialSession was correctly imported.") except Exception as e: logger.debug("SerialSession was not imported %s." % e) try: from .usb import USBRawSession, USBSession logger.debug("USBSession and USBRawSession were correctly imported.") except Exception as e: logger.debug("USBSession and USBRawSession were not imported %s." % e) try: from .tcpip import TCPIPInstrSession, TCPIPSocketSession logger.debug("TCPIPSession was correctly imported.") except Exception as e: logger.debug("TCPIPSession was not imported %s." % e) try: from .gpib import GPIBSession logger.debug("GPIBSession was correctly imported.") except Exception as e: logger.debug("GPIBSession was not imported %s." % e) @staticmethod def get_library_paths() -> Iterable[LibraryPath]: """List a dummy library path to allow to create the library.""" return (LibraryPath("py"),) @staticmethod def get_debug_info() -> Dict[str, Union[str, List[str], Dict[str, str]]]: """Return a list of lines with backend info.""" from . import __version__ d: OrderedDict[str, Union[str, List[str], Dict[str, str]]] = OrderedDict() d["Version"] = "%s" % __version__ for key, val in sessions.Session.iter_valid_session_classes(): key_name = "%s %s" % (key[0].name.upper(), key[1]) d[key_name] = "Available " + val.get_low_level_info() for key, issue in sessions.Session.iter_session_classes_issues(): key_name = "%s %s" % (key[0].name.upper(), key[1]) d[key_name] = issue.split("\n") return d def _init(self) -> None: """Custom initialization code.""" # Map session handle to session object. self.sessions = {} def _register(self, obj: object) -> VISASession: """Creates a random but unique session handle for a session object. Register it in the sessions dictionary and return the value. 
""" session = None while session is None or session in self.sessions: session = random.randint(1000000, 9999999) self.sessions[session] = obj return session def open( self, session: VISARMSession, resource_name: str, access_mode: constants.AccessModes = constants.AccessModes.no_lock, open_timeout: int = constants.VI_TMO_IMMEDIATE, ) -> Tuple[VISASession, StatusCode]: """Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. Parameters ---------- session : VISARMSession Resource Manager session (should always be a session returned from open_default_resource_manager()). resource_name : str Unique symbolic name of a resource. access_mode : constants.AccessModes, optional Specifies the mode by which the resource is to be accessed. open_timeout : int Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. constants.VI_TMO_IMMEDIATE and constants.VI_TMO_INFINITE are used as min and max. Returns ------- VISASession Unique logical identifier reference to a session StatusCode Return value of the library call. """ try: open_timeout = int(open_timeout) except ValueError: raise ValueError( "open_timeout (%r) must be an integer (or compatible type)" % open_timeout ) try: parsed = rname.parse_resource_name(resource_name) except rname.InvalidResourceName: return ( VISASession(0), self.handle_return_value(None, StatusCode.error_invalid_resource_name), ) cls = sessions.Session.get_session_class( parsed.interface_type_const, parsed.resource_class ) sess = cls(session, resource_name, parsed, open_timeout) return self._register(sess), StatusCode.success def clear(self, session: VISASession) -> StatusCode: """Clears a device. Corresponds to viClear function of the VISA library. Parameters ---------- session : typin.VISASession Unique logical identifier to a session. Returns ------- StatusCode Return value of the library call. """ try: sess = self.sessions[session] except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) return self.handle_return_value(session, sess.clear()) def flush( self, session: VISASession, mask: constants.BufferOperation ) -> StatusCode: """Flush the specified buffers. The buffers can be associated with formatted I/O operations and/or serial communication. Corresponds to viFlush function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. mask : constants.BufferOperation Specifies the action to be taken with flushing the buffer. The values can be combined using the | operator. However multiple operations on a single buffer cannot be combined. Returns ------- StatusCode Return value of the library call. """ try: sess = self.sessions[session] except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) return self.handle_return_value(session, sess.flush(mask)) def gpib_command( self, session: VISASession, command_byte: bytes ) -> Tuple[int, StatusCode]: """Write GPIB command bytes on the bus. Corresponds to viGpibCommand function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. command_byte : bytes Data to write. Returns ------- int Number of written bytes StatusCode Return value of the library call. 
""" try: written, st = self.sessions[session].gpib_command(command_byte) return written, self.handle_return_value(session, st) except KeyError: return 0, self.handle_return_value(session, StatusCode.error_invalid_object) def assert_trigger( self, session: VISASession, protocol: constants.TriggerProtocol ) -> StatusCode: """Assert software or hardware trigger. Corresponds to viAssertTrigger function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. protocol : constants.TriggerProtocol Trigger protocol to use during assertion. Returns ------- StatusCode Return value of the library call. """ try: return self.handle_return_value( session, self.sessions[session].assert_trigger(protocol) ) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def gpib_send_ifc(self, session: VISASession) -> StatusCode: """Pulse the interface clear line (IFC) for at least 100 microseconds. Corresponds to viGpibSendIFC function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. Returns ------- StatusCode Return value of the library call. """ try: return self.handle_return_value( session, self.sessions[session].gpib_send_ifc() ) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def gpib_control_ren( self, session: VISASession, mode: constants.RENLineOperation ) -> StatusCode: """Controls the state of the GPIB Remote Enable (REN) interface line. Optionally the remote/local state of the device can also be set. Corresponds to viGpibControlREN function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. mode : constants.RENLineOperation State of the REN line and optionally the device remote/local state. Returns ------- StatusCode Return value of the library call. """ try: return self.handle_return_value( session, self.sessions[session].gpib_control_ren(mode) ) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def gpib_control_atn( self, session: VISASession, mode: constants.ATNLineOperation ) -> StatusCode: """Specifies the state of the ATN line and the local active controller state. Corresponds to viGpibControlATN function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. mode : constants.ATNLineOperation State of the ATN line and optionally the local active controller state. Returns ------- StatusCode Return value of the library call. """ try: return self.handle_return_value( session, self.sessions[session].gpib_control_atn(mode) ) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def gpib_pass_control( self, session: VISASession, primary_address: int, secondary_address: int ) -> StatusCode: """Tell a GPIB device to become controller in charge (CIC). Corresponds to viGpibPassControl function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. primary_address : int Primary address of the GPIB device to which you want to pass control. secondary_address : int Secondary address of the targeted GPIB device. If the targeted device does not have a secondary address, this parameter should contain the value Constants.VI_NO_SEC_ADDR. Returns ------- StatusCode Return value of the library call. 
""" try: return self.handle_return_value( session, self.sessions[session].gpib_pass_control( primary_address, secondary_address ), ) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]: """Reads a status byte of the service request. Corresponds to viReadSTB function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. Returns ------- int Service request status byte StatusCode Return value of the library call. """ try: sess = self.sessions[session] except KeyError: return 0, self.handle_return_value(session, StatusCode.error_invalid_object) stb, status_code = sess.read_stb() return stb, self.handle_return_value(session, status_code) def close( self, session: Union[VISASession, VISAEventContext, VISARMSession] ) -> StatusCode: """Closes the specified session, event, or find list. Corresponds to viClose function of the VISA library. Parameters --------- session : Union[VISASession, VISAEventContext, VISARMSession] Unique logical identifier to a session, event, resource manager. Returns ------- StatusCode Return value of the library call. """ try: sess = self.sessions[session] # The RM session directly references the library. if sess is not self: return self.handle_return_value(session, sess.close()) else: return self.handle_return_value(session, StatusCode.success) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]: """This function returns a session to the Default Resource Manager resource. Corresponds to viOpenDefaultRM function of the VISA library. Returns ------- VISARMSession Unique logical identifier to a Default Resource Manager session StatusCode Return value of the library call. """ return ( cast(VISARMSession, self._register(self)), self.handle_return_value(None, StatusCode.success), ) def list_resources( self, session: VISARMSession, query: str = "?*::INSTR" ) -> Tuple[str, ...]: """Return a tuple of all connected devices matching query. Parameters ---------- session : VISARMSession Unique logical identifier to the resource manager session. query : str Regular expression used to match devices. Returns ------- Tuple[str, ...] Resource names of all the connected devices matching the query. """ # For each session type, ask for the list of connected resources and # merge them into a single list. # HINT: the cast should not be necessary here resources: List[str] = [] for key, st in sessions.Session.iter_valid_session_classes(): resources += st.list_resources() return rname.filter(resources, query) def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]: """Reads data from device or interface synchronously. Corresponds to viRead function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. count : int Number of bytes to be read. Returns ------- bytes Date read StatusCode Return value of the library call. """ # from the session handle, dispatch to the read method of the session object. try: data, status_code = self.sessions[session].read(count) except KeyError: return ( b"", self.handle_return_value(session, StatusCode.error_invalid_object), ) return data, self.handle_return_value(session, status_code) def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]: """Write data to device or interface synchronously. 
Corresponds to viWrite function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. data : bytes Data to be written. Returns ------- int Number of bytes actually transferred StatusCode Return value of the library call. """ # from the session handle, dispatch to the write method of the session object. try: written, status_code = self.sessions[session].write(data) except KeyError: return 0, self.handle_return_value(session, StatusCode.error_invalid_object) return written, self.handle_return_value(session, status_code) def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]: """Reads data through the use of a formatted I/O read buffer. The data can be read from a device or an interface. Corresponds to viBufRead function of the VISA library. Parameters ---------- session : VISASession\ Unique logical identifier to a session. count : int Number of bytes to be read. Returns ------- bytes Data read StatusCode Return value of the library call. """ return self.read(session, count) def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]: """Writes data to a formatted I/O write buffer synchronously. Corresponds to viBufWrite function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. data : bytes Data to be written. Returns ------- int number of written bytes StatusCode return value of the library call. """ return self.write(session, data) def get_attribute( self, session: Union[VISASession, VISAEventContext, VISARMSession], attribute: Union[constants.ResourceAttribute, constants.EventAttribute], ) -> Tuple[Any, StatusCode]: """Retrieves the state of an attribute. Corresponds to viGetAttribute function of the VISA library. Parameters ---------- session : Union[VISASession, VISAEventContext] Unique logical identifier to a session, event, or find list. attribute : Union[constants.ResourceAttribute, constants.EventAttribute] Resource or event attribute for which the state query is made. Returns ------- Any State of the queried attribute for a specified resource StatusCode Return value of the library call. """ try: sess = self.sessions[session] except KeyError: return ( None, self.handle_return_value(session, StatusCode.error_invalid_object), ) state, status_code = sess.get_attribute( cast(constants.ResourceAttribute, attribute) ) return state, self.handle_return_value(session, status_code) def set_attribute( self, session: VISASession, attribute: constants.ResourceAttribute, attribute_state: Any, ) -> StatusCode: """Set the state of an attribute. Corresponds to viSetAttribute function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. attribute : constants.ResourceAttribute Attribute for which the state is to be modified. attribute_state : Any The state of the attribute to be set for the specified object. Returns ------- StatusCode Return value of the library call. """ try: return self.handle_return_value( session, self.sessions[session].set_attribute(attribute, attribute_state), ) except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) def lock( self, session: VISASession, lock_type: constants.Lock, timeout: int, requested_key: Optional[str] = None, ) -> Tuple[str, StatusCode]: """Establishes an access mode to the specified resources. Corresponds to viLock function of the VISA library. 
Parameters ---------- session : VISASession Unique logical identifier to a session. lock_type : constants.Lock Specifies the type of lock requested. timeout : int Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. requested_key : Optional[str], optional Requested locking key in the case of a shared lock. For an exclusive lock it should be None. Returns ------- str Key that can then be passed to other sessions to share the lock, or None for an exclusive lock. StatusCode Return value of the library call. """ try: sess = self.sessions[session] except KeyError: return ( "", self.handle_return_value(session, StatusCode.error_invalid_object), ) key, status_code = sess.lock(lock_type, timeout, requested_key) return key, self.handle_return_value(session, status_code) def unlock(self, session: VISASession) -> StatusCode: """Relinquish a lock for the specified resource. Corresponds to viUnlock function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. Returns ------- StatusCode Return value of the library call. """ try: sess = self.sessions[session] except KeyError: return self.handle_return_value(session, StatusCode.error_invalid_object) return self.handle_return_value(session, sess.unlock()) def disable_event( self, session: VISASession, event_type: constants.EventType, mechanism: constants.EventMechanism, ) -> StatusCode: """Disable notification for an event type(s) via the specified mechanism(s). Corresponds to viDisableEvent function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. event_type : constants.EventType Event type. mechanism : constants.EventMechanism Event handling mechanisms to be disabled. Returns ------- StatusCode Return value of the library call. """ pass def discard_events( self, session: VISASession, event_type: constants.EventType, mechanism: constants.EventMechanism, ) -> StatusCode: """Discard event occurrences for a given type and mechanisms in a session. Corresponds to viDiscardEvents function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. event_type : constans.EventType Logical event identifier. mechanism : constants.EventMechanism Specifies event handling mechanisms to be discarded. Returns ------- StatusCode Return value of the library call. """ pass
31.893997
88
0.610019
24,407
0.977336
0
0
883
0.035358
0
0
14,074
0.563569
22a63f951029bec63e4f61cb892764b3e55fdcae
13,219
py
Python
detectron/utils/webly_vis.py
sisrfeng/NA-fWebSOD
49cb75a9a0d557b05968c6b11b0f17a7043f2077
[ "Apache-2.0" ]
23
2020-03-30T11:48:33.000Z
2022-03-11T06:34:31.000Z
detectron/utils/webly_vis.py
sisrfeng/NA-fWebSOD
49cb75a9a0d557b05968c6b11b0f17a7043f2077
[ "Apache-2.0" ]
9
2020-09-28T07:15:16.000Z
2022-03-25T08:11:06.000Z
detectron/utils/webly_vis.py
sisrfeng/NA-fWebSOD
49cb75a9a0d557b05968c6b11b0f17a7043f2077
[ "Apache-2.0" ]
10
2020-03-30T11:48:34.000Z
2021-06-02T06:12:36.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import cv2 import numpy as np import os import math from PIL import Image, ImageDraw, ImageFont from caffe2.python import workspace from detectron.core.config import cfg from detectron.core.config import get_output_dir def vis_training(cur_iter): prefix = '' if cfg.WEBLY.MINING: prefix = 'mining_' if not (cfg.WSL.DEBUG or (cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)): return output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True) sample_dir = os.path.join(output_dir, 'webly_sample') if not os.path.exists(sample_dir): os.makedirs(sample_dir) for gpu_id in range(cfg.NUM_GPUS): data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids')) ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data')) labels_oh = workspace.FetchBlob('gpu_{}/{}'.format( gpu_id, 'labels_oh')) im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob')) roi_score = workspace.FetchBlob('gpu_{}/{}'.format( gpu_id, prefix + 'rois_pred')) # roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format( # gpu_id, prefix + 'rois_pred_softmax')) rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois')) # anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format( # gpu_id, 'anchor_argmax')) preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id) save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir) save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS, preffix, '', sample_dir) # continue if cfg.WEBLY.ENTROPY: pass else: continue class_weight = workspace.FetchBlob('gpu_{}/{}'.format( gpu_id, prefix + 'rois_class_weight')) rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format( gpu_id, prefix + 'rois_pred_hatE')) rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format( gpu_id, prefix + 'rois_pred_E')) y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format( gpu_id, prefix + 'rois_pred_y_logN__logy')) save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois, cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE, rois_pred_E, y_logN__logy) def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir): batch_size, num_classes = im_score.shape for b in range(batch_size): for c in range(num_classes): # if labels_oh[b][c] == 0.0: # continue if im_score[b][c] < 0.1: continue im = ims[b, :, :, :].copy() channel_swap = (1, 2, 0) im = im.transpose(channel_swap) im += pixel_means im = im.astype(np.uint8) file_name = os.path.join( output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png') cv2.imwrite(file_name, im) def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix, suffix, output_dir): num_rois, num_classes = roi_score.shape batch_size, _, height, weight = ims.shape has_bg = False num_rois_this = min(500, num_rois) for b in range(batch_size): for c in range(num_classes): # if labels_oh[b][c] == 0.0: # continue if im_score[b][c] < 0.1: if has_bg: continue has_bg = True im = ims[b, :, :, :].copy() channel_swap = (1, 2, 0) im = im.transpose(channel_swap) im += pixel_means im = im.astype(np.uint8) im_S = im.copy() im_A = im.copy() argsort = np.argsort(-np.abs(roi_score[:, c])) argsort = argsort[:num_rois_this] argsort = argsort[::-1] if im_score[b][c] < 0.1: scale_p = 1.0 else: scale_p = 1.0 / roi_score[:, c].max() for n in range(num_rois_this): roi = rois[argsort[n]] if roi[0] != b: continue if roi_score[argsort[n]][c] * scale_p < 0.4: thickness = 3 else: 
thickness = 6 jet = gray2jet(roi_score[argsort[n]][c] * scale_p) cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness) file_name = os.path.join( output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' + suffix + '.png') cv2.imwrite(file_name, im_S) continue num_anchors = anchor_argmax.shape[0] for n in range(num_rois): roi = rois[n] if roi[0] != b: continue for a in range(num_anchors): if anchor_argmax[a][n] == 1.0: break jet = gray2jet(1.0 * a / num_anchors) cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1) file_name = os.path.join( output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' + suffix + '.png') cv2.imwrite(file_name, im_A) def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois, pixel_means, prefix, suffix, output_dir, rois_pred_hatE, rois_pred_E, y_logN__logy): num_rois, num_classes = roi_score.shape batch_size, _, height, weight = ims.shape rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1) E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy) E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm) E_class_weight = 1 - E_sum_norm for b in range(batch_size): for c in range(num_classes): if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1: continue im = ims[b, :, :, :].copy() channel_swap = (1, 2, 0) im = im.transpose(channel_swap) im += pixel_means im = im.astype(np.uint8) im_S = im.copy() im_A = im.copy() im_hatE = im.copy() im_E = im.copy() _NUM = 10 argsort_roi = np.argsort(roi_score[:, c])[::-1] argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1] argsort_E = np.argsort(rois_pred_E[:, c])[::-1] if len(argsort_roi) >= _NUM: _NUM = 10 else: _NUM = len(argsort_roi) argsort_roi = argsort_roi[:_NUM][::-1] argsort_hatE = argsort_hatE[:_NUM][::-1] argsort_E = argsort_E[:_NUM][::-1] argsort_hatE = argsort_roi argsort_E = argsort_roi scale_p = 1.0 / roi_score[:, c].max() scale_p = 1.0 for n in range(_NUM): roi = rois[argsort_roi[n]] hatE_roi = rois[argsort_hatE[n]] E_roi = rois[argsort_E[n]] if roi[0] != b: continue # draw roi jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p) bgr = jet rgb = (jet[2], jet[1], jet[0]) # roi location cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), bgr, 2, lineType=cv2.LINE_AA) text = "{:.4f}".format(roi_score[argsort_roi[n]][c]) im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15, jet, rgb, text) if hatE_roi[0] != b: continue # draw rois_pred_hatE # jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p) # bgr = jet # rgb = (jet[2], jet[1], jet[0]) # roi location cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]), (hatE_roi[3], hatE_roi[4]), bgr, 2, lineType=cv2.LINE_AA) # put Text hat_E text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c]) im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]), int(hatE_roi[2]), 15, jet, rgb, text) if E_roi[0] != b: continue # draw rois_pred_E # jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p) # bgr = jet # rgb = (jet[2], jet[1], jet[0]) # roi location cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]), bgr, 2, lineType=cv2.LINE_AA) # put Text E text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c]) im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15, jet, rgb, text) # write im_score text = "{:.4f}".format(im_score[b][c]) im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255), (255, 255, 255), text) # write class_weight text = "{:.4f}".format(class_weight[b][c]) im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255), (255, 255, 255), text) # write class_weight text = 
"{:.4f}".format(E_class_weight[b][c]) im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255), (255, 255, 255), text) file_name_roi = os.path.join( output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' + suffix + '.png') cv2.imwrite(file_name_roi, im_S) file_name_hatE = os.path.join( output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_hatE' + suffix + '.png') cv2.imwrite(file_name_hatE, im_hatE) file_name_E = os.path.join( output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' + suffix + '.png') cv2.imwrite(file_name_E, im_E) def dump_proto_files(model, output_dir): """Save prototxt descriptions of the training network and parameter initialization network.""" with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid: fid.write(str(model.net.Proto())) with open(os.path.join(output_dir, model.param_init_net.Proto().name), 'w') as fid: fid.write(str(model.param_init_net.Proto())) def gray2jet(f): # plot short rainbow RGB a = f / 0.25 # invert and group X = math.floor(a) # this is the integer part Y = math.floor(255 * (a - X)) # fractional part from 0 to 255 Z = math.floor(128 * (a - X)) # fractional part from 0 to 128 if X == 0: r = 0 g = Y b = 128 - Z elif X == 1: r = Y g = 255 b = 0 elif X == 2: r = 255 g = 255 - Z b = 0 elif X == 3: r = 255 g = 128 - Z b = 0 elif X == 4: r = 255 g = 0 b = 0 # opencv is bgr, not rgb return (b, g, r) def putText_with_TNR(img, x, y, size, fontColor, bgColor, string): thickness = 2 font_scale = 1.1 font = cv2.FONT_HERSHEY_SIMPLEX s = cv2.getTextSize(string, font, font_scale, thickness) cv2.rectangle( img, (x + thickness, y + thickness), (x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2), # (0, 140, 255), fontColor, cv2.FILLED, lineType=cv2.LINE_AA) position = (x + thickness + 1, y + thickness + s[0][1] + 1) cv2.putText(img, string, position, font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA) return img # from OpenCV to PIL font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf" img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) font = ImageFont.truetype(font, size) position = (x + 3, y - 2) draw = ImageDraw.Draw(img_PIL) offsetx, offsety = font.getoffset(string) width, height = font.getsize(string) draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3, offsety + y + height - 3), fill=bgColor) draw.text(position, string, font=font, fill=fontColor) # back to OpenCV type img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR) return img_OpenCV
37.341808
87
0.504577
0
0
0
0
0
0
0
0
1,467
0.110977
22a72547959131b60da1f328cdda0445ca0ed7eb
13,740
py
Python
salt/runner.py
StepOneInc/salt
ee210172c37bf0cee224794cd696b38e288e4073
[ "Apache-2.0" ]
1
2016-04-26T03:42:32.000Z
2016-04-26T03:42:32.000Z
salt/runner.py
apergos/salt
106c715d495a9c2bd747c8ca75745236b0d7fb41
[ "Apache-2.0" ]
null
null
null
salt/runner.py
apergos/salt
106c715d495a9c2bd747c8ca75745236b0d7fb41
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- ''' Execute salt convenience routines ''' # Import python libs from __future__ import print_function from __future__ import absolute_import import collections import logging import time import sys import multiprocessing # Import salt libs import salt.exceptions import salt.loader import salt.minion import salt.utils import salt.utils.args import salt.utils.event from salt.client import mixins from salt.output import display_output from salt.utils.error import raise_error from salt.utils.event import tagify import salt.ext.six as six log = logging.getLogger(__name__) class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): ''' The interface used by the :command:`salt-run` CLI tool on the Salt Master It executes :ref:`runner modules <all-salt.runners>` which run on the Salt Master. Importing and using ``RunnerClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. Salt's :conf_master:`external_auth` can be used to authenticate calls. The eauth user must be authorized to execute runner modules: (``@runner``). Only the :py:meth:`master_call` below supports eauth. ''' client = 'runner' tag_prefix = 'run' def __init__(self, opts): self.opts = opts self.functions = salt.loader.runner(opts) # Must be self.functions for mixin to work correctly :-/ self.returners = salt.loader.returners(opts, self.functions) self.outputters = salt.loader.outputters(opts) self.event = salt.utils.event.MasterEvent(self.opts['sock_dir']) def cmd(self, fun, arg, pub_data=None, kwarg=None): ''' Execute a runner function .. code-block:: python >>> opts = salt.config.master_config('/etc/salt/master') >>> runner = salt.runner.RunnerClient(opts) >>> runner.cmd('jobs.list_jobs', []) { '20131219215650131543': { 'Arguments': [300], 'Function': 'test.sleep', 'StartTime': '2013, Dec 19 21:56:50.131543', 'Target': '*', 'Target-type': 'glob', 'User': 'saltdev' }, '20131219215921857715': { 'Arguments': [300], 'Function': 'test.sleep', 'StartTime': '2013, Dec 19 21:59:21.857715', 'Target': '*', 'Target-type': 'glob', 'User': 'saltdev' }, } ''' if kwarg is None: kwarg = {} if not isinstance(kwarg, dict): raise salt.exceptions.SaltInvocationError( 'kwarg must be formatted as a dictionary' ) if pub_data is None: pub_data = {} if not isinstance(pub_data, dict): raise salt.exceptions.SaltInvocationError( 'pub_data must be formatted as a dictionary' ) arglist = salt.utils.args.parse_input(arg) def _append_kwarg(arglist, kwarg): ''' Append the kwarg dict to the arglist ''' kwarg['__kwarg__'] = True arglist.append(kwarg) if kwarg: try: if isinstance(arglist[-1], dict) \ and '__kwarg__' in arglist[-1]: for key, val in six.iteritems(kwarg): if key in arglist[-1]: log.warning( 'Overriding keyword argument {0!r}'.format(key) ) arglist[-1][key] = val else: # No kwargs yet present in arglist _append_kwarg(arglist, kwarg) except IndexError: # arglist is empty, just append _append_kwarg(arglist, kwarg) self._verify_fun(fun) args, kwargs = salt.minion.load_args_and_kwargs( self.functions[fun], arglist, pub_data ) fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) jid = self.returners[fstr]() log.debug('Runner starting with jid {0}'.format(jid)) self.event.fire_event({'runner_job': fun}, tagify([jid, 'new'], 'job')) target = RunnerClient._thread_return data = {'fun': fun, 'jid': jid, 'args': args, 'kwargs': kwargs} args = (self, self.opts, data) ret = jid if self.opts.get('async', False): process = 
multiprocessing.Process( target=target, args=args ) process.start() else: ret = target(*args) return ret @classmethod def _thread_return(cls, instance, opts, data): ''' The multiprocessing process calls back here to stream returns ''' # Runners modules runtime injection: # - the progress event system with the correct jid # - Provide JID if the runner wants to access it directly done = {} progress = salt.utils.event.get_runner_event(opts, data['jid']).fire_progress for func_name, func in instance.functions.items(): if func.__module__ in done: continue mod = sys.modules[func.__module__] mod.__jid__ = data['jid'] mod.__progress__ = progress done[func.__module__] = mod ret = instance.functions[data['fun']](*data['args'], **data['kwargs']) # Sleep for just a moment to let any progress events return time.sleep(0.1) ret_load = {'return': ret, 'fun': data['fun'], 'fun_args': data['args']} # Don't use the invoking processes' event socket because it could be closed down by the time we arrive here. # Create another, for safety's sake. salt.utils.event.MasterEvent(opts['sock_dir']).fire_event(ret_load, tagify([data['jid'], 'return'], 'runner')) try: fstr = '{0}.save_runner_load'.format(opts['master_job_cache']) instance.returners[fstr](data['jid'], ret_load) except KeyError: log.debug( 'The specified returner used for the master job cache ' '"{0}" does not have a save_runner_load function! The results ' 'of this runner execution will not be stored.'.format( opts['master_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) if opts.get('async', False): return data['jid'] else: return ret def master_call(self, **kwargs): ''' Execute a runner function through the master network interface (eauth). ''' load = kwargs load['cmd'] = 'runner' sreq = salt.transport.Channel.factory(self.opts, crypt='clear', usage='master_call') ret = sreq.send(load) if isinstance(ret, collections.Mapping): if 'error' in ret: raise_error(**ret['error']) return ret def _reformat_low(self, low): ''' Format the low data for RunnerClient()'s master_call() function The master_call function here has a different function signature than on WheelClient. So extract all the eauth keys and the fun key and assume everything else is a kwarg to pass along to the runner function to be called. ''' auth_creds = dict([(i, low.pop(i)) for i in [ 'username', 'password', 'eauth', 'token', 'client', ] if i in low]) reformatted_low = {'fun': low.pop('fun')} reformatted_low.update(auth_creds) reformatted_low['kwarg'] = low return reformatted_low def cmd_async(self, low): ''' Execute a runner function asynchronously; eauth is respected This function requires that :conf_master:`external_auth` is configured and the user is authorized to execute runner functions: (``@runner``). .. code-block:: python runner.eauth_async({ 'fun': 'jobs.list_jobs', 'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam', }) ''' reformatted_low = self._reformat_low(low) return self.master_call(**reformatted_low) def cmd_sync(self, low, timeout=None): ''' Execute a runner function synchronously; eauth is respected This function requires that :conf_master:`external_auth` is configured and the user is authorized to execute runner functions: (``@runner``). .. 
code-block:: python runner.eauth_sync({ 'fun': 'jobs.list_jobs', 'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam', }) ''' sevent = salt.utils.event.get_event('master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts) reformatted_low = self._reformat_low(low) job = self.master_call(**reformatted_low) ret_tag = tagify('ret', base=job['tag']) timelimit = time.time() + (timeout or 300) while True: ret = sevent.get_event(full=True) if ret is None: if time.time() > timelimit: raise salt.exceptions.SaltClientTimeout( "RunnerClient job '{0}' timed out".format(job['jid']), jid=job['jid']) else: continue if ret['tag'] == ret_tag: return ret['data']['return'] class Runner(RunnerClient): ''' Execute the salt runner interface ''' def print_docs(self): ''' Print out the documentation! ''' arg = self.opts.get('fun', None) docs = super(Runner, self).get_docs(arg) for fun in sorted(docs): display_output('{0}:'.format(fun), 'text', self.opts) print(docs[fun]) def run(self): ''' Execute the runner sequence ''' ret = {} if self.opts.get('doc', False): self.print_docs() else: try: # Run the runner! jid = super(Runner, self).cmd( self.opts['fun'], self.opts['arg'], self.opts) if self.opts.get('async', False): log.info('Running in async mode. Results of this execution may ' 'be collected by attaching to the master event bus or ' 'by examing the master job cache, if configured.') sys.exit(0) rets = self.get_runner_returns(jid) else: rets = [jid] # Gather the returns for ret in rets: if not self.opts.get('quiet', False): if isinstance(ret, dict) and 'outputter' in ret and ret['outputter'] is not None: print(self.outputters[ret['outputter']](ret['data'])) else: salt.output.display_output(ret, '', self.opts) except salt.exceptions.SaltException as exc: ret = str(exc) print(ret) return ret log.debug('Runner return: {0}'.format(ret)) return ret def get_runner_returns(self, jid, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. ''' if timeout is None: timeout = self.opts['timeout'] * 2 timeout_at = time.time() + timeout last_progress_timestamp = time.time() while True: raw = self.event.get_event(timeout, full=True) time.sleep(0.1) # If we saw no events in the event bus timeout # OR # we have reached the total timeout # AND # have not seen any progress events for the length of the timeout. if raw is None and (time.time() > timeout_at and time.time() - last_progress_timestamp > timeout): # Timeout reached break try: if not raw['tag'].split('/')[1] == 'runner' and raw['tag'].split('/')[2] == jid: continue elif raw['tag'].split('/')[3] == 'progress' and raw['tag'].split('/')[2] == jid: last_progress_timestamp = time.time() yield {'data': raw['data']['data'], 'outputter': raw['data']['outputter']} elif raw['tag'].split('/')[3] == 'return' and raw['tag'].split('/')[2] == jid: yield raw['data']['return'] break # Handle a findjob that might have been kicked off under the covers elif raw['data']['fun'] == 'saltutil.findjob': timeout_at = timeout_at + 10 continue except (IndexError, KeyError): continue
37.135135
118
0.527365
13,134
0.955895
1,718
0.125036
2,023
0.147234
0
0
5,476
0.398544
22a8b0a10c5a619e3d02f83382579627b355c5a9
186
py
Python
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py
RivtLib/replit01
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
[ "MIT" ]
1
2020-08-07T16:09:57.000Z
2020-08-07T16:09:57.000Z
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py
RivtLib/replit01
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
[ "MIT" ]
null
null
null
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py
RivtLib/replit01
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
[ "MIT" ]
null
null
null
# For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html

import os

def get_hook_dirs():
    return [os.path.dirname(__file__)]
31
110
0.747312
0
0
0
0
0
0
0
0
110
0.591398
22a8bf88232fd22e170f70f6a4d8e344cbe114aa
4,257
py
Python
pong-pg.py
s-gv/pong-keras
38a0f25ae0e628f357512d085dc957720d83ece2
[ "0BSD" ]
null
null
null
pong-pg.py
s-gv/pong-keras
38a0f25ae0e628f357512d085dc957720d83ece2
[ "0BSD" ]
null
null
null
pong-pg.py
s-gv/pong-keras
38a0f25ae0e628f357512d085dc957720d83ece2
[ "0BSD" ]
null
null
null
# Copyright (c) 2019 Sagar Gubbi. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import sys import numpy as np import gym import tensorflow as tf from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout from tensorflow.keras.optimizers import RMSprop, Adam import tensorflow.keras.backend as K env = gym.make('PongDeterministic-v4') UP_ACTION = 2 DOWN_ACTION = 3 ACTIONS = [UP_ACTION, DOWN_ACTION] # Neural net model takes the state and outputs action and value for that state model = Sequential([ Dense(512, activation='elu', input_shape=(2*6400,)), Dense(len(ACTIONS), activation='softmax'), ]) model.compile(optimizer=RMSprop(1e-4), loss='sparse_categorical_crossentropy') gamma = 0.99 # preprocess frames def prepro(I): """ prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. http://karpathy.github.io/2016/05/31/rl/ """ if I is None: return np.zeros((6400,)) I = I[35:195] # crop I = I[::2,::2,0] # downsample by factor of 2 I[I == 144] = 0 # erase background (background type 1) I[I == 109] = 0 # erase background (background type 2) I[I != 0] = 1 # everything else (paddles, ball) just set to 1 return I.astype(np.float).ravel() def discount_rewards(r): """ take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ """ discounted_r = np.zeros((len(r),)) running_add = 0 for t in reversed(range(0, len(r))): if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!) running_add = running_add * gamma + r[t] discounted_r[t] = running_add return discounted_r def train(): reward_sums = [] for ep in range(2000): Xs, ys, rewards = [], [], [] prev_obs, obs = None, env.reset() for t in range(99000): x = np.hstack([prepro(obs), prepro(prev_obs)]) prev_obs = obs action_probs = model.predict(x[None, :]) ya = np.random.choice(len(ACTIONS), p=action_probs[0]) action = ACTIONS[ya] obs, reward, done, _ = env.step(action) Xs.append(x) ys.append(ya) rewards.append(reward) #if reward != 0: print(f'Episode {ep} -- step: {t}, ya: {ya}, reward: {reward}') if done: Xs = np.array(Xs) ys = np.array(ys) discounted_rewards = discount_rewards(rewards) advantages = (discounted_rewards - discounted_rewards.mean()) / discounted_rewards.std() print(f'adv: {np.min(advantages):.2f}, {np.max(advantages):.2f}') model.fit(Xs, ys, sample_weight=advantages, epochs=1, batch_size=1024) reward_sum = sum(rewards) reward_sums.append(reward_sum) avg_reward_sum = sum(reward_sums[-50:]) / len(reward_sums[-50:]) print(f'Episode {ep} -- reward_sum: {reward_sum}, avg_reward_sum: {avg_reward_sum}\n') if ep % 20 == 0: model.save_weights('params/model3.h5') break def test(): global env env = gym.wrappers.Monitor(env, './tmp', video_callable=lambda ep_id: True, force=True) model.load_weights('params/model3.h5') reward_sum = 0 prev_obs, obs = None, env.reset() for t in range(99000): x = np.hstack([prepro(obs), prepro(prev_obs)]) prev_obs = obs action_probs = model.predict(x[None, :]) #ya = np.argmax(action_probs[0]) ya = np.random.choice(len(ACTIONS), p=action_probs[0]) action = ACTIONS[ya] obs, reward, done, _ = env.step(action) reward_sum += reward if reward != 0: print(f't: {t} -- reward: {reward}') if done: print(f't: {t} -- reward_sum: {reward_sum}') break def main(): if len(sys.argv) >= 2 and sys.argv[1] == 'test': test() else: train() if __name__ == '__main__': main()
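A small hedged check of the discount_rewards helper in the script above (gamma = 0.99): the running sum resets at every non-zero reward, which in Pong marks a game boundary, so discounted credit never leaks backwards across points. The two calls below assume discount_rewards is importable from that script; the expected outputs are worked by hand.

print(discount_rewards([0.0, 0.0, 1.0]))
# -> [0.9801 0.99   1.    ]   (the winning point is propagated backwards)
print(discount_rewards([0.0, 1.0, 0.0, -1.0]))
# -> [ 0.99  1.   -0.99 -1.  ]  (reset at t=1, so the lost point only affects t>=2)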
33.257813
116
0.597369
0
0
0
0
0
0
0
0
1,137
0.267089