hexsha stringlengths 40-40 | size int64 5-2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3-248 | max_stars_repo_name stringlengths 5-125 | max_stars_repo_head_hexsha stringlengths 40-78 | max_stars_repo_licenses listlengths 1-10 | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 3-248 | max_issues_repo_name stringlengths 5-125 | max_issues_repo_head_hexsha stringlengths 40-78 | max_issues_repo_licenses listlengths 1-10 | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 3-248 | max_forks_repo_name stringlengths 5-125 | max_forks_repo_head_hexsha stringlengths 40-78 | max_forks_repo_licenses listlengths 1-10 | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | content stringlengths 5-2.06M | avg_line_length float64 1-1.02M | max_line_length int64 3-1.03M | alphanum_fraction float64 0-1 | count_classes int64 0-1.6M | score_classes float64 0-1 | count_generators int64 0-651k | score_generators float64 0-1 | count_decorators int64 0-990k | score_decorators float64 0-1 | count_async_functions int64 0-235k | score_async_functions float64 0-1 | count_documentation int64 0-1.04M | score_documentation float64 0-1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7d75d84ab48e0f55426fa5ef9b76cbde3951e30 | 7,027 | py | Python | src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py | stas-prokopiev/ipywidgets_toggle_buttons | 84d1afde1d02c19fb6a41b20e17b9d2b1c7980e2 | ["MIT"] | null | null | null | src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py | stas-prokopiev/ipywidgets_toggle_buttons | 84d1afde1d02c19fb6a41b20e17b9d2b1c7980e2 | ["MIT"] | null | null | null | src/ipywidgets_toggle_buttons/abc_toggle_buttons_with_hide.py | stas-prokopiev/ipywidgets_toggle_buttons | 84d1afde1d02c19fb6a41b20e17b9d2b1c7980e2 | ["MIT"] | null | null | null | """Abstract class for all toggle buttons"""
# Standard library imports
import logging
from collections import OrderedDict
# Third party imports
import ipywidgets
# Local imports
from .abc_toggle_buttons import BaseToggleButtons
from .layouts import DICT_LAYOUT_HBOX_ANY
LOGGER = logging.getLogger(__name__)
class BaseToggleButtonsWithHide(BaseToggleButtons):
"""Abstract class for all toggle buttons
Values are stored in self.widget_parent when displayed is self.widget
Which is updated in the moment when display() is launched
"""
def __init__(
self,
widget_parent,
options_visible=None,
options_hidden=None,
**kwargs
):
"""Initialize object"""
super().__init__(widget_parent, **kwargs)
# hidden attributes to setters
self._options_visible = []
self._options_hidden = []
self._bool_is_hidden_options_created = False
# Create scaffolds inside self.widgets
self._create_scaffold_for_widget()
self._dict_visible_button_by_option = OrderedDict()
self._dict_hidden_button_by_option = OrderedDict()
# Set options
self.options_visible = options_visible
self.options_hidden = options_hidden
self._update_buttons_for_new_options()
@property
def options_visible(self):
"""Getter for visible options used in widget"""
return self._options_visible
@options_visible.setter
def options_visible(self, new_value):
"""Setter for visible options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_visible):
return None
self._options_visible = new_value
self._create_buttons_for_visible_options()
# Update hidden options to delete which exists in new visible
# This will also update the whole widget
self.options_hidden = self._options_hidden
self.options = self._options_visible + self._options_hidden
self._update_widget_view()
@property
def options_hidden(self):
"""Getter for hidden options used in widget"""
return self._options_hidden
@options_hidden.setter
def options_hidden(self, new_value):
"""Setter for hidden options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_hidden):
return None
# Filter out from hidden options all options which exists in main
options_hidden_cleared = []
for str_option in new_value:
if str_option not in self.options_visible:
options_hidden_cleared.append(str_option)
self._options_hidden = options_hidden_cleared
self.options = self._options_visible + self._options_hidden
# self._create_buttons_for_hidden_options()
self._update_widget_view()
def turn_off_all_buttons(self):
"""Mark all buttons as not clicked"""
for str_option in self._dict_visible_button_by_option:
but = self._dict_visible_button_by_option[str_option]
but.button_style = ""
for str_option in self._dict_hidden_button_by_option:
but = self._dict_hidden_button_by_option[str_option]
but.button_style = ""
# Change style of selected hidden button
# self._widget_but_hidden_option_selected.description = "..."
# self._widget_but_hidden_option_selected.button_style = ""
def _update_buttons_for_new_options(self):
"""Update buttons if options were changed"""
self._create_buttons_for_visible_options()
self._bool_is_hidden_options_created = False
# self._create_buttons_for_hidden_options()
def _create_scaffold_for_widget(self):
"""Create scaffold of ipywidget Boxes for self"""
# Main buttons box
self._widget_hbox_main = ipywidgets.HBox()
self._widget_hbox_main.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_main.layout.flex_flow = "row wrap"
# Middle buttons box
self._widget_hbox_middle_buttons = ipywidgets.HBox()
self._widget_hbox_middle_buttons.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
self._create_middle_buttons()
# Hidden buttons box
self._widget_hbox_hidden = ipywidgets.HBox()
self._widget_hbox_hidden.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_hidden.layout.flex_flow = "row wrap"
def _create_buttons_for_visible_options(self):
"""Create buttons for all visible options"""
self._dict_visible_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_visible)
list_buttons = []
for str_option in list(self.options_visible):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_visible_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_main.children = list_buttons
def _create_middle_buttons(self):
"""Create buttons which are in charge what to do with hidden buttons"""
self._wid_but_hide_show = ipywidgets.ToggleButton(
value=False,
description="Show Hidden options",
button_style="info",
)
self._wid_but_hide_show.layout.width = "40%"
self._wid_but_hide_show.observe(
lambda _: self._update_widget_view(), "value")
self._widget_but_hidden_option_selected = ipywidgets.Button(
description="...", disabled=True)
self._widget_but_hidden_option_selected.layout.width = "40%"
self._widget_hbox_middle_buttons.children = [
self._widget_but_hidden_option_selected, self._wid_but_hide_show]
def _create_buttons_for_hidden_options(self):
"""Create buttons for all hidden options"""
self._dict_hidden_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_hidden)
list_buttons = []
for str_option in list(self.options_hidden):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
if str_option in self.value:
but_wid.button_style = "success"
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_hidden_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_hidden.children = list_buttons
| 40.154286 | 91 | 0.672549 | 6,714 | 0.955458 | 0 | 0 | 1,788 | 0.254447 | 0 | 0 | 1,714 | 0.243916 |
c7d7886d9a5f7ae38bdb7d01f1fc136b75bb2a50 | 3,899 | py | Python | Players/DWPMPlayer.py | jokvedaras/game-framework | 9ff60e15d1beff54f94e280501929664ce59afe7 | ["Apache-2.0"] | null | null | null | Players/DWPMPlayer.py | jokvedaras/game-framework | 9ff60e15d1beff54f94e280501929664ce59afe7 | ["Apache-2.0"] | null | null | null | Players/DWPMPlayer.py | jokvedaras/game-framework | 9ff60e15d1beff54f94e280501929664ce59afe7 | ["Apache-2.0"] | null | null | null | __author__ = 'Pat McClernan and Dan Wegmann'
import Player
import Message
# input
#0 for rock
#1 for paper
#2 for scissors
# past move is array of numbers
# our move followed by their move
#Our strategy is to look at all past moves
#In a large number of games, you would expect
# each move to be seen an even amount of times
#So our strategy is to take the least seen move
# and expect it to show up soon
# so we will play to beat that move
class DWPMPlayer(Player.Player):
def __init__(self):
Player.Player.__init__(self)
self.past_moves = []
self.set_name("Dan and Pats Player")
def play(self):
return RpsPlayingStrategy.play(self.past_moves)
def add_past_move(self, move):
"""
adds opponents move to past moves
"""
self.past_moves.append(move)
def get_name(self):
return self.name
def notify(self, message):
# We use notifications to store opponent's moves in past rounds
# Process match-start and round-end messages
# At the start of the match, clear opponent moves history since a new match has started
# At the end of a round, append move to opponent's move history. Move history is used
# to compute the next move played.
if message.is_match_start_message():
players = message.get_players()
if players[0] == self or players[1] == self:
self.reset()
elif message.is_round_end_message():
players = message.get_players()
# Check if this message is for me and only then proceed
if (players[0] == self) or (players[1] == self):
# In this case, (by convention) the info is a tuple of the moves made and result
# e.g. ((1, 0), (1,0)) which
# means player 1 played paper (1), the player 2 played rock(0) and the result was that
# player 1 won (got 1 point) and player 2 lost (got 0 point)
moves, result = message.get_info()
# RPS is a two person game; figure out which of the players is me
# and which one is the opponent
if players[0] == self:
opponent = 1
else:
opponent = 0
# Update opponent's past moves history
self.add_past_move(moves[opponent])
def reset(self):
self.past_moves = []
def set_name(self, name):
self.name = name
class RpsPlayingStrategy(object):
@staticmethod
def play(past_moves):
"""
our player assumes that given a high number of games, all 3 different moves of opponent will be used
an equal number of times. Given a list of past_moves, we can counter an opponent's assumed move
"""
rock = 0
paper = 0
scissors = 0
for this_move in list(past_moves):
if this_move == 0:
rock += 1
elif this_move == 1:
paper += 1
elif this_move == 2:
scissors += 1
#determine which move has been used least
if (rock < paper) and (rock < scissors):
move = 0
elif paper < scissors:
move = 1
else:
move = 2
move = (move + 1) % 3
return move
# Test driver
# Run by typing "python3 RpsPlayerExample.py"
if __name__ == "__main__":
    player = DWPMPlayer()
    opponent = DWPMPlayer()
players = [opponent, player]
fakemoves = (1, 2)
fakeresult = (0, 1)
player.notify(Message.Message.get_match_start_message(players))
player.notify(Message.Message.get_round_start_message(players))
move = player.play()
print ("Move played: ", move)
player.notify(Message.Message.get_round_end_message(players, fakemoves, fakeresult))
| 32.22314 | 108 | 0.598359 | 2,918 | 0.748397 | 0 | 0 | 815 | 0.209028 | 0 | 0 | 1,580 | 0.405232 |
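The comments in the DWPMPlayer row above describe a counting strategy: track how often the opponent has played each move, assume the least-seen move is "due", and play its counter. A small self-contained check of that rule (illustrative only, not part of the dataset row):

```python
# Convention from the row above: 0 = rock, 1 = paper, 2 = scissors.
# With opponent history [0, 0, 1], scissors (2) is the least-seen move, so the
# strategy expects scissors next and plays (2 + 1) % 3 == 0, i.e. rock.
past_moves = [0, 0, 1]
counts = [past_moves.count(m) for m in (0, 1, 2)]  # [2, 1, 0]
least_seen = counts.index(min(counts))             # 2 (scissors)
counter_move = (least_seen + 1) % 3                # 0 (rock beats scissors)
assert counter_move == 0
```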
c7d7ef9a92fb0bfab05a3bc1de9e8efb6f62b67d | 1,023 | py | Python | example/example.py | mowshon/age-and-gender | e5c912f6ba739f30a45c04208b6d16500e4488cd | ["MIT"] | 81 | 2020-06-17T12:53:03.000Z | 2022-03-11T20:02:46.000Z | example/example.py | mowshon/age-and-gender | e5c912f6ba739f30a45c04208b6d16500e4488cd | ["MIT"] | 4 | 2020-06-18T09:28:12.000Z | 2021-07-13T09:16:29.000Z | example/example.py | mowshon/age-and-gender | e5c912f6ba739f30a45c04208b6d16500e4488cd | ["MIT"] | 17 | 2020-06-18T07:08:09.000Z | 2022-03-31T03:56:58.000Z | from age_and_gender import *
from PIL import Image, ImageDraw, ImageFont
data = AgeAndGender()
data.load_shape_predictor('models/shape_predictor_5_face_landmarks.dat')
data.load_dnn_gender_classifier('models/dnn_gender_classifier_v1.dat')
data.load_dnn_age_predictor('models/dnn_age_predictor_v1.dat')
filename = 'test-image.jpg'
img = Image.open(filename).convert("RGB")
result = data.predict(img)
font = ImageFont.truetype("Acme-Regular.ttf", 20)
for info in result:
shape = [(info['face'][0], info['face'][1]), (info['face'][2], info['face'][3])]
draw = ImageDraw.Draw(img)
gender = info['gender']['value'].title()
gender_percent = int(info['gender']['confidence'])
age = info['age']['value']
age_percent = int(info['age']['confidence'])
draw.text(
(info['face'][0] - 10, info['face'][3] + 10), f"{gender} (~{gender_percent}%)\n{age} y.o. (~{age_percent}%).",
fill='white', font=font, align='center'
)
draw.rectangle(shape, outline="red", width=5)
img.show()
| 31 | 118 | 0.672532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.329423 |
c7d86ca9e9717fc1914525f4cf4555781fc27cb0 | 1,463 | py | Python | code/generate_games.py | jppg/pygame-tictactoe | f7283a71bb289601b4b8ee0b0bdbe731e67fa8a7 | ["MIT"] | null | null | null | code/generate_games.py | jppg/pygame-tictactoe | f7283a71bb289601b4b8ee0b0bdbe731e67fa8a7 | ["MIT"] | null | null | null | code/generate_games.py | jppg/pygame-tictactoe | f7283a71bb289601b4b8ee0b0bdbe731e67fa8a7 | ["MIT"] | null | null | null | from tictactoe import TicTacToe
import random
import csv
import os
gameNr = 1
gameLimit = 10000
lst_moves_1 = []
lst_moves_2 = []
while gameNr <= gameLimit:
print("+++++++++++")
print("Game#", gameNr)
game = TicTacToe()
tmp_moves_1 = []
tmp_moves_2 = []
while game.get_winner() == 0 and game.possible_moves() > 0:
pos = game.get_positions().copy()
while game.possible_moves() > 0:
move = random.randint(0,9)
if game.play(int(move)):
if game.get_player() == 1:
tmp_moves_2.append([gameNr] + [game.get_turn() - 1] + pos + [move])
else:
tmp_moves_1.append([gameNr] + [game.get_turn() - 1] + pos + [move])
break
print("Winner of game ", gameNr, "is", game.get_winner())
if game.get_winner() == 1:
lst_moves_1.append(tmp_moves_1)
#lst_moves_1.append(tmp_moves_1[len(tmp_moves_1) - 1])
else:
#lst_moves_2.append(tmp_moves_2[len(tmp_moves_2) - 1])
lst_moves_2.append(tmp_moves_2)
#print("List X: ", lst_moves_1)
#print("List O: ", lst_moves_2)
game.print_board()
gameNr = gameNr + 1
with open('moves_1.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_1:
writer.writerows(row)
with open('moves_2.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_2:
writer.writerows(row) | 27.603774 | 87 | 0.580314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.168831 |
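The generate_games.py row above writes each recorded position as a CSV row of the form [game number, turn, board positions..., chosen move]. A minimal sketch of reading those files back, assuming the positions are stored as integers (the exact column count depends on what TicTacToe.get_positions() returns):

```python
import csv

# Sketch for consuming the generated files; the column layout is inferred from
# the writer above: game number, turn, board positions, then the chosen move.
with open('moves_1.csv', newline='') as f:
    for row in csv.reader(f):
        game_nr, turn = int(row[0]), int(row[1])
        positions = [int(x) for x in row[2:-1]]  # board state before the move
        move = int(row[-1])                      # move played by the eventual winner
        # (positions, move) pairs could now feed a training set.
```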
c7d9eaf5171771685897ba7e8ba2988b57091181 | 350 | py | Python | applications/CoSimulationApplication/custom_data_structure/pyKratos/IntervalUtility.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | ["BSD-4-Clause"] | 2 | 2019-10-25T09:28:10.000Z | 2019-11-21T12:51:46.000Z | applications/CoSimulationApplication/custom_data_structure/pyKratos/IntervalUtility.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | ["BSD-4-Clause"] | 13 | 2019-10-07T12:06:51.000Z | 2020-02-18T08:48:33.000Z | applications/CoSimulationApplication/custom_data_structure/pyKratos/IntervalUtility.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | ["BSD-4-Clause"] | null | null | null | from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# TODO this should be implemented, see "kratos/utilities/interval_utility.h"
class IntervalUtility(object):
def __init__(self, settings):
pass
def IsInInterval(self, current_time):
return True | 38.888889 | 131 | 0.757143 | 140 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.402857 |
c7dc267a8e2592a1c24d3b8c06a265a370010c46 | 2,906 | py | Python | stixcore/tmtc/tests/test_packets.py | nicHoch/STIXCore | 16822bbb37046f8e6c03be51909cfc91e9822cf7 | ["BSD-3-Clause"] | 1 | 2022-03-31T13:42:43.000Z | 2022-03-31T13:42:43.000Z | stixcore/tmtc/tests/test_packets.py | nicHoch/STIXCore | 16822bbb37046f8e6c03be51909cfc91e9822cf7 | ["BSD-3-Clause"] | 192 | 2020-11-03T22:40:19.000Z | 2022-03-31T15:17:13.000Z | stixcore/tmtc/tests/test_packets.py | nicHoch/STIXCore | 16822bbb37046f8e6c03be51909cfc91e9822cf7 | ["BSD-3-Clause"] | 3 | 2020-11-09T15:05:18.000Z | 2022-01-21T07:52:51.000Z |
import bitstring
import pytest
from stixcore.data.test import test_data
from stixcore.idb.manager import IDBManager
from stixcore.tmtc.packets import (
SOURCE_PACKET_HEADER_STRUCTURE,
TC_DATA_HEADER_STRUCTURE,
TM_DATA_HEADER_STRUCTURE,
SourcePacketHeader,
TCPacket,
TMDataHeader,
TMPacket,
)
from stixcore.tmtc.tm.tm_1 import TM_1_1
@pytest.fixture
def idb():
return IDBManager(test_data.idb.DIR).get_idb("2.26.34")
@pytest.mark.parametrize('class_header', [(SourcePacketHeader, SOURCE_PACKET_HEADER_STRUCTURE),
(TMDataHeader, TM_DATA_HEADER_STRUCTURE)])
def test_tmtc_headers(class_header):
cls, header = class_header
test_fmt = ', '.join(header.values())
test_values = {n: 2**int(v.split(':')[-1])-1 for n, v in header.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
sph = cls(test_binary)
assert all([getattr(sph, key) == test_values[key]
for key in header.keys() if not key.startswith('spare')])
def test_tm_packet(idb):
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TM_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TMPacket(test_binary, idb=idb)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TM_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tc_packet():
combind_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TC_DATA_HEADER_STRUCTURE}
test_fmt = ', '.join(combind_structures.values())
test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
combind_structures.items()}
test_values['process_id'] = 90
test_values['packet_category'] = 12
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TCPacket(test_binary)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TC_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tm_1_1(idb):
packet = TM_1_1('0x0da1c066000d100101782628a9c4e71e1dacc0a0', idb=idb)
assert packet.source_packet_header.process_id == 90
assert packet.source_packet_header.packet_category == 1
assert packet.data_header.service_type == 1
assert packet.data_header.service_subtype == 1
| 41.514286 | 97 | 0.699931 | 0 | 0 | 0 | 0 | 674 | 0.231934 | 0 | 0 | 152 | 0.052306 |
c7dcc75b55961bd952da5e374d98d1ab7d3f5c96 | 40,969 | py | Python | python/thunder/rdds/fileio/seriesloader.py | broxtronix/thunder | 4dad77721e2c9e225f94a6a5366d51ec83ac4690 | ["Apache-2.0"] | null | null | null | python/thunder/rdds/fileio/seriesloader.py | broxtronix/thunder | 4dad77721e2c9e225f94a6a5366d51ec83ac4690 | ["Apache-2.0"] | null | null | null | python/thunder/rdds/fileio/seriesloader.py | broxtronix/thunder | 4dad77721e2c9e225f94a6a5366d51ec83ac4690 | ["Apache-2.0"] | null | null | null | """Provides SeriesLoader object and helpers, used to read Series data from disk or other filesystems.
"""
from collections import namedtuple
import json
from numpy import array, arange, frombuffer, load, ndarray, unravel_index, vstack
from numpy import dtype as dtypeFunc
from scipy.io import loadmat
from cStringIO import StringIO
import itertools
import struct
import urlparse
import math
from thunder.rdds.fileio.writers import getParallelWriterForPath
from thunder.rdds.keys import Dimensions
from thunder.rdds.fileio.readers import getFileReaderForPath, FileNotFoundError, appendExtensionToPathSpec
from thunder.rdds.imgblocks.blocks import SimpleBlocks
from thunder.rdds.series import Series
from thunder.utils.common import parseMemoryString, smallestFloatType
class SeriesLoader(object):
"""Loader object used to instantiate Series data stored in a variety of formats.
"""
def __init__(self, sparkContext, minPartitions=None):
"""Initialize a new SeriesLoader object.
Parameters
----------
sparkcontext: SparkContext
The pyspark SparkContext object used by the current Thunder environment.
minPartitions: int
minimum number of partitions to use when loading data. (Used by fromText, fromMatLocal, and fromNpyLocal)
"""
from thunder.utils.aws import AWSCredentials
self.sc = sparkContext
self.minPartitions = minPartitions
self.awsCredentialsOverride = AWSCredentials.fromContext(sparkContext)
def _checkOverwrite(self, outputDirPath):
from thunder.utils.common import raiseErrorIfPathExists
raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self.awsCredentialsOverride)
def fromArrays(self, arrays, npartitions=None):
"""
Create a Series object from a sequence of 1d numpy arrays on the driver.
"""
# recast singleton
if isinstance(arrays, ndarray):
arrays = [arrays]
# check shape and dtype
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# generate linear keys
keys = map(lambda k: (k,), xrange(0, len(arrays)))
return Series(self.sc.parallelize(zip(keys, arrays), npartitions), dtype=str(dtype))
def fromArraysAsImages(self, arrays):
"""Create a Series object from a sequence of numpy ndarrays resident in memory on the driver.
The arrays will be interpreted as though each represents a single time point - effectively the same
as if converting Images to a Series, with each array representing a volume image at a particular
point in time. Thus in the resulting Series, the value of the record with key (0,0,0) will be
array([arrays[0][0,0,0], arrays[1][0,0,0],... arrays[n][0,0,0]).
The dimensions of the resulting Series will be *opposite* that of the passed numpy array. Their dtype will not
be changed.
"""
# if passed a single array, cast it to a sequence of length 1
if isinstance(arrays, ndarray):
arrays = [arrays]
# check that shapes of passed arrays are consistent
shape = arrays[0].shape
dtype = arrays[0].dtype
for ary in arrays:
if not ary.shape == shape:
raise ValueError("Inconsistent array shapes: first array had shape %s, but other array has shape %s" %
(str(shape), str(ary.shape)))
if not ary.dtype == dtype:
raise ValueError("Inconsistent array dtypes: first array had dtype %s, but other array has dtype %s" %
(str(dtype), str(ary.dtype)))
# get indices so that fastest index changes first
shapeiters = (xrange(n) for n in shape)
keys = [idx[::-1] for idx in itertools.product(*shapeiters)]
values = vstack([ary.ravel() for ary in arrays]).T
dims = Dimensions.fromTuple(shape[::-1])
return Series(self.sc.parallelize(zip(keys, values), self.minPartitions), dims=dims, dtype=str(dtype))
@staticmethod
def __normalizeDatafilePattern(dataPath, ext):
dataPath = appendExtensionToPathSpec(dataPath, ext)
# we do need to prepend a scheme here, b/c otherwise the Hadoop based readers
# will adopt their default behavior and start looking on hdfs://.
parseResult = urlparse.urlparse(dataPath)
if parseResult.scheme:
# this appears to already be a fully-qualified URI
return dataPath
else:
# this looks like a local path spec
# check whether we look like an absolute or a relative path
import os
dirComponent, fileComponent = os.path.split(dataPath)
if not os.path.isabs(dirComponent):
# need to make relative local paths absolute; our file scheme parsing isn't all that it could be.
dirComponent = os.path.abspath(dirComponent)
dataPath = os.path.join(dirComponent, fileComponent)
return "file://" + dataPath
def fromText(self, dataPath, nkeys=None, ext="txt", dtype='float64'):
"""
Loads Series data from text files.
Parameters
----------
dataPath : string
Specifies the file or files to be loaded. dataPath may be either a URI (with scheme specified) or a path
on the local filesystem.
If a path is passed (determined by the absence of a scheme component when attempting to parse as a URI),
and it is not already a wildcard expression and does not end in <ext>, then it will be converted into a
wildcard pattern by appending '/*.ext'. This conversion can be avoided by passing a "file://" URI.
dtype: dtype or dtype specifier, default 'float64'
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
def parse(line, nkeys_):
vec = [float(x) for x in line.split(' ')]
ts = array(vec[nkeys_:], dtype=dtype)
keys = tuple(int(x) for x in vec[:nkeys_])
return keys, ts
lines = self.sc.textFile(dataPath, self.minPartitions)
data = lines.map(lambda x: parse(x, nkeys))
return Series(data, dtype=str(dtype))
# keytype, valuetype here violate camelCasing convention for consistence with JSON conf file format
BinaryLoadParameters = namedtuple('BinaryLoadParameters', 'nkeys nvalues keytype valuetype')
BinaryLoadParameters.__new__.__defaults__ = (None, None, 'int16', 'int16')
def __loadParametersAndDefaults(self, dataPath, confFilename, nkeys, nvalues, keyType, valueType):
"""Collects parameters to use for binary series loading.
Priority order is as follows:
1. parameters specified as keyword arguments;
2. parameters specified in a conf.json file on the local filesystem;
3. default parameters
Returns
-------
BinaryLoadParameters instance
"""
params = self.loadConf(dataPath, confFilename=confFilename)
# filter dict to include only recognized field names:
for k in params.keys():
if k not in SeriesLoader.BinaryLoadParameters._fields:
del params[k]
keywordParams = {'nkeys': nkeys, 'nvalues': nvalues, 'keytype': keyType, 'valuetype': valueType}
for k, v in keywordParams.items():
if not v:
del keywordParams[k]
params.update(keywordParams)
return SeriesLoader.BinaryLoadParameters(**params)
@staticmethod
def __checkBinaryParametersAreSpecified(paramsObj):
"""Throws ValueError if any of the field values in the passed namedtuple instance evaluate to False.
Note this is okay only so long as zero is not a valid parameter value. Hmm.
"""
missing = []
for paramName, paramVal in paramsObj._asdict().iteritems():
if not paramVal:
missing.append(paramName)
if missing:
raise ValueError("Missing parameters to load binary series files - " +
"these must be given either as arguments or in a configuration file: " +
str(tuple(missing)))
def fromBinary(self, dataPath, ext='bin', confFilename='conf.json',
nkeys=None, nvalues=None, keyType=None, valueType=None,
newDtype='smallfloat', casting='safe', maxPartitionSize='32mb'):
"""
Load a Series object from a directory of binary files.
Parameters
----------
dataPath : string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://", or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
newDtype : dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting : 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
maxPartitionSize : str, optional, default = '32mb'
Maximum size of partitions as Java-style memory, will indirectly control the number of partitions
"""
paramsObj = self.__loadParametersAndDefaults(dataPath, confFilename, nkeys, nvalues, keyType, valueType)
self.__checkBinaryParametersAreSpecified(paramsObj)
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
keyDtype = dtypeFunc(paramsObj.keytype)
valDtype = dtypeFunc(paramsObj.valuetype)
keySize = paramsObj.nkeys * keyDtype.itemsize
recordSize = keySize + paramsObj.nvalues * valDtype.itemsize
from thunder.utils.common import parseMemoryString
if isinstance(maxPartitionSize, basestring):
size = parseMemoryString(maxPartitionSize)
else:
raise Exception("Invalid size specification")
hadoopConf = {'recordLength': str(recordSize), 'mapred.max.split.size': str(size)}
lines = self.sc.newAPIHadoopFile(dataPath, 'thunder.util.io.hadoop.FixedLengthBinaryInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.BytesWritable',
conf=hadoopConf)
data = lines.map(lambda (_, v):
(tuple(int(x) for x in frombuffer(buffer(v, 0, keySize), dtype=keyDtype)),
frombuffer(buffer(v, keySize), dtype=valDtype)))
return Series(data, dtype=str(valDtype), index=arange(paramsObj.nvalues)).astype(newDtype, casting)
def _getSeriesBlocksFromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Create an RDD of <string blocklabel, (int k-tuple indices, array of datatype values)>
Parameters
----------
dataPath: string URI or local filesystem path
Specifies the directory or files to be loaded. May be formatted as a URI string with scheme (e.g. "file://",
"s3n://" or "gs://"). If no scheme is present, will be interpreted as a path on the local filesystem. This path
must be valid on all workers. Datafile may also refer to a single file, or to a range of files specified
by a glob-style expression using a single wildcard character '*'.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Series data must be floating-point. Input data will be cast to the
requested `newdtype` - see numpy `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
Returns
---------
pair of (RDD, ntimepoints)
RDD: sequence of keys, values pairs
(call using flatMap)
RDD Key: tuple of int
zero-based indicies of position within original image volume
RDD Value: numpy array of datatype
series of values at position across loaded image volumes
ntimepoints: int
number of time points in returned series, determined from number of stack files found at dataPath
newDtype: string
string representation of numpy data type of returned blocks
"""
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
totalDim = reduce(lambda x_, y_: x_*y_, dims)
dtype = dtypeFunc(dtype)
if newDtype is None or newDtype == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
dataSize = totalDim * len(filenames) * dtype.itemsize
nblocks = max(dataSize / blockSize, 1) # integer division
if len(dims) >= 3:
# for 3D stacks, do calculations to ensure that
# different planes appear in distinct files
blocksPerPlane = max(nblocks / dims[-1], 1)
pixPerPlane = reduce(lambda x_, y_: x_*y_, dims[:-1]) # all but last dimension
# get the greatest number of blocks in a plane (up to as many as requested) that still divide the plane
# evenly. This will always be at least one.
kUpdated = [x for x in range(1, blocksPerPlane+1) if not pixPerPlane % x][-1]
nblocks = kUpdated * dims[-1]
blockSizePerStack = (totalDim / nblocks) * dtype.itemsize
else:
# otherwise just round to make contents divide into nearly even blocks
blockSizePerStack = int(math.ceil(totalDim / float(nblocks)))
nblocks = int(math.ceil(totalDim / float(blockSizePerStack)))
blockSizePerStack *= dtype.itemsize
fileSize = totalDim * dtype.itemsize
def readBlock(blockNum):
# copy size out from closure; will modify later:
blockSizePerStack_ = blockSizePerStack
# get start position for this block
position = blockNum * blockSizePerStack_
# adjust if at end of file
if (position + blockSizePerStack_) > fileSize:
blockSizePerStack_ = int(fileSize - position)
# loop over files, loading one block from each
bufs = []
for fname in filenames:
buf = reader.read(fname, startOffset=position, size=blockSizePerStack_)
bufs.append(frombuffer(buf, dtype=dtype))
buf = vstack(bufs).T # dimensions are now linindex x time (images)
del bufs
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
itemPosition = position / dtype.itemsize
itemBlocksize = blockSizePerStack_ / dtype.itemsize
linearIdx = arange(itemPosition, itemPosition + itemBlocksize) # zero-based
keys = zip(*map(tuple, unravel_index(linearIdx, dims, order='F')))
return zip(keys, buf)
# map over blocks
return (self.sc.parallelize(range(0, nblocks), nblocks).flatMap(lambda bn: readBlock(bn)),
len(filenames), newDtype)
@staticmethod
def __readMetadataFromFirstPageOfMultiTif(reader, filePath):
import thunder.rdds.fileio.multitif as multitif
# read first page of first file to get expected image size
tiffFP = reader.open(filePath)
tiffParser = multitif.TiffParser(tiffFP, debug=False)
tiffHeaders = multitif.TiffData()
tiffParser.parseFileHeader(destinationTiff=tiffHeaders)
firstIfd = tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders)
if not firstIfd.isLuminanceImage():
raise ValueError(("File %s does not appear to be a luminance " % filePath) +
"(greyscale or bilevel) TIF image, " +
"which are the only types currently supported")
# keep reading pages until we reach the end of the file, in order to get number of planes:
while tiffParser.parseNextImageFileDirectory(destinationTiff=tiffHeaders):
pass
# get dimensions
npages = len(tiffHeaders.ifds)
height = firstIfd.getImageHeight()
width = firstIfd.getImageWidth()
# get datatype
bitsPerSample = firstIfd.getBitsPerSample()
if not (bitsPerSample in (8, 16, 32, 64)):
raise ValueError("Only 8, 16, 32, or 64 bit per pixel TIF images are supported, got %d" % bitsPerSample)
sampleFormat = firstIfd.getSampleFormat()
if sampleFormat == multitif.SAMPLE_FORMAT_UINT:
dtStr = 'uint'
elif sampleFormat == multitif.SAMPLE_FORMAT_INT:
dtStr = 'int'
elif sampleFormat == multitif.SAMPLE_FORMAT_FLOAT:
dtStr = 'float'
else:
raise ValueError("Unknown TIF SampleFormat tag value %d, should be 1, 2, or 3 for uint, int, or float"
% sampleFormat)
dtype = dtStr+str(bitsPerSample)
return height, width, npages, dtype
def _getSeriesBlocksFromMultiTif(self, dataPath, ext="tif", blockSize="150M",
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None,
recursive=False):
import thunder.rdds.fileio.multitif as multitif
import itertools
from PIL import Image
import io
dataPath = self.__normalizeDatafilePattern(dataPath, ext)
blockSize = parseMemoryString(blockSize)
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
filenames = reader.list(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not filenames:
raise IOError("No files found for path '%s'" % dataPath)
ntimepoints = len(filenames)
doMinimizeReads = dataPath.lower().startswith("s3") or dataPath.lower().startswith("gs")
# check PIL version to see whether it is actually pillow or indeed old PIL and choose
# conversion function appropriately. See ImagesLoader.fromMultipageTif and common.pil_to_array
# for more explanation.
isPillow = hasattr(Image, "PILLOW_VERSION")
if isPillow:
conversionFcn = array # use numpy's array() function
else:
from thunder.utils.common import pil_to_array
conversionFcn = pil_to_array # use our modified version of matplotlib's pil_to_array
height, width, npages, dtype = SeriesLoader.__readMetadataFromFirstPageOfMultiTif(reader, filenames[0])
if dtype.startswith('int'):
raise ValueError('Signed integer tiff images are not supported in SeriesLoader (shuffle=False);' +
' please try loading as Images (shuffle=True)')
pixelBytesize = dtypeFunc(dtype).itemsize
if newDtype is None or str(newDtype) == '':
newDtype = str(dtype)
elif newDtype == 'smallfloat':
newDtype = str(smallestFloatType(dtype))
else:
newDtype = str(newDtype)
        # initialize at one block per plane
bytesPerPlane = height * width * pixelBytesize * ntimepoints
bytesPerBlock = bytesPerPlane
blocksPerPlane = 1
# keep dividing while cutting our size in half still leaves us bigger than the requested size
# should end up no more than 2x blockSize.
while bytesPerBlock >= blockSize * 2:
bytesPerBlock /= 2
blocksPerPlane *= 2
blocklenPixels = max((height * width) / blocksPerPlane, 1) # integer division
while blocksPerPlane * blocklenPixels < height * width: # make sure we're reading the plane fully
blocksPerPlane += 1
# prevent bringing in self in closure:
awsCredentialsOverride = self.awsCredentialsOverride
# keys will be planeidx, blockidx:
keys = list(itertools.product(xrange(npages), xrange(blocksPerPlane)))
def readBlockFromTiff(planeIdxBlockIdx):
planeIdx, blockIdx = planeIdxBlockIdx
blocks = []
planeShape = None
blockStart = None
blockEnd = None
for fname in filenames:
reader_ = getFileReaderForPath(fname)(awsCredentialsOverride=awsCredentialsOverride)
fp = reader_.open(fname)
try:
if doMinimizeReads:
# use multitif module to generate a fake, in-memory
# one-page tif file. the advantage of this is that it
# cuts way down on the many small reads that PIL/pillow
# will make otherwise, which would be a problem for s3
# or Google Storage
tiffParser_ = multitif.TiffParser(fp, debug=False)
tiffFilebuffer = multitif.packSinglePage(tiffParser_, pageIdx=planeIdx)
byteBuf = io.BytesIO(tiffFilebuffer)
try:
pilImg = Image.open(byteBuf)
ary = conversionFcn(pilImg).T
finally:
byteBuf.close()
del tiffFilebuffer, tiffParser_, pilImg, byteBuf
else:
# read tif using PIL directly
pilImg = Image.open(fp)
pilImg.seek(planeIdx)
ary = conversionFcn(pilImg).T
del pilImg
if not planeShape:
planeShape = ary.shape[:]
blockStart = blockIdx * blocklenPixels
blockEnd = min(blockStart+blocklenPixels, planeShape[0]*planeShape[1])
blocks.append(ary.ravel(order='C')[blockStart:blockEnd])
del ary
finally:
fp.close()
buf = vstack(blocks).T # dimensions are now linindex x time (images)
del blocks
buf = buf.astype(newDtype, casting=casting, copy=False)
# append subscript keys based on dimensions
linearIdx = arange(blockStart, blockEnd) # zero-based
seriesKeys = zip(*map(tuple, unravel_index(linearIdx, planeShape, order='C')))
# add plane index to end of keys
if npages > 1:
seriesKeys = [tuple(list(keys_)[::-1]+[planeIdx]) for keys_ in seriesKeys]
else:
seriesKeys = [tuple(list(keys_)[::-1]) for keys_ in seriesKeys]
return zip(seriesKeys, buf)
# map over blocks
rdd = self.sc.parallelize(keys, len(keys)).flatMap(readBlockFromTiff)
if npages > 1:
dims = (npages, width, height)
else:
dims = (width, height)
metadata = (dims, ntimepoints, newDtype)
return rdd, metadata
def fromStack(self, dataPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype='smallfloat', casting='safe', startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object directly from binary image stack files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
return Series(seriesBlocks, dims=dims, dtype=newDtype, index=arange(npointsInSeries))
def fromTif(self, dataPath, ext="tif", blockSize="150M", newDtype='smallfloat', casting='safe',
startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object from multipage tiff files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
return Series(seriesBlocks, dims=Dimensions.fromTuple(dims[::-1]), dtype=dtype,
index=arange(npointsInSeries))
def __saveSeriesRdd(self, seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=False):
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def blockToBinarySeries(kvIter):
label = None
keyPacker = None
buf = StringIO()
for seriesKey, series in kvIter:
if keyPacker is None:
keyPacker = struct.Struct('h'*len(seriesKey))
label = SimpleBlocks.getBinarySeriesNameForKey(seriesKey) + ".bin"
buf.write(keyPacker.pack(*seriesKey))
buf.write(series.tostring())
val = buf.getvalue()
buf.close()
return [(label, val)]
seriesBlocks.mapPartitions(blockToBinarySeries).foreach(writer.writerFcn)
writeSeriesConfig(outputDirPath, len(dims), npointsInSeries, valueType=dtype, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def saveFromStack(self, dataPath, outputDirPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype=None, casting='safe', startIdx=None, stopIdx=None, overwrite=False, recursive=False):
"""Write out data from binary image stack files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, newDtype, overwrite=overwrite)
def saveFromTif(self, dataPath, outputDirPath, ext="tif", blockSize="150M",
newDtype=None, casting='safe', startIdx=None, stopIdx=None,
overwrite=False, recursive=False):
"""Write out data from multipage tif files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
        outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=overwrite)
def fromMatLocal(self, dataPath, varName, keyFile=None):
"""Loads Series data stored in a Matlab .mat file.
`datafile` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = loadmat(dataPath)[varName]
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), loadmat(keyFile)['keys'])
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def fromNpyLocal(self, dataPath, keyFile=None):
"""Loads Series data stored in the numpy save() .npy format.
`datafile` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = load(dataPath)
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), load(keyFile))
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def loadConf(self, dataPath, confFilename='conf.json'):
"""Returns a dict loaded from a json file.
Looks for file named `conffile` in same directory as `dataPath`
Returns {} if file not found
"""
if not confFilename:
return {}
reader = getFileReaderForPath(dataPath)(awsCredentialsOverride=self.awsCredentialsOverride)
try:
jsonBuf = reader.read(dataPath, filename=confFilename)
except FileNotFoundError:
return {}
params = json.loads(jsonBuf)
if 'format' in params:
raise Exception("Numerical format of value should be specified as 'valuetype', not 'format'")
if 'keyformat' in params:
raise Exception("Numerical format of key should be specified as 'keytype', not 'keyformat'")
return params
def writeSeriesConfig(outputDirPath, nkeys, nvalues, keyType='int16', valueType='int16',
confFilename="conf.json", overwrite=True, awsCredentialsOverride=None):
"""
Helper function to write out a conf.json file with required information to load Series binary data.
"""
import json
from thunder.rdds.fileio.writers import getFileWriterForPath
filewriterClass = getFileWriterForPath(outputDirPath)
# write configuration file
# config JSON keys are lowercased "valuetype", "keytype", not valueType, keyType
conf = {'input': outputDirPath,
'nkeys': nkeys, 'nvalues': nvalues,
'valuetype': str(valueType), 'keytype': str(keyType)}
confWriter = filewriterClass(outputDirPath, confFilename, overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
confWriter.writeFile(json.dumps(conf, indent=2))
# touch "SUCCESS" file as final action
successWriter = filewriterClass(outputDirPath, "SUCCESS", overwrite=overwrite,
awsCredentialsOverride=awsCredentialsOverride)
successWriter.writeFile('')
| 48.772619 | 124 | 0.631648 | 39,027 | 0.952598 | 0 | 0 | 3,632 | 0.088652 | 0 | 0 | 19,247 | 0.469794 |
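The SeriesLoader row above reads and writes a small conf.json next to the binary Series files (see loadConf and writeSeriesConfig); it holds the input path plus the binary-format parameters. A representative file, generated here with made-up values for illustration:

```python
import json

# Illustrative conf.json for the flat binary Series format described above;
# the path and numbers are assumptions, not taken from a real dataset.
conf = {
    "input": "/path/to/series/output",
    "nkeys": 3,           # e.g. (x, y, z) subscript keys per record
    "nvalues": 240,       # number of time points per record
    "keytype": "int16",
    "valuetype": "int16",
}
print(json.dumps(conf, indent=2))
```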
c7dcceeeb44aada8315f0c77d81c291531d15b79 | 3,097 | py | Python | mxnet/local_forward.py | rai-project/onnx_examples | 45db7b3e03dd674f28aeef3fcb1e60f5bca47948 | ["MIT"] | null | null | null | mxnet/local_forward.py | rai-project/onnx_examples | 45db7b3e03dd674f28aeef3fcb1e60f5bca47948 | ["MIT"] | null | null | null | mxnet/local_forward.py | rai-project/onnx_examples | 45db7b3e03dd674f28aeef3fcb1e60f5bca47948 | ["MIT"] | null | null | null | # run local models given a path, default to './mxnet_models/'
import os
import argparse
import time
import mxnet as mx
import numpy as np
file_path = os.path.realpath(__file__)
dir_name = os.path.dirname(file_path)
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
def cuda_profiler_start():
    # Wrapped as functions so profiling starts/stops when called; as class
    # bodies these calls would run once at import time instead.
    import numba.cuda as cuda
    cuda.profile_start()
def cuda_profiler_stop():
    import numba.cuda as cuda
    cuda.profile_stop()
def xprint(s):
pass
parser = argparse.ArgumentParser(
description='Predict ImageNet classes from a given image')
parser.add_argument('--model_name', type=str, required=False, default='resnet50_v1',
help='name of the model to use')
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size to use')
parser.add_argument('--input_dim', type=int, required=False, default=224,
help='input dimension')
parser.add_argument('--input_channels', type=int, required=False, default=3,
help='input channels')
parser.add_argument('--num_iterations', type=int, required=False, default=30,
help='number of iterations to run')
parser.add_argument('--num_warmup', type=int, required=False, default=5,
help='number of warmup iterations to run')
parser.add_argument('--model_idx', type=int, required=False, default=2,
help='model idx')
parser.add_argument('--profile', type=bool, required=False, default=False,
help='enable profiling')
opt = parser.parse_args()
model_name = opt.model_name
batch_size = opt.batch_size
input_dim = opt.input_dim
input_channels = opt.input_channels
num_iterations = opt.num_iterations
num_warmup = opt.num_warmup
model_idx = opt.model_idx
profile = opt.profile
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint(
dir_name + '/mxnet_models/'+model_name, 0)
data_names = [
graph_input
for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params
]
net = mx.mod.Module(
symbol=sym,
data_names=[data_names[0]],
context=ctx,
label_names=None,
)
input_shape = (batch_size, input_channels, input_dim, input_dim)
img = mx.random.uniform(
shape=input_shape, ctx=ctx)
net.bind(for_training=False, data_shapes=[
(data_names[0], input_shape)], label_shapes=net._label_shapes)
net.set_params(arg_params, aux_params, allow_missing=True)
def forward_once():
mx.nd.waitall()
start = time.time()
prob = net.predict(img)
mx.nd.waitall()
end = time.time() # stop timer
return end - start
for i in range(num_warmup):
forward_once()
res = []
if profile:
cuda_profiler_start()
for i in range(num_iterations):
t = forward_once()
res.append(t)
if profile:
cuda_profiler_stop()
res = np.multiply(res, 1000)
print("{},{},{},{},{},{}".format(model_idx+1, model_name, batch_size, np.min(res),
np.average(res), np.max(res)))
| 27.651786 | 84 | 0.683242 | 164 | 0.052954 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.156926 |
c7de097e9b9739100654b069d9cac10ffe5b515c | 1,198 | py | Python | tests/test_get_angles.py | Mopolino8/lammps-data-file | 5c9015d05fa1484a33c84e6cfb90cd4a7d99d133 | ["MIT"] | 13 | 2017-05-30T17:43:10.000Z | 2021-08-06T04:21:44.000Z | tests/test_get_angles.py | njustcodingjs/lammps-data-file | 3a0729b5ab4d2344326d09ac4ee1aab41442f14a | ["MIT"] | 2 | 2018-05-28T15:35:32.000Z | 2018-05-28T16:21:09.000Z | tests/test_get_angles.py | njustcodingjs/lammps-data-file | 3a0729b5ab4d2344326d09ac4ee1aab41442f14a | ["MIT"] | 10 | 2017-05-23T21:19:21.000Z | 2022-03-08T02:18:00.000Z | from lammps_data.angles import get_angles
def test_separate_diatomic_molecules_should_have_no_angles():
bonds = [(0, 1), (2, 3)]
assert get_angles(bonds) == []
def test_molecule_with_two_bonds_should_have_one_angle():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_within_angle_tuples():
bonds = [(0, 1), (1, 2)]
assert get_angles(bonds) == [(0, 1, 2)]
bonds = [(1, 2), (0, 1)]
assert get_angles(bonds) == [(0, 1, 2)]
def test_different_order_of_bond_tuples_should_return_same_order_of_angle_tuples():
bonds = [(0, 1), (1, 2), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
bonds = [(1, 2), (0, 1), (1, 3)]
assert get_angles(bonds) == [(0, 1, 2), (0, 1, 3), (2, 1, 3)]
def test_tetrahedral_molecule_should_have_six_angles():
bonds = [(0, 1), (0, 2), (0, 3), (0, 4)]
assert get_angles(bonds) == [(1, 0, 2),
(1, 0, 3),
(1, 0, 4),
(2, 0, 3),
(2, 0, 4),
(3, 0, 4)]
| 33.277778 | 87 | 0.520033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c7dedb48cc1d235760b585e1ff0e7c005780aeec | 491 | py | Python | api/scheduler/migrations/0001_initial.py | jfaach/stock-app | 9cd0f98d3ec5d31dcd6680c5bf8b7b0fcdf025a6 | [
"CC0-1.0"
]
| null | null | null | api/scheduler/migrations/0001_initial.py | jfaach/stock-app | 9cd0f98d3ec5d31dcd6680c5bf8b7b0fcdf025a6 | [
"CC0-1.0"
]
| null | null | null | api/scheduler/migrations/0001_initial.py | jfaach/stock-app | 9cd0f98d3ec5d31dcd6680c5bf8b7b0fcdf025a6 | [
"CC0-1.0"
]
| null | null | null | # Generated by Django 3.1.1 on 2020-12-16 03:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Scheduler',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('minutes', models.IntegerField(default=15)),
],
),
]
| 22.318182 | 114 | 0.578411 | 398 | 0.810591 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.152749 |
c7e12276bc98092252c4149244dfdf01adca03b0 | 477 | py | Python | 9-Wine-Scaling.py | Pawel762/Class-7_homework | e79d2f8d218980d814443951dae7840f521ba191 | [
"MIT"
]
| null | null | null | 9-Wine-Scaling.py | Pawel762/Class-7_homework | e79d2f8d218980d814443951dae7840f521ba191 | [
"MIT"
]
| null | null | null | 9-Wine-Scaling.py | Pawel762/Class-7_homework | e79d2f8d218980d814443951dae7840f521ba191 | [
"MIT"
]
| null | null | null | from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
wine = load_wine()
columns_names = wine.feature_names
y = wine.target
X = wine.data
print('Pre scaling X')
print(X)
scaler = StandardScaler()
scaler.fit(X)
scaled_features = scaler.transform(X)
print('Post scaling X')
print(scaled_features)
X_train, X_test, y_train, y_test = train_test_split(scaled_features, y, test_size=0.375)
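# Illustrative leakage-free variant (a sketch; the variable names below are examples):
# fit the scaler on the training split only, then apply that same transform to the
# held-out split instead of scaling the full dataset up front.
raw_train, raw_test, y_train_alt, y_test_alt = train_test_split(X, y, test_size=0.375)
split_scaler = StandardScaler()
split_scaler.fit(raw_train)
X_train_alt = split_scaler.transform(raw_train)
X_test_alt = split_scaler.transform(raw_test)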
| 21.681818 | 88 | 0.796646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.06499 |
c7e14941f3967e5d720a9a0637e48720262f173d | 4,057 | py | Python | tests/conftest.py | szkkteam/flask-starter | 7019036e7ee017ca5df9059d0b4a0d29005beab5 | [
"MIT"
]
| null | null | null | tests/conftest.py | szkkteam/flask-starter | 7019036e7ee017ca5df9059d0b4a0d29005beab5 | [
"MIT"
]
| 2 | 2021-03-31T19:36:44.000Z | 2021-12-13T20:30:11.000Z | tests/conftest.py | szkkteam/flask-starter | 7019036e7ee017ca5df9059d0b4a0d29005beab5 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import os
import pytest
# Pip package imports
from collections import namedtuple
from flask import template_rendered
from flask_security.signals import (
reset_password_instructions_sent,
user_confirmed,
user_registered,
)
# Internal package imports
from backend.app import _create_app
from backend.config import TestConfig
from backend.extensions import db as db_ext
from backend.extensions.mail import mail
from ._client import (
ApiTestClient,
ApiTestResponse,
HtmlTestClient,
HtmlTestResponse,
)
from ._model_factory import ModelFactory
@pytest.fixture(autouse=True, scope='session')
def app():
app = _create_app(TestConfig)
#ctx = app.app_context()
ctx = app.test_request_context()
ctx.push()
yield app
ctx.pop()
@pytest.yield_fixture
def client(app):
app.response_class = HtmlTestResponse
app.test_client_class = HtmlTestClient
with app.test_client() as client:
yield client
@pytest.yield_fixture
def api_client(app):
app.response_class = ApiTestResponse
app.test_client_class = ApiTestClient
with app.test_client() as client:
yield client
@pytest.fixture(autouse=True, scope='session')
def db():
db_ext.create_all()
yield db_ext
db_ext.drop_all()
@pytest.fixture(autouse=True)
def db_session(db):
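    # run each test inside its own transaction and roll it back afterwards so tests cannot leak data into the session-scoped database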
connection = db.engine.connect()
transaction = connection.begin()
session = db.create_scoped_session(options=dict(bind=connection, binds={}))
db.session = session
try:
yield session
finally:
transaction.rollback()
connection.close()
session.remove()
@pytest.fixture(scope='session')
def celery_config():
return {'broker_url': 'redis://localhost:6379/1',
'result_backend': 'redis://localhost:6379/1',
'accept_content': ('json', 'pickle')}
@pytest.fixture()
def templates(app):
records = []
RenderedTemplate = namedtuple('RenderedTemplate', 'template context')
def record(sender, template, context, **extra):
records.append(RenderedTemplate(template, context))
template_rendered.connect(record, app)
try:
yield records
finally:
template_rendered.disconnect(record, app)
@pytest.fixture()
def outbox():
with mail.record_messages() as messages:
yield messages
@pytest.fixture()
def registrations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
user_registered.connect(record, app)
try:
yield records
finally:
user_registered.disconnect(record, app)
@pytest.fixture()
def confirmations(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs['user'])
print("Record: ", records[-1])
user_confirmed.connect(record, app)
try:
yield records
finally:
print("Disconnect record: ", records)
user_confirmed.disconnect(record, app)
@pytest.fixture()
def password_resets(app):
records = []
def record(sender, *args, **kwargs):
records.append(kwargs)
reset_password_instructions_sent.connect(record, app)
try:
yield records
finally:
reset_password_instructions_sent.disconnect(record, app)
@pytest.fixture()
def user(model_factory):
yield model_factory.create('User', 'user')
@pytest.fixture()
def newslettersubscribe(model_factory):
yield model_factory.create('NewsletterSubscribe', 'newslettersubscribe')
@pytest.fixture()
def admin(model_factory):
yield model_factory.create('User', 'admin')
@pytest.fixture()
def models(request, model_factory):
mark = request.param
if mark is not None:
return model_factory.get_models(mark)
@pytest.fixture()
def model_factory(app, db_session):
fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'model_fixtures')
yield ModelFactory(db_session, app.models, fixtures_dir)
| 22.792135 | 79 | 0.689426 | 0 | 0 | 2,666 | 0.657136 | 3,360 | 0.828198 | 0 | 0 | 439 | 0.108208 |
c7e1894d1594534627afedcd4ba2104fda1ac3a6 | 927 | py | Python | setup.py | YiuRULE/nats.py | 3a78ba4c385e2069daf5ff560aadc30968af1ccd | [
"Apache-2.0"
]
| null | null | null | setup.py | YiuRULE/nats.py | 3a78ba4c385e2069daf5ff560aadc30968af1ccd | [
"Apache-2.0"
]
| null | null | null | setup.py | YiuRULE/nats.py | 3a78ba4c385e2069daf5ff560aadc30968af1ccd | [
"Apache-2.0"
]
| null | null | null | from setuptools import setup
from nats.aio.client import __version__
EXTRAS = {
'nkeys': ['nkeys'],
}
setup(
name='nats-py',
version=__version__,
description='NATS client for Python',
long_description='Python client for NATS, a lightweight, high-performance cloud native messaging system',
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'
],
url='https://github.com/nats-io/nats.py',
author='Waldemar Quevedo',
author_email='[email protected]',
license='Apache 2 License',
packages=['nats', 'nats.aio', 'nats.protocol', 'nats.js'],
zip_safe=True,
extras_require=EXTRAS
)
| 29.903226 | 109 | 0.636462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.565264 |
c7e2f163fdb11300c85e2c17e27cb56d8ee3f07e | 12,844 | py | Python | example_python_files/MagicDAQ,MABoard,FullDemo.py | MagicDAQ/magicdaq_docs | 896a2565a28d80c733d8a137211212816ef3fbe2 | [
"MIT"
]
| 1 | 2021-05-20T21:11:13.000Z | 2021-05-20T21:11:13.000Z | example_python_files/MagicDAQ,MABoard,FullDemo.py | MagicDAQ/magicdaq_docs | 896a2565a28d80c733d8a137211212816ef3fbe2 | [
"MIT"
]
| null | null | null | example_python_files/MagicDAQ,MABoard,FullDemo.py | MagicDAQ/magicdaq_docs | 896a2565a28d80c733d8a137211212816ef3fbe2 | [
"MIT"
]
| null | null | null | ##############################################################
#*** MagicDAQ USB DAQ and M&A Board General Demo Script ***
##############################################################
#*** Websites ***
# MagicDAQ Website:
# https://www.magicdaq.com/
# API Docs Website:
# https://magicdaq.github.io/magicdaq_docs/
#*** Install MagicDAQ ***
# Download the MagicDAQ python package from pypi
# Run this command in a command prompt:
# python -m pip install magicdaq
# Further docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ
# MagicDAQ is only compatible with Python 3 on Windows. It does not work on Linux at the moment. It does not work with Python 2.
#*** Using Auto Code Complete With PyCharm ***
# Using a code editor like Pycharm and want to get auto complete working for the MagicDAQ package?
# Docs: https://magicdaq.github.io/magicdaq_docs/#/PyCharmCodeCompletion
##############################################################
#*** Imports ***
##############################################################
import sys
import time
# Import MagicDAQ
print('*** MagicDAQ Install Check ***')
print('')
try:
# Import MagicDAQDevice object
from magicdaq.api_class import MagicDAQDevice
# Create daq_one object
daq_one = MagicDAQDevice()
print('GOOD: MagicDAQ API is installed properly.')
# Get MagicDAQ Driver Version
driver_version = daq_one.get_driver_version()
if driver_version == 1.0:
print('GOOD: MagicDAQ Driver is installed properly.')
print('You are ready to use MagicDAQ!')
else:
print('ERROR: MagicDAQ Driver version not expected value: '+str(driver_version))
print('Try installing MagicDAQ using pip again.')
print('https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: [email protected]')
except Exception as exception_text:
print('Original exception: ')
print(exception_text)
print('')
print('ERROR: Unable to import MagicDAQ API.')
    print('Most likely, MagicDAQ has not been properly downloaded and installed using pip.')
print('Please consult MagicDAQ API Docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: [email protected]')
sys.exit(0)
##############################################################
#*** MagicDAQ USB DAQ MDAQ300 Features Demo ***
##############################################################
# This portion of the script shows off some of the USB DAQ's features
# Hardware docs: https://www.magicdaq.com/product/magic-daq/
print('')
print('*** MagicDAQ USB DAQ Demo ***')
print('Ensure the USB DAQ is plugged into the computer using the USB cable.')
print('The DAQ does not need to be connected to the M&A board.')
print('')
user_input = input('Press any key to continue.')
#*** Open DAQ Device ***
# Remember, the daq_one object has already been created in the above 'Imports' section
# We must open the daq device before performing any hardware feature manipulation
# https://magicdaq.github.io/magicdaq_docs/#/MagicDAQ_Basics
daq_one.open_daq_device()
###############################################################
#*** Analog Output Demo: Constant, Sine, and PWM on AO1 Pin ***
###############################################################
print('')
print('--- Analog Output Demo: Constant, Sine, and PWM Output ---')
# Set constant 3 volt output voltage on AO1 pin
daq_one.set_analog_output(1,3)
print("Using an oscilloscope, place the scope probe on pin AO1 and connect the scope probe GND to one of the USB DAQ's AGND pins")
print('You should now observe a constant 3V')
print('')
user_input = input('Press any key to continue.')
# Configure and start 300Hz sine wave with 2V amplitude on AO1 pin
daq_one.configure_analog_output_sine_wave(1,300,amplitude=2)
daq_one.start_analog_output_wave(1)
print('You should now observe a 300Hz sine wave with 2V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop previous wave
daq_one.stop_analog_output_wave(1)
# Configure and start PWM wave, 200 Hz, 50% duty cycle, 3.3V amplitude
daq_one.configure_analog_output_pwm_wave(1,200,50,amplitude=3.3)
daq_one.start_analog_output_wave(1)
print('You should now observe a 200Hz PWM wave, 50% duty cycle, with 3.3V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop the wave
daq_one.stop_analog_output_wave(1)
print('The wave should now stop. You could set it to GND using set_analog_output() if you wanted.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: PWM waves ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: PWM Waves ---')
# Configure a 50 KHz frequency, 75% duty cycle, continuous PWM Wave on the counter pin (CTR0)
# Note that unlike the analog output pins, the CTR0 pin always outputs at an amplitude of 3.3v when producing PWM waves
daq_one.configure_counter_pwm(50000,75)
# Start counter wave
daq_one.start_counter_pwm()
print('Place your scope probe on pin CTR0')
print('You should see a 50kHz, 75% duty cycle PWM wave.')
print('')
user_input = input('Press any key to continue.')
# Now stopping the counter PWM wave
daq_one.stop_counter_pwm()
print('The PWM wave will now stop.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: Pulse Counting ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: Pulse Counting ---')
print('Use a piece of wire to bridge CTR0 to DGND several times')
print('CTR0 has an internal pull up resistor. You are simulating a pulse pulling the voltage to GND.')
print('You will have 8 sec to simulate some pulses.')
print('')
user_input = input('Press any key when you are ready to start.')
# Start the Pulse Counter
# Pulses will be counted on the falling edge
daq_one.enable_pulse_counter()
# Sleep for 8 sec
time.sleep(8)
# Read number of pulses
print('Number of pulses counted: '+str(daq_one.read_pulse_counter()))
print('You are using a piece of wire, so it is likely bouncing on and off the screw terminal, counting many pulses')
print('')
user_input = input('Stop simulating pulses. Press any key to continue.')
print('')
print('Now clearing the pulse counter')
daq_one.clear_pulse_counter()
print('Pulse count after clearing: '+str(daq_one.read_pulse_counter()))
###############################################################
#*** Digital Pin Demo ***
###############################################################
print('')
print('--- Digital Pin Demo ---')
# Set P0.0 pin LOW
daq_one.set_digital_output(0,0)
print('Place scope probe on pin P0.0, pin should be LOW')
print('')
user_input = input('Press any key to continue.')
# Set P0.0 pin HIGH
daq_one.set_digital_output(0,1)
print('Place scope probe on pin P0.0, pin should be HIGH')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Analog Input Pin Demo ***
###############################################################
print('')
print('--- Analog Input Pin Demo ---')
# Single ended voltage measurement
print("Apply voltage to AI0 pin. If you don't have a power supply handy, you can run a wire from the +5V pin to the AI0 pin.")
print('')
user_input = input('Press any key to continue.')
print('Voltage measured at AI0: '+str(daq_one.read_analog_input(0)))
print('If you are using the +5V pin, remember that this voltage is derived from the USB power supply, so it will be whatever your USB bus is producing, probably something slightly less than 5V.')
# If you want to perform a differential input measurement
# daq_one.read_diff_analog_input()
# https://magicdaq.github.io/magicdaq_docs/#/read_diff_analog_input
###############################################################
#*** M&A Board Demo ***
###############################################################
# M&A Board hardware spec:
# https://www.magicdaq.com/product/ma-board-full-kit/
print('')
print('*** M&A Board Demo ***')
print('Ensure the USB DAQ is connected to the M&A board using the ribbon cable.')
print('Ribbon cable pin out on page 6 of: ')
print('https://www.magicdaq.com/mdaq350datasheet/')
print('Use the provided power cable to apply power to the M&A board.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Relay Demo ***
###############################################################
print('')
print('--- Relay Demo ---')
print('Setting all relays to closed.')
daq_one.set_digital_output(7, 1)
daq_one.set_digital_output(6, 1)
daq_one.set_digital_output(5, 1)
daq_one.set_digital_output(4, 1)
time.sleep(1)
relay_count = 1
digital_pin_count = 7
while relay_count <= 4:
print('Relay #: ' + str(relay_count) + ' Digital Pin #: ' + str(digital_pin_count))
# Set relay to open
print('Setting relay to OPEN.')
daq_one.set_digital_output(digital_pin_count, 0)
time.sleep(1)
# Increment counters
relay_count += 1
digital_pin_count -= 1
print('')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Vout Demo ***
###############################################################
print('')
print('--- Vout Demo ---')
print('Vout provides a variable voltage power output capable of up to 2A')
print('By characterizing your M&A board, or building a feedback loop, the voltage accuracy of Vout can be made quite good.')
print('See notes on page 4 of the M&A data sheet.')
print('https://www.magicdaq.com/mdaq350datasheet/')
# See the M&A board data sheet for the equation that describes the Vout to Vout_set (0 and 2.77 here) relationship
print('')
print('Vout_set Set to 0V.')
print('Measure Vout with a multimeter. It should be about 10V')
daq_one.set_analog_output(0, 0)
print('')
user_input = input('Press any key to continue.')
print('Vout_set Set to 2.77V')
print('Measure Vout with a multimeter. It should be about 5V')
daq_one.set_analog_output(0, 2.77)
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Low Current Measurement Demo: A1 ***
###############################################################
print('')
print('--- A1 Low Current Measurement Demo ---')
print('Use the 3.3V board voltage and a 20K resistor to put 165uA through A1.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_4_voltage = daq_one.read_analog_input(4)
print('Read voltage: ' + str(pin_4_voltage))
calculated_current_amps = pin_4_voltage / (332 * 97.863)
ua_current = round((calculated_current_amps / .000001), 3)
print('Calculated uA current: ' + str(ua_current))
###############################################################
#*** Current Measurement Demo: A2 ***
###############################################################
print('')
print('--- A2 Current Measurement Demo (+/- 5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A2.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_5_voltage = daq_one.read_analog_input(5)
print('Read voltage: ' + str(pin_5_voltage))
calculated_current_amps = pin_5_voltage / (.01 * 200)
# ma_current = round((calculated_current_amps / .001), 3)
print('Calculated A current: ' + str(calculated_current_amps))
###############################################################
#*** Current Measurement Demo: A3 ***
###############################################################
print('')
print('--- A3 Current Measurement Demo (+/- 1.5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A3.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_6_voltage = daq_one.read_analog_input(6)
print('Read voltage: ' + str(pin_6_voltage))
calculated_current_amps = pin_6_voltage / (.033 * 200)
ma_current = round((calculated_current_amps / .001), 3)
print('Calculated mA current: ' + str(ma_current))
###############################################################
#*** Demo Complete. ***
###############################################################
# Close connection to daq
daq_one.close_daq_device()
| 34.342246 | 196 | 0.617642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,312 | 0.725008 |
c7e321ea7df7191ba4707163a3bf9a97bdfd5999 | 252 | py | Python | src/onenutil/schemas/__init__.py | LemurPwned/onenote-utils | 07778e6b2433cf28fab2afdbb01a318f284989dc | [
"MIT"
]
| null | null | null | src/onenutil/schemas/__init__.py | LemurPwned/onenote-utils | 07778e6b2433cf28fab2afdbb01a318f284989dc | [
"MIT"
]
| null | null | null | src/onenutil/schemas/__init__.py | LemurPwned/onenote-utils | 07778e6b2433cf28fab2afdbb01a318f284989dc | [
"MIT"
]
| null | null | null | from .results import (ArticleSearchResult, EmbeddingsResult, SearchResult,
TagResult, ZoteroExtractionResult)
__all__ = [
"TagResult", "EmbeddingsResult", "ZoteroExtractionResult", "SearchResult",
"ArticleSearchResult"
]
| 31.5 | 78 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.349206 |
c7e32e60b520a7528f6c33e61490ce039febd1e0 | 2,257 | py | Python | src/account/api/serializers.py | amirpsd/drf_blog_api | 58be081a450840114af021e7412e469fad90456d | [
"MIT"
]
| 33 | 2022-02-11T12:16:29.000Z | 2022-03-26T15:08:47.000Z | src/account/api/serializers.py | amirpsd/django_blog_api | 58be081a450840114af021e7412e469fad90456d | [
"MIT"
]
| null | null | null | src/account/api/serializers.py | amirpsd/django_blog_api | 58be081a450840114af021e7412e469fad90456d | [
"MIT"
]
| 5 | 2022-02-11T13:03:52.000Z | 2022-03-28T16:04:32.000Z | from django.contrib.auth import get_user_model
from rest_framework import serializers
class UsersListSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = [
"id", "phone",
"first_name", "last_name",
"author",
]
class UserDetailUpdateDeleteSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
exclude = [
"password",
]
class UserProfileSerializer(serializers.ModelSerializer):
phone = serializers.ReadOnlyField()
class Meta:
model = get_user_model()
fields = [
"id", "phone",
"first_name", "last_name",
"two_step_password",
]
class AuthenticationSerializer(serializers.Serializer):
phone = serializers.CharField(
max_length=12,
min_length=12,
)
def validate_phone(self, value):
from re import match
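        # expects the international form of an Iranian mobile number: 12 digits starting with 989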
if not match("^989\d{2}\s*?\d{3}\s*?\d{4}$", value):
raise serializers.ValidationError("Invalid phone number.")
return value
class OtpSerializer(serializers.Serializer):
code = serializers.CharField(
max_length=6,
min_length=6,
)
password = serializers.CharField(
max_length=20,
required=False,
)
def validate_code(self, value):
try:
int(value)
except ValueError as _:
raise serializers.ValidationError("Invalid Code.")
return value
class GetTwoStepPasswordSerializer(serializers.Serializer):
"""
Base serializer two-step-password.
"""
password = serializers.CharField(
max_length=20,
)
confirm_password = serializers.CharField(
max_length=20,
)
def validate(self, data):
password = data.get('password')
confirm_password = data.get('confirm_password')
if password != confirm_password:
raise serializers.ValidationError(
{"Error": "Your passwords didn't match."}
)
return data
class ChangeTwoStepPasswordSerializer(GetTwoStepPasswordSerializer):
old_password = serializers.CharField(
max_length=20,
)
| 23.030612 | 70 | 0.613646 | 2,149 | 0.952149 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.129375 |
c7e5a0b18daf16984d985969f34fb443eae76979 | 3,733 | py | Python | generate_figure9.py | IBM/Simultaneous-diagonalization | 385545401395a2e07f109441db4751a5dcf8f0a4 | [
"Apache-2.0"
]
| null | null | null | generate_figure9.py | IBM/Simultaneous-diagonalization | 385545401395a2e07f109441db4751a5dcf8f0a4 | [
"Apache-2.0"
]
| null | null | null | generate_figure9.py | IBM/Simultaneous-diagonalization | 385545401395a2e07f109441db4751a5dcf8f0a4 | [
"Apache-2.0"
]
| 1 | 2022-03-14T18:36:12.000Z | 2022-03-14T18:36:12.000Z | # Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# E. van den Berg and Kristan Temme, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import os
import cl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
from itertools import permutations
def plotZ(Z, exportFilename=None) :
(m,n) = Z.shape
cmap = colors.LinearSegmentedColormap.from_list("white_and_gray", [(1, 1, 1), (0.6, 0.6, 0.6)], N=2)
fig, ax = plt.subplots()
im = ax.imshow(Z.T,cmap=cmap)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
for i in range(1,m) :
plt.plot([-0.5+i,-0.5+i],[-0.5,-0.5+n],color='k',linewidth=0.7)
for i in range(1,T.n) :
plt.plot([-0.5,-0.5+m],[-0.5+i,-0.5+i],color='k',linewidth=0.7)
for i in range(n) :
v = Z[:,i]
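        # the value drawn next to each column: transitions between consecutive entries plus the first and last entry (its CNOT cost)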
c = np.sum(v[:-1] != v[1:]) + v[0] + v[-1]
ax.text(m-0.25,i, str(c), fontsize=12, ha='left', va='center')
if (exportFilename) :
plt.gcf().tight_layout()
plt.savefig(exportFilename + "-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop %s-uncropped.pdf %s.pdf" % (exportFilename, exportFilename))
else :
plt.show()
# Make sure the figure directory exists
cl.ensureDirExists('fig')
# Create the test problem
M = cl.create_basic_problem(7,0)
C = cl.generate_full_rank_weights(20,7,seed=1)
M = np.dot(C,M) % 2
# Apply diagonalization and get the final Z matrix
T = cl.Tableau(M)
R = cl.RecordOperations(T.n)
T.addRecorder(R)
cl.zeroX_algorithm1_cz(T)
T = cl.Tableau(M)
R.apply(T)
Z = T.getZ()
# Plot the results
plotZ(Z,'fig/Figure_9a')
print("Original: %d" % cl.countCNot(Z))
idx = cl.orderZ(Z)
plotZ(Z[idx,:],'fig/Figure_9b')
print("Sorted : %d" % cl.countCNot(Z[idx,:]))
# Generate histogram of actual permutations
if (True) :
base = list(range(7))
count = []
for idx2 in permutations(base) :
idx1 = cl.orderZ(Z[:,idx2])
count.append(cl.countCNot(Z[idx1,:][:,idx2]))
def format_percentage(y, position):
return str(100 * y)
# Count is always even
plt.hist(count,bins=list(range(min(count)-1,max(count)+2,2)),rwidth=0.9,density=True)
plt.gca().set_xticklabels([str(x) for x in range(min(count),max(count)+1,2)],fontsize=16)
plt.gca().set_xticks(list(range(min(count),max(count)+1,2)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(format_percentage))
plt.xlabel('Number of CNOT gates',fontsize=16)
plt.ylabel("Percentage",fontsize=16)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
plt.gcf().tight_layout()
ratio = 0.5
xleft, xright = plt.gca().get_xlim()
ybottom, ytop = plt.gca().get_ylim()
plt.gca().set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
plt.savefig("fig/Figure_9c-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf")
| 31.905983 | 103 | 0.682561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,351 | 0.361907 |
c7e5bf2a376cfb8077d1056296fc71ad74e416d7 | 793 | py | Python | undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
]
| 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z | undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
]
| 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z | undeployed/legacy/Landsat/L7GapFiller_ArcInterface.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
]
| 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: qgeddes
#
# Created: 25/04/2013
# Copyright: (c) qgeddes 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import L7GapFiller
Scenes=arcpy.GetParameterAsText(0)
Scenes=Scenes.split(";")
OutputFolder=arcpy.GetParameterAsText(1)
OutputFile= arcpy.GetParameterAsText(2)
Output=OutputFolder+"\\"+OutputFile
CloudMasks= arcpy.GetParameterAsText(3)
CloudMasks= CloudMasks.split(";")
Z=arcpy.GetParameter(4)
arcpy.AddMessage(Z)
arcpy.env.scratchWorkspace=OutputFolder
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput=True
L7GapFiller.L7GapFill(Scenes, Output,CloudMasks,Z)
| 26.433333 | 80 | 0.600252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.403531 |
c7e62258b56e4e6157b37bc5877b4350133a63c1 | 1,676 | py | Python | tests/sentry/api/serializers/test_saved_search.py | practo/sentry | 82f530970ce205696469fa702246396acfd947a1 | [
"BSD-3-Clause"
]
| 4 | 2019-05-27T13:55:07.000Z | 2021-03-30T07:05:09.000Z | tests/sentry/api/serializers/test_saved_search.py | practo/sentry | 82f530970ce205696469fa702246396acfd947a1 | [
"BSD-3-Clause"
]
| 99 | 2019-05-20T14:16:33.000Z | 2021-01-19T09:25:15.000Z | tests/sentry/api/serializers/test_saved_search.py | practo/sentry | 82f530970ce205696469fa702246396acfd947a1 | [
"BSD-3-Clause"
]
| 1 | 2020-08-10T07:55:40.000Z | 2020-08-10T07:55:40.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import SavedSearch
from sentry.models.savedsearch import DEFAULT_SAVED_SEARCHES
from sentry.testutils import TestCase
class SavedSearchSerializerTest(TestCase):
def test_simple(self):
search = SavedSearch.objects.create(
project=self.project,
name='Something',
query='some query'
)
result = serialize(search)
assert result['id'] == six.text_type(search.id)
assert result['projectId'] == six.text_type(search.project_id)
assert result['name'] == search.name
assert result['query'] == search.query
assert result['isDefault'] == search.is_default
assert result['isUserDefault'] == search.is_default
assert result['dateCreated'] == search.date_added
assert not result['isPrivate']
assert not result['isGlobal']
def test_global(self):
default_saved_search = DEFAULT_SAVED_SEARCHES[0]
search = SavedSearch(
name=default_saved_search['name'],
query=default_saved_search['query'],
is_global=True,
)
result = serialize(search)
assert result['id'] == six.text_type(search.id)
assert result['projectId'] is None
assert result['name'] == search.name
assert result['query'] == search.query
assert not result['isDefault']
assert not result['isUserDefault']
assert result['dateCreated'] == search.date_added
assert not result['isPrivate']
assert result['isGlobal']
| 33.52 | 70 | 0.648568 | 1,414 | 0.843675 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.140215 |
c7e63e3b77d732305764d664c862b2625865bf3a | 864 | py | Python | xastropy/files/general.py | bpholden/xastropy | 66aff0995a84c6829da65996d2379ba4c946dabe | [
"BSD-3-Clause"
]
| 3 | 2015-08-23T00:32:58.000Z | 2020-12-31T02:37:52.000Z | xastropy/files/general.py | Kristall-WangShiwei/xastropy | 723fe56cb48d5a5c4cdded839082ee12ef8c6732 | [
"BSD-3-Clause"
]
| 104 | 2015-07-17T18:31:54.000Z | 2018-06-29T17:04:09.000Z | xastropy/files/general.py | Kristall-WangShiwei/xastropy | 723fe56cb48d5a5c4cdded839082ee12ef8c6732 | [
"BSD-3-Clause"
]
| 16 | 2015-07-17T15:50:37.000Z | 2019-04-21T03:42:47.000Z | """
#;+
#; NAME:
#; general
#; Version 1.0
#;
#; PURPOSE:
#; Module for monkeying with files and filenames
#; 172Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
# Import libraries
import numpy as np
from astropy.io import fits
from astropy.io import ascii
import os, pdb
#### ###############################
# Deal with .gz extensions, usually on FITS files
# See if filenm exists, if so pass it back
#
def chk_for_gz(filenm,chk=None):
# File exist?
if os.path.lexists(filenm):
chk=1
return filenm, chk
# .gz already
if filenm.find('.gz') > 0:
chk=0
return filenm, chk
# Add .gz
if os.path.lexists(filenm+'.gz'):
chk=1
return filenm+'.gz', chk
else:
chk=0
return filenm, chk
| 19.2 | 80 | 0.508102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.491898 |
c7e69418daeb84532c16aa76c96e7a0136b72521 | 655 | py | Python | setup.py | muatik/genderizer | 9866bf0371d1d984f6c4465ff78025d911f6a648 | [
"MIT"
]
| 54 | 2015-01-19T22:53:48.000Z | 2021-06-23T03:48:05.000Z | setup.py | nejdetckenobi/genderizer | 9866bf0371d1d984f6c4465ff78025d911f6a648 | [
"MIT"
]
| 4 | 2016-05-23T13:52:12.000Z | 2021-05-14T10:24:37.000Z | setup.py | nejdetckenobi/genderizer | 9866bf0371d1d984f6c4465ff78025d911f6a648 | [
"MIT"
]
| 18 | 2015-01-30T00:06:40.000Z | 2021-03-12T14:56:12.000Z | #!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='genderizer',
version='0.1.2.3',
license='MIT',
description='Genderizer tries to infer gender information looking at first name and/or making text analysis',
long_description=open('README.md').read(),
url='https://github.com/muatik/genderizer',
author='Mustafa Atik',
author_email='[email protected]',
maintainer='Mustafa Atik',
maintainer_email='[email protected]',
packages=['genderizer'],
package_data={'genderizer': ['data/*']},
platforms='any') | 31.190476 | 115 | 0.668702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.447328 |
c7e75b487c0cdec2958e2495ad3a66ff9804a5e3 | 1,855 | py | Python | ingestion/tests/unit/great_expectations/test_ometa_validation_action.py | ulixius9/OpenMetadata | f121698d968717f0932f685ef2a512c2a4d92438 | [
"Apache-2.0"
]
| null | null | null | ingestion/tests/unit/great_expectations/test_ometa_validation_action.py | ulixius9/OpenMetadata | f121698d968717f0932f685ef2a512c2a4d92438 | [
"Apache-2.0"
]
| null | null | null | ingestion/tests/unit/great_expectations/test_ometa_validation_action.py | ulixius9/OpenMetadata | f121698d968717f0932f685ef2a512c2a4d92438 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test suite for the action module implementation
"""
import os
from unittest import mock
from jinja2 import Environment
from pytest import mark
from metadata.great_expectations.action import OpenMetadataValidationAction
from metadata.great_expectations.utils.ometa_config_handler import render_template
@mark.parametrize(
"input,expected",
[
(None, "list_entities"),
("service_name", "get_by_name"),
],
)
def test_get_table_entity(input, expected, mocked_ometa, mocked_ge_data_context):
"""Test get table entity"""
ometa_validation = OpenMetadataValidationAction(
data_context=mocked_ge_data_context,
config_file_path="my/config/path",
ometa_service_name=input,
)
res = ometa_validation._get_table_entity("database", "schema", "table")
assert res._type == expected
def test_create_jinja_environment(fixture_jinja_environment):
"""Test create jinja environment"""
assert isinstance(fixture_jinja_environment, Environment)
@mock.patch.dict(os.environ, {"API_VERSION": "v1"})
def test_render_template(fixture_jinja_environment):
"""Test create jinja environment"""
tmplt = render_template(fixture_jinja_environment)
assert tmplt == "hostPort: http://localhost:8585\napiVersion: v1"
| 34.351852 | 82 | 0.755256 | 0 | 0 | 0 | 0 | 803 | 0.432884 | 0 | 0 | 878 | 0.473315 |
c7e7bdfc8b236f444e8faf6ff083ca3ec5dec358 | 1,285 | py | Python | tests/integration/Containers.py | adnrs96/runtime | e824224317e6aa108cf06968474fc44fa33488d6 | [
"Apache-2.0"
]
| null | null | null | tests/integration/Containers.py | adnrs96/runtime | e824224317e6aa108cf06968474fc44fa33488d6 | [
"Apache-2.0"
]
| null | null | null | tests/integration/Containers.py | adnrs96/runtime | e824224317e6aa108cf06968474fc44fa33488d6 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
from storyruntime.Containers import Containers
from storyruntime.constants.ServiceConstants import ServiceConstants
import storyscript
def test_containers_format_command(story):
"""
Ensures a simple resolve can be performed
"""
story_text = 'alpine echo msg:"foo"\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
'arguments': {'msg': {'type': 'string'}}
}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo', '{"msg":"foo"}']
def test_containers_format_command_no_arguments(story):
story_text = 'alpine echo\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo']
| 26.770833 | 68 | 0.529183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.209339 |
c7e91e12c70be5743a54ddceae5d419516ca3301 | 1,367 | py | Python | project_name/core/admin.py | cosmunsoftwares/django-boilerplate | 147aa7f59901d0fb95d41acf8ec118c6830267f8 | [
"MIT"
]
| 3 | 2018-11-30T19:51:35.000Z | 2020-10-20T00:28:49.000Z | project_name/core/admin.py | cosmun-softwares/django-boilerplate | 147aa7f59901d0fb95d41acf8ec118c6830267f8 | [
"MIT"
]
| 6 | 2020-04-09T20:00:45.000Z | 2022-02-10T08:25:47.000Z | project_name/core/admin.py | cosmunsoftwares/django-boilerplate | 147aa7f59901d0fb95d41acf8ec118c6830267f8 | [
"MIT"
]
| 1 | 2018-08-27T21:44:44.000Z | 2018-08-27T21:44:44.000Z | from django.contrib import admin
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.contrib.admin.widgets import AdminFileWidget
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None, renderer=None):
output = []
if value and getattr(value, "url", None):
output.append(u'<a href="%s" target="_blank">%s</a>' % (value.url, thumbnail(value)))
output.append(super(AdminFileWidget, self).render(name, value, attrs, renderer))
return mark_safe(u''.join(output))
class ImageWidgetAdmin(admin.ModelAdmin):
image_fields = []
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.image_fields:
kwargs.pop("request", None)
kwargs['widget'] = AdminImageWidget
return db_field.formfield(**kwargs)
return super(ImageWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def redirect_one_object(model, obj):
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/add/')
if obj:
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/')
return response
def thumbnail(obj, size='col-md-2'):
return mark_safe('<img src="{}" class="img-thumbnail {} p-0">'.format(obj.url, size))
| 37.972222 | 104 | 0.688369 | 793 | 0.580102 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.187271 |
c7e9c8cc7086c2b1fd149895cfcda90298ab4af1 | 1,222 | py | Python | src/5vents.py | subhash686/aoc-2021 | a01fa07f94148b7072c3ba4c854b546862d3486a | [
"Apache-2.0"
]
| null | null | null | src/5vents.py | subhash686/aoc-2021 | a01fa07f94148b7072c3ba4c854b546862d3486a | [
"Apache-2.0"
]
| null | null | null | src/5vents.py | subhash686/aoc-2021 | a01fa07f94148b7072c3ba4c854b546862d3486a | [
"Apache-2.0"
]
| null | null | null | import os
plane = [[0 for i in range(1000)] for j in range(1000)]
count = [0]
def overlapping_vents():
path = os.getcwd()
file_path = os.path.join(path, 'vents.txt')
file1 = open(file_path, 'r')
Lines = file1.readlines()
for line in Lines:
input = line.strip()
points = input.split(" -> ")
plot(points[0], points[1])
print(count[0])
def plot(point1, point2):
p1 = point1.split(",")
p2 = point2.split(",")
x1 = int(p1[0])
x2 = int(p2[0])
y1 = int(p1[1])
y2 = int(p2[1])
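    # four cases: a single point, a vertical line (x1 == x2), a horizontal line
    # (y1 == y2), and a diagonal line walked through its slope and intercept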
if x1 == x2 and y1 == y2:
addpoints(x1, y1)
elif x1 == x2:
if y1 > y2:
y1, y2 = y2, y1
for y in range(y1, y2+1):
addpoints(x1, y)
elif y1 == y2:
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, y1)
else:
slope = (y2-y1)/ (x2-x1)
intercept = y1 - (x1 * slope)
if x1 > x2:
x1, x2 = x2, x1
for x in range(x1, x2+1):
addpoints(x, int(x*slope)+int(intercept))
def addpoints(x, y):
if plane[x][y] == 1:
count[0] +=1
plane[x][y] += 1
if __name__ == "__main__":
overlapping_vents()
| 22.218182 | 55 | 0.488543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.02946 |
c7eb057d4134335a7eb1bab05618a4866e334bff | 1,217 | py | Python | problems/test_0073_m_plus_n_space.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
]
| 1 | 2017-06-17T23:47:17.000Z | 2017-06-17T23:47:17.000Z | problems/test_0073_m_plus_n_space.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
]
| null | null | null | problems/test_0073_m_plus_n_space.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
]
| null | null | null | import unittest
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
rows = [0] * len(matrix)
cols = [0] * len(matrix[0])
for i, row in enumerate(matrix):
for j, num in enumerate(row):
if not num:
rows[i] = 1
cols[j] = 1
for row, num in enumerate(rows):
if num:
for j in range(len(matrix[0])):
matrix[row][j] = 0
for col, num in enumerate(cols):
if num:
for i in range(len(matrix)):
matrix[i][col] = 0
class Test(unittest.TestCase):
def test(self):
self._test(
[
[1, 2, 0],
[1, 2, 3],
[0, 2, 3],
],
[
[0, 0, 0],
[0, 2, 0],
[0, 0, 0],
]
)
def _test(self, matrix, expected):
Solution().setZeroes(matrix)
self.assertEqual(expected, matrix)
if __name__ == '__main__':
unittest.main()
| 23.403846 | 76 | 0.419063 | 1,146 | 0.94166 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.115037 |
c7eb49aae87e95e2b4d243e5c05c7251bfbcbd52 | 2,508 | py | Python | xlsxwriter/test/worksheet/test_write_print_options.py | Aeon1/XlsxWriter | 6871b6c3fe6c294632054ea91f23d9e27068bcc1 | [
"BSD-2-Clause-FreeBSD"
]
| 2 | 2019-07-25T06:08:09.000Z | 2019-11-01T02:33:56.000Z | xlsxwriter/test/worksheet/test_write_print_options.py | Aeon1/XlsxWriter | 6871b6c3fe6c294632054ea91f23d9e27068bcc1 | [
"BSD-2-Clause-FreeBSD"
]
| 13 | 2019-07-14T00:29:05.000Z | 2019-11-26T06:16:46.000Z | xlsxwriter/test/worksheet/test_write_print_options.py | Aeon1/XlsxWriter | 6871b6c3fe6c294632054ea91f23d9e27068bcc1 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWritePrintOptions(unittest.TestCase):
"""
Test the Worksheet _write_print_options() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_print_options_default(self):
"""Test the _write_print_options() method without options"""
self.worksheet._write_print_options()
exp = """"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_hcenter(self):
"""Test the _write_print_options() method with horizontal center"""
self.worksheet.center_horizontally()
self.worksheet._write_print_options()
exp = """<printOptions horizontalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_vcenter(self):
"""Test the _write_print_options() method with vertical center"""
self.worksheet.center_vertically()
self.worksheet._write_print_options()
exp = """<printOptions verticalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_center(self):
"""Test the _write_print_options() method with horiz + vert center"""
self.worksheet.center_horizontally()
self.worksheet.center_vertically()
self.worksheet._write_print_options()
exp = """<printOptions horizontalCentered="1" verticalCentered="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_gridlines_default(self):
"""Test the _write_print_options() method with default value"""
self.worksheet.hide_gridlines()
self.worksheet._write_print_options()
exp = """"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_print_options_gridlines_0(self):
"""Test the _write_print_options() method with 0 value"""
self.worksheet.hide_gridlines(0)
self.worksheet._write_print_options()
exp = """<printOptions gridLines="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| 28.179775 | 79 | 0.637161 | 2,243 | 0.894338 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.323365 |
c7ebfcaf02d689a33ed8274d051230038106dff7 | 1,011 | py | Python | neo4j_helper.py | smartaec/OpenBridgeGraph | 61ca64ed339af4e77d928f83934a308277a79d81 | [
"MIT"
]
| null | null | null | neo4j_helper.py | smartaec/OpenBridgeGraph | 61ca64ed339af4e77d928f83934a308277a79d81 | [
"MIT"
]
| null | null | null | neo4j_helper.py | smartaec/OpenBridgeGraph | 61ca64ed339af4e77d928f83934a308277a79d81 | [
"MIT"
]
| null | null | null | from neo4j.v1 import GraphDatabase #neo4j==1.7.0
uri="bolt://localhost:7687"
driver=GraphDatabase.driver(uri, auth=("neo4j", "testneo4j"))
def execute_queries(scripts,message=None):
    # the driver accepts a single Cypher statement per run() call, so execute
    # the scripts one at a time inside one transaction
    with driver.session() as session:
        tx=session.begin_transaction()
        res=None
        for script in scripts:
            res=tx.run(script)
        tx.commit()
        return res
def execute_query(script,message=None):
with driver.session() as session:
return session.run(script,message)
def execute_read(cypher_func,message):
with driver.session() as session:
return session.read_transaction(cypher_func,message)
def execute_write(cypher_func,message):
with driver.session() as session:
return session.write_transaction(cypher_func,message)
def run_query(tx,script):
return tx.run(script)
def print_query(tx,name):
for record in tx.run("MATCH (a:Person)-[:KNOWS]->(f) WHERE a.name = {name} RETURN f.name",name=name):
print(record["f.name"])
return ""
#execute_read(print_query,'Alice') | 29.735294 | 105 | 0.69634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.167161 |
c7edb1043a4f03dfdc950843e15b617197779da3 | 9,077 | py | Python | tests/unit/test_juju.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
]
| null | null | null | tests/unit/test_juju.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
]
| null | null | null | tests/unit/test_juju.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
]
| null | null | null | import os
import tempfile
import mock
from . import utils
from hotsos.core.config import setup_config
from hotsos.core.ycheck.scenarios import YScenarioChecker
from hotsos.core.issues.utils import KnownBugsStore, IssuesStore
from hotsos.plugin_extensions.juju import summary
JOURNALCTL_CAPPEDPOSITIONLOST = """
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] CollectionCloner ns:juju.txns.log finished cloning with status: QueryPlanKilled: PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)
Dec 21 14:07:53 juju-1 mongod.37017[17873]: [replication-18] collection clone for 'juju.txns.log' failed due to QueryPlanKilled: While cloning collection 'juju.txns.log' there was an error 'PlanExecutor killed: CappedPositionLost: CollectionScan died due to position in capped collection being deleted. Last seen record id: RecordId(204021366)'
""" # noqa
RABBITMQ_CHARM_LOGS = """
2021-02-17 08:18:44 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
2021-02-17 08:20:34 ERROR juju.worker.dependency engine.go:671 "uniter" manifold worker returned unexpected error: failed to initialize uniter for "unit-rabbitmq-server-0": cannot create relation state tracker: cannot remove persisted state, relation 236 has members
""" # noqa
UNIT_LEADERSHIP_ERROR = """
2021-09-16 10:28:25 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:28:47 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:06 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:29:53 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
2021-09-16 10:30:41 WARNING leader-elected ERROR cannot write leadership settings: cannot write settings: failed to merge leadership settings: application "keystone": prerequisites failed: "keystone/2" is not leader of "keystone"
""" # noqa
class JujuTestsBase(utils.BaseTestCase):
def setUp(self):
super().setUp()
setup_config(PLUGIN_NAME='juju')
class TestJujuSummary(JujuTestsBase):
def test_summary_keys(self):
inst = summary.JujuSummary()
self.assertEqual(list(inst.output.keys()),
['charm-repo-info',
'charms',
'machine',
'services',
'units',
'version'])
def test_service_info(self):
expected = {'ps': ['jujud (1)'],
'systemd': {
'enabled': ['jujud-machine-1']}
}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['services'],
expected)
def test_machine_info(self):
inst = summary.JujuSummary()
self.assertTrue(inst.plugin_runnable)
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.22')
self.assertEqual(actual['machine'], '1')
@mock.patch('hotsos.core.plugins.juju.JujuMachine')
def test_get_lxd_machine_info(self, mock_machine):
mock_machine.return_value = mock.MagicMock()
mock_machine.return_value.id = '0-lxd-11'
mock_machine.return_value.version = '2.9.9'
inst = summary.JujuSummary()
actual = self.part_output_to_actual(inst.output)
self.assertEqual(actual['version'], '2.9.9')
self.assertEqual(actual['machine'], '0-lxd-11')
def test_charm_versions(self):
expected = ['ceph-osd-508', 'neutron-openvswitch-457',
'nova-compute-589']
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['charms'],
expected)
def test_get_unit_info(self):
expected = {'local': ['ceph-osd-0', 'neutron-openvswitch-1',
'nova-compute-0']}
inst = summary.JujuSummary()
self.assertEqual(self.part_output_to_actual(inst.output)['units'],
expected)
class TestJujuScenarios(JujuTestsBase):
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
def test_1852502(self, mock_helper):
mock_helper.return_value = mock.MagicMock()
mock_helper.return_value.journalctl.return_value = \
JOURNALCTL_CAPPEDPOSITIONLOST.splitlines(keepends=True)
YScenarioChecker()()
mock_helper.return_value.journalctl.assert_called_with(
unit='juju-db')
msg_1852502 = ('known mongodb bug identified - '
'https://jira.mongodb.org/browse/TOOLS-1636 '
'Workaround is to pass --no-logs to juju '
'create-backup. This is an issue only with Mongo '
'3. Mongo 4 does not have this issue. Upstream is '
'working on migrating to Mongo 4 in the Juju 3.0 '
'release.')
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1852502',
'desc': msg_1852502,
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('juju_core_bugs.yaml'))
def test_1910958(self):
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-rabbitmq-server-0.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(RABBITMQ_CHARM_LOGS)
YScenarioChecker()()
expected = {'bugs-detected':
[{'id': 'https://bugs.launchpad.net/bugs/1910958',
'desc':
('Unit unit-rabbitmq-server-0 failed to start due '
'to members in relation 236 that cannot be '
'removed.'),
'origin': 'juju.01part'}]}
self.assertEqual(KnownBugsStore().load(), expected)
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('jujud_checks.yaml'))
@mock.patch('hotsos.core.host_helpers.systemd.ServiceChecksBase.processes',
{})
def test_jujud_checks(self):
YScenarioChecker()()
msg = ('No jujud processes found running on this host but it seems '
'there should be since Juju is installed.')
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
@mock.patch('hotsos.core.ycheck.engine.properties.CLIHelper')
@mock.patch('hotsos.core.ycheck.engine.YDefsLoader._is_def',
new=utils.is_def_filter('charm_checks.yaml'))
def test_unit_checks(self, mock_cli):
mock_cli.return_value = mock.MagicMock()
with tempfile.TemporaryDirectory() as dtmp:
setup_config(DATA_ROOT=dtmp)
logfile = os.path.join(dtmp,
'var/log/juju/unit-keystone-2.log')
os.makedirs(os.path.dirname(logfile))
with open(logfile, 'w') as fd:
fd.write(UNIT_LEADERSHIP_ERROR)
# first try outside age limit
mock_cli.return_value.date.return_value = "2021-09-25 00:00:00"
YScenarioChecker()()
self.assertEqual(IssuesStore().load(), {})
# then within
mock_cli.return_value.date.return_value = "2021-09-17 00:00:00"
YScenarioChecker()()
msg = ("Juju unit(s) 'keystone' are showing leadership errors in "
"their logs from the last 7 days. Please investigate.")
issues = list(IssuesStore().load().values())[0]
self.assertEqual([issue['desc'] for issue in issues], [msg])
| 51.282486 | 344 | 0.637435 | 6,335 | 0.697918 | 0 | 0 | 4,551 | 0.501377 | 0 | 0 | 4,126 | 0.454555 |
c7ef7d842b61d4e084cbe5d2d84903334c53e8d0 | 9,626 | py | Python | tools/SPGAN/main.py | by-liu/OpenUnReID | 2260d8e16588a992631c9c84e6cee4304ae8593d | [
"Apache-2.0"
]
| null | null | null | tools/SPGAN/main.py | by-liu/OpenUnReID | 2260d8e16588a992631c9c84e6cee4304ae8593d | [
"Apache-2.0"
]
| null | null | null | tools/SPGAN/main.py | by-liu/OpenUnReID | 2260d8e16588a992631c9c84e6cee4304ae8593d | [
"Apache-2.0"
]
| null | null | null | import argparse
import collections
import shutil
import sys
import time
from datetime import timedelta
from pathlib import Path
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
try:
# PyTorch >= 1.6 supports mixed precision training
from torch.cuda.amp import autocast
amp_support = True
except:
amp_support = False
from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
from openunreid.models import build_gan_model
from openunreid.models.losses import build_loss
from openunreid.models.utils.extract import extract_features
from openunreid.utils.config import (
cfg,
cfg_from_list,
cfg_from_yaml_file,
log_config_to_file,
)
from openunreid.utils.dist_utils import init_dist, synchronize
from openunreid.utils.file_utils import mkdir_if_missing
from openunreid.utils.logger import Logger
class SPGANRunner(GANBaseRunner):
def train_step(self, iter, batch):
data_src, data_tgt = batch[0], batch[1]
self.real_A = data_src['img'].cuda()
self.real_B = data_tgt['img'].cuda()
# Forward
self.fake_B = self.model['G_A'](self.real_A) # G_A(A)
self.fake_A = self.model['G_B'](self.real_B) # G_B(B)
self.rec_A = self.model['G_B'](self.fake_B) # G_B(G_A(A))
self.rec_B = self.model['G_A'](self.fake_A) # G_A(G_B(B))
# G_A and G_B
if iter % 2 == 0:
self.set_requires_grad([self.model['D_A'], self.model['D_B'], self.model['Metric']], False) # save memory
if self.scaler is None:
self.optimizer['G'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['G'].zero_grad()
if self._epoch > 1:
self.backward_G(retain_graph=True)
self.backward_GM()
else:
self.backward_G()
if self.scaler is None:
self.optimizer['G'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['G'])
# SiaNet for SPGAN
if self._epoch > 0:
self.set_requires_grad([self.model['Metric']], True)
if self.scaler is None:
self.optimizer['Metric'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['Metric'].zero_grad()
self.backward_M()
if self.scaler is None:
self.optimizer['Metric'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['Metric'])
# D_A and D_B
self.set_requires_grad([self.model['D_A'], self.model['D_B']], True)
# self.optimizer['D'].zero_grad()
# self.backward_D()
# self.optimizer['D'].step()
if self.scaler is None:
self.optimizer['D'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['D'].zero_grad()
self.backward_D()
if self.scaler is None:
self.optimizer['D'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['D'])
# save translated images
if self._rank == 0:
self.save_imgs(['real_A', 'real_B', 'fake_A', 'fake_B', 'rec_A', 'rec_B'])
return 0
def backward_GM(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A)
fake_B_metric = self.model['Metric'](self.fake_B)
# positive pairs
loss_pos = self.criterions['sia_G'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_G'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_G'](fake_B_metric, real_B_metric, 0) + \
self.criterions['sia_G'](fake_A_metric, real_A_metric, 0)
loss_M = (loss_pos + 0.5 * loss_neg) / 4.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_G']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_G': loss_M.item()}
self.train_progress.update(meters)
def backward_M(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A.detach())
fake_B_metric = self.model['Metric'](self.fake_B.detach())
# positive pairs
loss_pos = self.criterions['sia_M'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_M'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_M'](real_A_metric, real_B_metric, 0)
loss_M = (loss_pos + 2 * loss_neg) / 3.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_M']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_M': loss_M.item()}
self.train_progress.update(meters)
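# Illustrative sketch (not part of the original trainer): the two backward
# passes above implement SPGAN's SiaNet contrastive objective over
# positive/negative image pairs. Assuming the configured 'sia_G'/'sia_M'
# criterions behave like a standard margin-based contrastive loss, an
# equivalent standalone form would look roughly like this.
def _contrastive_loss_sketch(feat_a, feat_b, label, margin=2.0):
    # Euclidean distance between the two embeddings
    dist = (feat_a - feat_b).pow(2).sum(dim=1).clamp(min=1e-12).sqrt()
    pos = label * dist.pow(2)  # pull positive pairs (label=1) together
    neg = (1 - label) * torch.clamp(margin - dist, min=0).pow(2)  # push negatives apart
    return (pos + neg).mean()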
def parse_config():
parser = argparse.ArgumentParser(description="SPGAN training")
parser.add_argument("config", help="train config file path")
parser.add_argument(
"--work-dir", help="the dir to save logs and models", default=""
)
parser.add_argument("--resume-from", help="the checkpoint file to resume from")
parser.add_argument(
"--launcher",
type=str,
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--tcp-port", type=str, default="5017")
parser.add_argument(
"--set",
dest="set_cfgs",
default=None,
nargs=argparse.REMAINDER,
help="set extra config keys if needed",
)
args = parser.parse_args()
cfg_from_yaml_file(args.config, cfg)
assert len(list(cfg.TRAIN.datasets.keys()))==2, \
"the number of datasets for domain-translation training should be two"
cfg.launcher = args.launcher
cfg.tcp_port = args.tcp_port
if not args.work_dir:
args.work_dir = Path(args.config).stem
cfg.work_dir = cfg.LOGS_ROOT / args.work_dir
mkdir_if_missing(cfg.work_dir)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
shutil.copy(args.config, cfg.work_dir / "config.yaml")
return args, cfg
def main():
start_time = time.monotonic()
# init distributed training
    args, cfg = parse_config()
dist = init_dist(cfg)
set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
synchronize()
# init logging file
logger = Logger(cfg.work_dir / 'log.txt', debug=False)
sys.stdout = logger
print("==========\nArgs:{}\n==========".format(args))
log_config_to_file(cfg)
# build train loader
train_loader, _ = build_train_dataloader(cfg, joint=False)
# build model
model = build_gan_model(cfg)
for key in model.keys():
model[key].cuda()
if dist:
ddp_cfg = {
"device_ids": [cfg.gpu],
"output_device": cfg.gpu,
"find_unused_parameters": True,
}
for key in model.keys():
model[key] = torch.nn.parallel.DistributedDataParallel(model[key], **ddp_cfg)
elif cfg.total_gpus > 1:
for key in model.keys():
model[key] = torch.nn.DataParallel(model[key])
# build optimizer
optimizer = {}
optimizer['G'] = build_optimizer([model['G_A'], model['G_B']], **cfg.TRAIN.OPTIM)
optimizer['D'] = build_optimizer([model['D_A'], model['D_B']], **cfg.TRAIN.OPTIM)
optimizer['Metric'] = build_optimizer([model['Metric']], **cfg.TRAIN.OPTIM)
# build lr_scheduler
if cfg.TRAIN.SCHEDULER.lr_scheduler is not None:
lr_scheduler = [build_lr_scheduler(optimizer[key], **cfg.TRAIN.SCHEDULER) \
for key in optimizer.keys()]
else:
lr_scheduler = None
# build loss functions
criterions = build_loss(cfg.TRAIN.LOSS, cuda=True)
# build runner
runner = SPGANRunner(
cfg,
model,
optimizer,
criterions,
train_loader,
lr_scheduler=lr_scheduler,
meter_formats={"Time": ":.3f"}
)
# resume
if args.resume_from:
runner.resume(args.resume_from)
# start training
runner.run()
# load the latest model
# runner.resume(cfg.work_dir)
# final inference
test_loader, _ = build_val_dataloader(
cfg,
for_clustering=True,
all_datasets=True
)
# source to target
infer_gan(
cfg,
model['G_A'],
test_loader[0],
dataset_name=list(cfg.TRAIN.datasets.keys())[0]
)
# target to source
infer_gan(
cfg,
model['G_B'],
test_loader[1],
dataset_name=list(cfg.TRAIN.datasets.keys())[1]
)
# print time
end_time = time.monotonic()
print("Total running time: ", timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
main()
| 31.980066 | 117 | 0.60108 | 4,495 | 0.466964 | 0 | 0 | 0 | 0 | 0 | 0 | 1,468 | 0.152504 |
c7efcc01c957ea47bff3471d2bc47b9aa1291cde | 1,907 | py | Python | utility/data_download.py | LatvianPython/wind-experience | b634c020dff0a01152bb95b38e5f6f0e368d47f5 | [
"MIT"
]
| 2 | 2018-12-20T20:31:21.000Z | 2018-12-29T14:51:42.000Z | utility/data_download.py | LatvianPython/wind-experience | b634c020dff0a01152bb95b38e5f6f0e368d47f5 | [
"MIT"
]
| null | null | null | utility/data_download.py | LatvianPython/wind-experience | b634c020dff0a01152bb95b38e5f6f0e368d47f5 | [
"MIT"
]
| null | null | null | import logging
import requests
import multiprocessing
import pathlib
from typing import List
from typing import Optional
from typing import Tuple
from typing import Dict
from joblib import delayed
from joblib import Parallel
from datetime import date
from datetime import timedelta
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def next_date(start_date=date(2018, 3, 1)):
days_to_download = abs(start_date - date.today()).days - 5
for date_offset in range(days_to_download):
yield start_date
start_date = start_date + timedelta(days=1)
def download_all(inputs: List[Tuple[pathlib.Path, str]], cookies: Optional[Dict]):
session = requests.session()
inputs[0][0].parent.mkdir(parents=True, exist_ok=True)
def download_single_link(file_path: pathlib.Path, url):
thread_nr = multiprocessing.current_process().name
thread_nr = thread_nr[thread_nr.rfind('-') + 1:]
file_name = file_path.stem
if file_path.is_file():
logger.info('{} {} already exists'.format(thread_nr, file_name))
return
try:
response = session.get(url=url, cookies=cookies)
except TimeoutError:
logger.critical('{} Timeout Error'.format(thread_nr))
return
content = response.content.decode('utf-8')
if response.status_code != 200:
            logger.critical('{} {} {}'.format(thread_nr, url, response.status_code))
            logger.critical('{} {}'.format(thread_nr, content))
return
else:
logger.info('{} {} {} OK'.format(thread_nr, file_name, response.status_code))
with open(str(file_path), mode='w', encoding='utf-8') as output_file:
output_file.write(content)
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(download_single_link)(*j) for j in inputs)
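# Illustrative usage sketch (the URL template and output directory are
# hypothetical placeholders, not part of the original module): pair each date
# from next_date() with a target path and hand the list to download_all().
def _example_usage():
    base_dir = pathlib.Path("downloads")
    inputs = [
        (base_dir / "{}.csv".format(day.isoformat()),
         "https://example.com/data?date={}".format(day.isoformat()))
        for day in next_date()
    ]
    download_all(inputs, cookies=None)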
| 32.322034 | 89 | 0.677504 | 0 | 0 | 231 | 0.121133 | 0 | 0 | 0 | 0 | 84 | 0.044048 |
c7f2afbcc386f15d0c1677f0f7647f383dcc88bb | 7,625 | py | Python | model/net_qspline_A.py | jercoco/QSQF | 6c435f8d4e1baf1937b06a52e63446f9a29f5ad8 | [
"Apache-2.0"
]
| null | null | null | model/net_qspline_A.py | jercoco/QSQF | 6c435f8d4e1baf1937b06a52e63446f9a29f5ad8 | [
"Apache-2.0"
]
| null | null | null | model/net_qspline_A.py | jercoco/QSQF | 6c435f8d4e1baf1937b06a52e63446f9a29f5ad8 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 19:52:22 2020
#Plan A
@author: 18096
"""
'''Defines the neural network, loss function and metrics'''
#from functools import reduce
import torch
import torch.nn as nn
from torch.nn.functional import pad
from torch.autograd import Variable
import logging
logger = logging.getLogger('DeepAR.Net')
class Net(nn.Module):
def __init__(self, params,device):
'''
We define a recurrent network that predicts the future values
of a time-dependent variable based on past inputs and covariates.
'''
super(Net, self).__init__()
self.params = params
self.device = device
self.lstm = nn.LSTM(input_size=params.lstm_input_size,
hidden_size=params.lstm_hidden_dim,
num_layers=params.lstm_layers,
bias=True,
batch_first=False,
dropout=params.lstm_dropout)
# initialize LSTM forget gate bias to be 1 as recommanded by
# http://proceedings.mlr.press/v37/jozefowicz15.pdf
for names in self.lstm._all_weights:
for name in filter(lambda n: "bias" in n, names):
bias = getattr(self.lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
#Plan A:
#beta_01:[beta0,beta1]
self.beta_n1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_beta_1 = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, 1)
self.pre_sigma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
self.pre_gamma = nn.Linear(
params.lstm_hidden_dim * params.lstm_layers, params.num_spline)
        # softmax to make sure the spline widths (sigma) sum to 1
self.sigma = nn.Softmax(dim=1)
# softplus to make sure gamma is positive
self.gamma = nn.Softplus()
# softplus to make sure beta0 is positive
self.beta_1 = nn.Softplus()
def forward(self, x, hidden, cell):
_, (hidden, cell) = self.lstm(x, (hidden, cell))
# use h from all three layers to calculate mu and sigma
hidden_permute = \
hidden.permute(1, 2, 0).contiguous().view(hidden.shape[1], -1)
#Plan A:
beta_n1 = self.beta_n1(hidden_permute)
pre_beta_1 = self.pre_beta_1(hidden_permute)
beta_1 = self.beta_1(pre_beta_1)
beta_1=-beta_1
pre_sigma = self.pre_sigma(hidden_permute)
sigma = self.sigma(pre_sigma)
pre_gamma = self.pre_gamma(hidden_permute)
gamma = self.gamma(pre_gamma)
#Plan A:
return ((beta_n1,beta_1,sigma,torch.squeeze(gamma)),hidden,cell)
def init_hidden(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def init_cell(self, input_size):
return torch.zeros(self.params.lstm_layers, input_size,
self.params.lstm_hidden_dim,
device=self.device)
def predict(self, x, hidden, cell, sampling=False):
"""
generate samples by sampling from
"""
batch_size = x.shape[1]
samples = torch.zeros(self.params.sample_times,batch_size,
self.params.pred_steps,
device=self.device)
for j in range(self.params.sample_times):
decoder_hidden = hidden
decoder_cell = cell
for t in range(self.params.pred_steps):
func_param,decoder_hidden,decoder_cell=\
self(x[self.params.pred_start+t].unsqueeze(0),
decoder_hidden,decoder_cell)
beta_n1,beta_1,sigma,gamma=func_param
#pred_cdf is a uniform ditribution
uniform = torch.distributions.uniform.Uniform(
torch.tensor([0.0], device=sigma.device),
torch.tensor([1.0], device=sigma.device))
pred_cdf=uniform.sample([batch_size])
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
ksi=pad(torch.cumsum(sigma,dim=1),(1,0))[:,:-1]
indices=ksi<pred_cdf
pred=(beta_N*pad(pred_cdf,(1,0),value=1)).sum(dim=1)
pred=pred+((pred_cdf-ksi).pow(2)*beta*indices).sum(dim=1)
samples[j, :, t] = pred
#predict value at t-1 is as a covars for t,t+1,...,t+lag
for lag in range(self.params.lag):
if t<self.params.pred_steps-lag-1:
x[self.params.pred_start+t+1,:,0]=pred
sample_mu = torch.mean(samples, dim=0) # mean or median ?
sample_std = samples.std(dim=0)
return samples, sample_mu, sample_std
def loss_fn(func_param, labels: Variable):
beta_n1,beta_1,sigma,gamma=func_param
beta_0=gamma[:,:1]-2*beta_1*sigma[:,:1]
beta_N=torch.cat((beta_n1,beta_0),dim=1)
beta=pad(gamma,(1,0))[:,:-1]
beta[:,0]=beta_0[:,0]
beta=(gamma-beta)/(2*sigma)
beta=beta-pad(beta,(1,0))[:,:-1]
beta[:,-1]=gamma[:,-1]-beta[:,:-1].sum(dim=1)
#calculate the maximum for each segment of the spline
ksi=torch.cumsum(sigma,dim=1)
df1=ksi.expand(sigma.shape[1],sigma.shape[0],sigma.shape[1]).T.clone()
df2=pad(ksi.T.unsqueeze(2),(1,0),'constant',value=1)
ksi=pad(ksi,(1,0))[:,:-1]
knots=df1-ksi
knots[knots<0]=0
knots=(df2*beta_N).sum(dim=2)+(knots.pow(2)*beta).sum(dim=2)
knots=pad(knots.T,(1,0))[:,:-1]#F(ksi_1~K)=0~max
diff=labels.view(-1,1)-knots
alpha_l=diff>0
alpha_A=torch.sum(alpha_l*beta,dim=1)
alpha_B=beta_N[:,1]-2*torch.sum(alpha_l*beta*ksi,dim=1)
alpha_C=beta_N[:,0]-labels+torch.sum(alpha_l*beta*ksi*ksi,dim=1)
#since A may be zero, roots can be from different methods.
not_zero=(alpha_A!=0)
alpha=torch.zeros_like(alpha_A)
        #the discriminant may become slightly negative due to numerical error,#0
idx=(alpha_B**2-4*alpha_A*alpha_C)<0#0
diff=diff.abs()
index=diff==(diff.min(dim=1)[0].view(-1,1))
index[~idx,:]=False
#index=diff.abs()<1e-4#0,1e-4 is a threshold
#idx=index.sum(dim=1)>0#0
alpha[idx]=ksi[index]#0
alpha[~not_zero]=-alpha_C[~not_zero]/alpha_B[~not_zero]
not_zero=~(~not_zero | idx)#0
delta=alpha_B[not_zero].pow(2)-4*alpha_A[not_zero]*alpha_C[not_zero]
alpha[not_zero]=(-alpha_B[not_zero]+torch.sqrt(delta))/(2*alpha_A[not_zero])
crps_1=labels*(2*alpha-1)
#lam2=lambda n:2*beta_N[:,n-1]*(1/n/(n+1)-alpha.pow(n)/n)
#crps_2=reduce(lambda a,b:a+b,[lam2(n) for n in range(1,2+1)])
crps_2=beta_N[:,0]*(1-2*alpha)+beta_N[:,1]*(1/3-alpha.pow(2))
crps_3=torch.sum(2*beta/((2+1)*(2+2))*(1-ksi).pow(2+2),dim=1)
crps_4=torch.sum(alpha_l*2*beta/(2+1)*(torch.unsqueeze(alpha,1)-ksi).pow(2+1),dim=1)
crps=crps_1+crps_2+crps_3-crps_4
crps = torch.mean(crps)
return crps
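# Illustrative sketch (the hyper-parameters below are assumptions, not values
# from the original configuration): run one forward pass of Net and score the
# returned spline parameters (beta_n1, beta_1, sigma, gamma) with the CRPS
# implemented by loss_fn above.
def _crps_step_sketch():
    from types import SimpleNamespace
    params = SimpleNamespace(lstm_input_size=8, lstm_hidden_dim=40,
                             lstm_layers=2, lstm_dropout=0.1, num_spline=20)
    net = Net(params, device=torch.device("cpu"))
    batch_size = 4
    x = torch.randn(1, batch_size, params.lstm_input_size)  # (seq, batch, feature)
    hidden, cell = net.init_hidden(batch_size), net.init_cell(batch_size)
    func_param, hidden, cell = net(x, hidden, cell)
    labels = torch.rand(batch_size)
    return loss_fn(func_param, labels)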
| 40.131579 | 89 | 0.571148 | 5,072 | 0.665093 | 0 | 0 | 0 | 0 | 0 | 0 | 1,268 | 0.166273 |
c7f39bdc2218cef3b2fe963ee01b122a395a8bc3 | 227 | py | Python | tests/repositories/helpers/methods/test_reinstall_if_needed.py | traibnn/integration | cf5920a677fdaa8408074e533371141828b0b30f | [
"MIT"
]
| 1 | 2021-07-31T00:34:30.000Z | 2021-07-31T00:34:30.000Z | tests/repositories/helpers/methods/test_reinstall_if_needed.py | traibnn/integration | cf5920a677fdaa8408074e533371141828b0b30f | [
"MIT"
]
| 45 | 2021-07-21T13:32:44.000Z | 2022-03-28T06:15:40.000Z | tests/repositories/helpers/methods/test_reinstall_if_needed.py | traibnn/integration | cf5920a677fdaa8408074e533371141828b0b30f | [
"MIT"
]
| null | null | null | import pytest
@pytest.mark.asyncio
async def test_reinstall_if_needed(repository):
repository.content.path.local = "/non/existing/dir"
repository.data.installed = True
await repository.async_reinstall_if_needed()
| 25.222222 | 55 | 0.784141 | 0 | 0 | 0 | 0 | 210 | 0.92511 | 189 | 0.832599 | 19 | 0.0837 |
c7f3bbfe8ecf852146009a98359ee99148f7760a | 11,124 | py | Python | workflow_parser/datasource/log_engine.py | cyx1231st/workflow_parser | d2e78c191c75c7addda89e6e336be90f6ca9717d | [
"Apache-2.0"
]
| null | null | null | workflow_parser/datasource/log_engine.py | cyx1231st/workflow_parser | d2e78c191c75c7addda89e6e336be90f6ca9717d | [
"Apache-2.0"
]
| null | null | null | workflow_parser/datasource/log_engine.py | cyx1231st/workflow_parser | d2e78c191c75c7addda89e6e336be90f6ca9717d | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2017 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from collections import defaultdict
import os
from os import path
import sys
from .. import reserved_vars as rv
from ..service_registry import Component
from ..service_registry import ServiceRegistry
from . import Line
from . import Source
from .exc import LogError
class DriverPlugin(object):
__metaclass__ = ABCMeta
def __init__(self,
f_filter_logfile,
f_filter_logline,
extensions):
self._extensions = extensions
self.f_filter_logfile = f_filter_logfile
self.f_filter_logline = f_filter_logline
def _purge_dict_empty_values(self, var_dict):
for k in var_dict.keys():
if var_dict[k] in {None, ""}:
var_dict.pop(k)
def do_filter_logfile(self, f_dir, f_name):
assert isinstance(f_dir, str)
assert isinstance(f_name, str)
assert f_name in f_dir
# skip non-file
if not path.isfile(f_dir):
return False, None
# check file extension
ext_match = False
for ext in self._extensions:
if f_name.endswith("." + ext):
ext_match = True
if not ext_match:
return False, None
try:
var_dict = {}
ret = self.f_filter_logfile(f_dir, f_name, var_dict)
assert isinstance(ret, bool)
if ret:
# NOTE
# print("(LogDriver) loaded: %s" % f_dir)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
return True, var_dict
else:
# skip
return False, None
except Exception as e:
raise LogError(
"(LogDriver) `f_filter_logfile` error when f_name=%s"
% f_name, e)
def do_filter_logline(self, line, lino, where):
assert isinstance(line, str)
assert isinstance(lino, int)
assert isinstance(where, str)
try:
var_dict = {}
ret = self.f_filter_logline(line, var_dict)
assert all(isinstance(k, str) for k in var_dict.keys())
self._purge_dict_empty_values(var_dict)
assert isinstance(ret, bool)
return ret, var_dict
except Exception as e:
raise LogError("(LogDriver) `f_filter_logline` error at %s@%d %s"
% (where, lino, line), e)
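# Illustrative sketch (the log format and var_dict keys are hypothetical; real
# plugins would normally fill in the reserved-variable keys from `rv`): a
# concrete plugin only needs the two filter callbacks and the accepted file
# extensions that DriverPlugin above expects.
def _build_example_plugin():
    def _filter_logfile(f_dir, f_name, var_dict):
        # accept every candidate file and remember the host encoded in its name
        var_dict["host"] = f_name.split("_")[0]
        return True
    def _filter_logline(line, var_dict):
        # keep lines shaped like "<seconds> <keyword> ...", skip everything else
        fields = line.split()
        if len(fields) < 2:
            return False
        var_dict["seconds"] = fields[0]
        var_dict["keyword"] = fields[1]
        return True
    return DriverPlugin(_filter_logfile, _filter_logline, extensions=["log"])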
class FileDatasource(object):
def __init__(self, name, f_dir, vs, sr, plugin):
assert isinstance(sr, ServiceRegistry)
assert isinstance(plugin, DriverPlugin)
self.sr = sr
self.plugin = plugin
self.name = name
self.f_dir = f_dir
self.total_lines = 0
self.source = Source(name, f_dir, vs)
self.requests = set()
@property
def total_lineobjs(self):
return self.source.len_lineobjs
# def _buffer_lines(self, lines):
# buffer_lines = Heap(key=lambda a: a.seconds)
# prv_line = [None]
# def _flush_line(flush=None):
# while buffer_lines:
# if flush and buffer_lines.distance < flush:
# break
# line = buffer_lines.pop()
# if prv_line[0] is not None:
# prv_line[0].nxt_logline = line
# line.prv_logline = prv_line[0]
# assert prv_line[0] <= line
# yield line
# prv_line[0] = line
# for line in lines:
# assert isinstance(line, LogLine)
# buffer_lines.push(line)
# for line in _flush_line(1):
# yield line
# for line in _flush_line():
# yield line
def yield_lineobjs(self, targets_byname):
with open(self.f_dir, 'r') as reader:
for line in reader:
self.total_lines += 1
lino = self.total_lines
if_proceed, vs = self.plugin.do_filter_logline(
line, lino, self.name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
c_obj = self.sr.f_to_component(component)
if not c_obj:
raise LogError(
"Error in %s@%d %s: unrecognized component %s"
% (self.name, lino, line, component))
else:
vs[rv.COMPONENT] = c_obj
# collect requests
request = vs.get(rv.REQUEST)
if request is not None:
self.requests.add(request)
lineobj = self.source.append_line(
lino, line, vs, targets_byname)
yield lineobj
@classmethod
def create_byfolder(cls, log_folder, sr, plugin):
assert isinstance(log_folder, str)
assert isinstance(plugin, DriverPlugin)
datasources = []
# current_path = path.dirname(os.path.realpath(__file__))
current_path = os.getcwd()
log_folder = path.join(current_path, log_folder)
for f_name in os.listdir(log_folder):
f_dir = path.join(log_folder, f_name)
if_proceed, vs = plugin.do_filter_logfile(f_dir, f_name)
if if_proceed:
# convert component
component = vs.get(rv.COMPONENT)
if component is not None:
                    c_obj = sr.f_to_component(component)
if not c_obj:
raise LogError(
"Error in %s: unrecognized component %s"
% (f_name, component))
else:
vs[rv.COMPONENT] = c_obj
ds = cls(f_name.rsplit(".", 1)[0], f_dir, vs, sr, plugin)
datasources.append(ds)
return log_folder, datasources
# step1: load related log files
def loadsources(log_folder, sr, plugin):
print("Load data sources...")
log_folder, datasources = FileDatasource.create_byfolder(
log_folder, sr, plugin)
print("---------------")
#### summary ####
print("%d datasources from %s" % (len(datasources), log_folder))
print()
return datasources
# step2: read sources
def readsources(datasources, sr, report):
targets_byname = {}
targets_byhost = defaultdict(list)
targets_bycomponent = defaultdict(list)
threads = set()
print("Read data sources...")
for datasource in datasources:
for line_obj in datasource.yield_lineobjs(targets_byname):
pass
for targetobj in targets_byname.values():
if not isinstance(targetobj.target, str) or not targetobj.target:
raise LogError("%s has invalid target: %s" % (
                targetobj, targetobj.target))
if not isinstance(targetobj.host, str) or not targetobj.host:
raise LogError("%s has invalid host: %s" % (
                targetobj, targetobj.host))
if not isinstance(targetobj.component, Component):
raise LogError("%s has invalid component: %s" % (
                targetobj, targetobj.component))
targets_byhost[targetobj.host].append(targetobj)
targets_bycomponent[targetobj.component].append(targetobj)
threads.update(targetobj.thread_objs)
print("---------------")
#### summary ####
total_targets = len(targets_byname)
total_hosts = len(targets_byhost)
total_components = len(targets_bycomponent)
print("%d targets, %d hosts" %
(total_targets,
total_hosts))
total_lines = sum(datasource.total_lines for datasource in datasources)
total_lineobjs = sum(datasource.total_lineobjs
for datasource in datasources)
if not total_lines:
print("0 valid lines")
else:
print("%.2f%% valid: %d lines -> %d lineobjs"
% (float(total_lineobjs)/total_lines*100,
total_lines,
total_lineobjs))
for comp in sr.sr_components:
targets = targets_bycomponent.get(comp, [])
if not targets:
raise LogError("ERROR! miss component %s" % comp)
else:
component_threads = sum(len(target.thread_objs) for target in targets)
component_lines = sum(target.len_lineobjs for target in targets)
min_target_threads, max_target_threads = sys.maxsize, 0
min_target_lineobjs, max_target_lineobjs = sys.maxsize, 0
hosts_ = set()
for target_obj in targets:
hosts_.add(target_obj.host)
min_target_threads = min(min_target_threads, len(target_obj.thread_objs))
max_target_threads = max(max_target_threads, len(target_obj.thread_objs))
min_target_lineobjs = min(min_target_lineobjs,
target_obj.len_lineobjs)
max_target_lineobjs = max(max_target_lineobjs,
target_obj.len_lineobjs)
print(" %s: %d hosts, %d targets, %d threads, %d lines"
% (comp, len(hosts_), len(targets),
component_threads,
component_lines))
print(" per-target: %.3f[%d, %d] threads, %.3f[%d, %d] loglines"
% (component_threads/float(len(targets)),
min_target_threads,
max_target_threads,
component_lines/float(len(targets)),
min_target_lineobjs,
max_target_lineobjs))
print()
#### report #####
requests = set()
for ds in datasources:
requests.update(ds.requests)
report.step("read", line=total_lineobjs,
component=total_components,
host=total_hosts,
target=total_targets,
thread=len(threads),
request=len(requests))
return targets_byname
def proceed(logfolder, sr, plugin, report):
datasources = loadsources(logfolder, sr, plugin)
targetobjs = readsources(datasources, sr, report)
return targetobjs
| 36.352941 | 89 | 0.567242 | 5,869 | 0.527598 | 1,200 | 0.107875 | 1,231 | 0.110662 | 0 | 0 | 2,215 | 0.199119 |
c7f405a9090e4db54d759cf9f413be8921191675 | 3,890 | py | Python | IPython/lib/tests/test_irunner_pylab_magic.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
]
| null | null | null | IPython/lib/tests/test_irunner_pylab_magic.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
]
| 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | IPython/lib/tests/test_irunner_pylab_magic.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
]
| 1 | 2021-10-06T07:59:25.000Z | 2021-10-06T07:59:25.000Z | """Test suite for pylab_import_all magic
Modified from the irunner module but using regex.
"""
# Global to make tests extra verbose and help debugging
VERBOSE = True
# stdlib imports
import StringIO
import sys
import unittest
import re
# IPython imports
from IPython.lib import irunner
from IPython.testing import decorators
def pylab_not_importable():
"""Test if importing pylab fails with RuntimeError (true when having no display)"""
try:
import pylab
return False
except RuntimeError:
return True
# Testing code begins
class RunnerTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO.StringIO()
#self.out = sys.stdout
def _test_runner(self,runner,source,output):
"""Test that a given runner's input/output match."""
runner.run_source(source)
out = self.out.getvalue()
#out = ''
# this output contains nasty \r\n lineends, and the initial ipython
# banner. clean it up for comparison, removing lines of whitespace
output_l = [l for l in output.splitlines() if l and not l.isspace()]
out_l = [l for l in out.splitlines() if l and not l.isspace()]
mismatch = 0
if len(output_l) != len(out_l):
message = ("Mismatch in number of lines\n\n"
"Expected:\n"
"~~~~~~~~~\n"
"%s\n\n"
"Got:\n"
"~~~~~~~~~\n"
"%s"
) % ("\n".join(output_l), "\n".join(out_l))
self.fail(message)
for n in range(len(output_l)):
# Do a line-by-line comparison
ol1 = output_l[n].strip()
ol2 = out_l[n].strip()
if not re.match(ol1,ol2):
mismatch += 1
if VERBOSE:
print '<<< line %s does not match:' % n
print repr(ol1)
print repr(ol2)
print '>>>'
self.assert_(mismatch==0,'Number of mismatched lines: %s' %
mismatch)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_enabled(self):
"Verify that plot is available when pylab_import_all = True"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = True
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = True
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: True
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_disabled(self):
"Verify that plot is not available when pylab_import_all = False"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = False
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = False
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: False
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
| 32.689076 | 87 | 0.608226 | 3,325 | 0.854756 | 0 | 0 | 1,749 | 0.449614 | 0 | 0 | 1,905 | 0.489717 |
c7f4992bb494868e3842c501796146ce55443adc | 2,241 | py | Python | checkpoint.py | GooLee0123/MBRNN | c313bc286b34a2f6e0cbc1ec0941c511ff8dc8d3 | [
"MIT"
]
| 1 | 2021-12-07T03:59:51.000Z | 2021-12-07T03:59:51.000Z | checkpoint.py | GooLee0123/MBRNN | c313bc286b34a2f6e0cbc1ec0941c511ff8dc8d3 | [
"MIT"
]
| null | null | null | checkpoint.py | GooLee0123/MBRNN | c313bc286b34a2f6e0cbc1ec0941c511ff8dc8d3 | [
"MIT"
]
| 1 | 2022-02-23T02:15:56.000Z | 2022-02-23T02:15:56.000Z | import logging
import os
import shutil
import time
import torch
model_state = 'model_state.pt'
trainer_state = 'trainer_state.pt'
class Checkpoint():
def __init__(self, step, epoch, model, optim, path=None, opt=None):
self.step = step
self.epoch = epoch
self.model = model
self.optim = optim
self._path = path
self.opt = opt
self.logger = logging.getLogger(__name__)
@property
def path(self):
if self._path is None:
raise LookupError("The checkpoint has not been saved.")
return self._path
@classmethod
def load(cls, model, optim=None, opt=None):
logger = logging.getLogger(__name__)
all_times = sorted(os.listdir(opt.ckpt_fd), reverse=True)
fchckpt = os.path.join(opt.ckpt_fd, all_times[0])
logger.info("load checkpoint from %s" % fchckpt)
resume_model = torch.load(os.path.join(fchckpt, model_state),
map_location=opt.device)
resume_checkpoint = torch.load(os.path.join(fchckpt, trainer_state),
map_location=opt.device)
model.load_state_dict(resume_model)
if optim is not None:
optim.load_state_dict(resume_checkpoint['optimizer'])
return Checkpoint(step=resume_checkpoint['step'],
epoch=resume_checkpoint['epoch'],
model=model,
optim=optim,
path=opt.ckpt_fd)
def save(self):
date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
path = os.path.join(self.opt.ckpt_fd, date_time)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
torch.save(
{'epoch': self.epoch,
'step': self.step,
'optimizer': self.optim.state_dict()},
os.path.join(path, trainer_state))
torch.save(
self.model.state_dict(), os.path.join(path, model_state))
log_msg = "Validation loss being smaller than previous "
log_msg += "minimum, checkpoint is saved at %s" % path
self.logger.info(log_msg)
return path
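# Illustrative usage sketch (the `opt` object is assumed to carry `ckpt_fd` and
# `device`, as the methods above require): save a checkpoint during training
# and restore the most recent one through Checkpoint.load(), which picks the
# newest timestamped folder inside opt.ckpt_fd.
def _checkpoint_roundtrip_sketch(model, optimizer, opt):
    ckpt = Checkpoint(step=0, epoch=1, model=model, optim=optimizer, opt=opt)
    saved_path = ckpt.save()  # writes model_state.pt and trainer_state.pt
    restored = Checkpoint.load(model, optim=optimizer, opt=opt)
    return saved_path, restored.epoch, restored.step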
| 30.69863 | 76 | 0.583222 | 2,106 | 0.939759 | 0 | 0 | 1,094 | 0.488175 | 0 | 0 | 244 | 0.10888 |
c7f4e1c0cff8588ab79a5f138125b800da16d5b8 | 4,250 | py | Python | test/eval_mines_color.py | alalagong/LEDNet | 5dee5ee4edc75c24e6cda50dc1661d8f0b1e6469 | [
"MIT"
]
| 3 | 2019-08-13T07:21:23.000Z | 2020-06-27T16:18:22.000Z | test/eval_mines_color.py | alalagong/LEDNet | 5dee5ee4edc75c24e6cda50dc1661d8f0b1e6469 | [
"MIT"
]
| 1 | 2020-12-14T05:56:44.000Z | 2020-12-14T05:56:44.000Z | test/eval_mines_color.py | alalagong/LEDNet | 5dee5ee4edc75c24e6cda50dc1661d8f0b1e6469 | [
"MIT"
]
| 1 | 2019-11-13T12:09:58.000Z | 2019-11-13T12:09:58.000Z | import numpy as np
import torch
import os
import cv2
import importlib
from dataset import *
from PIL import Image
from argparse import ArgumentParser
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from dataset import cityscapes
from lednet import Net
from transform import Relabel, ToLabel, Colorize
import visdom
NUM_CHANNELS = 3
NUM_CLASSES = 20
#* ******************* Test a single image ****************************
image_transform = ToPILImage()
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
def main(args):
modelpath = args.loadDir + args.loadModel
weightspath = args.loadDir + args.loadWeights
print("Loading model: " + modelpath)
print("Loading weights: " + weightspath)
model = Net(NUM_CLASSES)
model = torch.nn.DataParallel(model)
if (not args.cpu):
model = model.cuda()
# model.load_state_dict(torch.load(args.state))
# model.load_state_dict(torch.load(weightspath)) #not working if missing key
    def load_my_state_dict(model, state_dict):  # custom function to load the model when not all dict elements match
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
own_state[name].copy_(param)
return model
model = load_my_state_dict(model, torch.load(weightspath))
print("Model and weights LOADED successfully")
model.eval()
if (not os.path.exists(args.datadir)):
print("Error: datadir could not be loaded")
# loader = DataLoader(
# cityscapes('/home/liqi/PycharmProjects/LEDNet/4.png', input_transform_cityscapes, target_transform_cityscapes, subset=args.subset),
# num_workers=args.num_workers, batch_size=1 ,shuffle=False)
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
name ="4.png"
with open(image_path_city('/home/gongyiqun/images', name), 'rb') as f:
images = load_image(f).convert('RGB')
images = input_transform_cityscapes(images)
# For visualizer:
# must launch in other window "python3.6 -m visdom.server -port 8097"
# and access localhost:8097 to see it
if (args.visualize):
vis = visdom.Visdom()
if (not args.cpu):
images = images.cuda()
# labels = labels.cuda()
a=torch.unsqueeze(images,0)
inputs = Variable(a)
# targets = Variable(labels)
with torch.no_grad():
outputs = model(inputs)
label = outputs[0].max(0)[1].byte().cpu().data
# label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0))
label_color = Colorize()(label.unsqueeze(0))
filenameSave = "./save_color/"+"Others/"+name
os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
# image_transform(label.byte()).save(filenameSave)
label_save = ToPILImage()(label_color)
label_save = label_save.resize((1241, 376), Image.BILINEAR)
# label_save = cv2.resize(label_save, (376, 1224),interpolation=cv2.INTER_AREA)
label_save.save(filenameSave)
if (args.visualize):
vis.image(label_color.numpy())
# print(step, filenameSave)
# for step, (images, labels, filename, filenameGt) in enumerate(loader):
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--state')
parser.add_argument('--loadDir', default="../save/logs(KITTI)/")
parser.add_argument('--loadWeights', default="model_best.pth")
parser.add_argument('--loadModel', default="lednet.py")
parser.add_argument('--subset', default="val") # can be val, test, train, demoSequence
parser.add_argument('--datadir', default="")
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--visualize', action='store_true')
main(parser.parse_args())
| 31.481481 | 141 | 0.675059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,470 | 0.344908 |
1bdbd0dddd803ccbb1c990600d899d8ab9de0788 | 2,440 | py | Python | tests/test_resource_linkage.py | firesock/pydantic-jsonapi | b7dc891892ab3439a71f78a9a5fd067c4d651ca8 | [
"MIT"
]
| null | null | null | tests/test_resource_linkage.py | firesock/pydantic-jsonapi | b7dc891892ab3439a71f78a9a5fd067c4d651ca8 | [
"MIT"
]
| null | null | null | tests/test_resource_linkage.py | firesock/pydantic-jsonapi | b7dc891892ab3439a71f78a9a5fd067c4d651ca8 | [
"MIT"
]
| null | null | null | import pytest
from pytest import raises
from pydantic_jsonapi.resource_linkage import ResourceLinkage
from pydantic import BaseModel, ValidationError
class ThingWithLinkageData(BaseModel):
data: ResourceLinkage
class TestResourceLinks:
@pytest.mark.parametrize(
'linkage, message',
[
(
None,
'null is valid for empty to-one relationships',
),
(
[],
'empty list valid for empty to-many relationships.',
),
(
{'id': 'abc123', 'type': 'item', 'meta': None},
'single resource identifier valid for non-empty to-one relationships.',
),
(
[
{'id': 'abc123', 'type': 'item', 'meta': None},
{'id': 'def456', 'type': 'item', 'meta': None},
],
'array of resource identifiers valid for non-empty to-many relationships.',
),
],
)
def test_valid_possibilities(self, linkage, message):
structure_to_validate = {
'data': linkage
}
validated = ThingWithLinkageData(**structure_to_validate)
assert validated.dict() == structure_to_validate, message
def test_invalid_resource_identifier(self):
structure_to_validate = {
'data': {}
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data', 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data',), 'msg': 'value is not a valid list', 'type': 'type_error.list'},
]
def test_invalid_resource_identifier_array(self):
structure_to_validate = {
'data': [
{}
],
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data',), 'msg': 'value is not a valid dict', 'type': 'type_error.dict'},
{'loc': ('data', 0, 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 0, 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
]
| 34.857143 | 97 | 0.527869 | 2,283 | 0.935656 | 0 | 0 | 1,050 | 0.430328 | 0 | 0 | 755 | 0.309426 |
1bdd2e9e5e9fd87db022a69e90bc6723cd058b21 | 2,046 | py | Python | src/tensorflow/keras_cnn.py | del680202/MachineLearning-memo | 29284ca24041969eeb59851a43ab6c28c685fae5 | [
"Apache-2.0"
]
| 4 | 2017-04-24T15:01:55.000Z | 2019-11-03T11:11:54.000Z | src/tensorflow/keras_cnn.py | aasd145tw/MachineLearning-memo | 29284ca24041969eeb59851a43ab6c28c685fae5 | [
"Apache-2.0"
]
| null | null | null | src/tensorflow/keras_cnn.py | aasd145tw/MachineLearning-memo | 29284ca24041969eeb59851a43ab6c28c685fae5 | [
"Apache-2.0"
]
| 12 | 2017-05-10T13:39:17.000Z | 2019-12-15T14:01:05.000Z | import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
batch_size = 128
nb_classes = 10
nb_epoch = 20
nb_data = 28*28
log_filepath = '/tmp/keras_log'
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1]*X_test.shape[2])
# rescale
X_train = X_train.astype(np.float32)
X_train /= 255
X_test = X_test.astype(np.float32)
X_test /= 255
# convert class vectors to binary class matrices (one hot vectors)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
old_session = KTF.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
# build model
model = Sequential()
model.add(Dense(512, input_shape=(nb_data,), init='normal',name='dense1'))
model.add(Activation('relu', name='relu1'))
model.add(Dropout(0.2, name='dropout1'))
model.add(Dense(512, init='normal', name='dense2'))
model.add(Activation('relu', name='relu2'))
model.add(Dropout(0.2, name='dropout2'))
model.add(Dense(10, init='normal', name='dense3'))
model.add(Activation('softmax', name='softmax1'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, histogram_freq=1)
cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch = nb_epoch, verbose=1, callbacks=cbks)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
    print('Test accuracy:', score[1])
KTF.set_session(old_session)
| 31 | 112 | 0.725806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.148583 |
1be156b5a97033cae1d2dce7ad771f398dbde2ad | 4,942 | py | Python | tests/blas/nodes/ger_test.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
]
| 1 | 2021-07-26T07:58:06.000Z | 2021-07-26T07:58:06.000Z | tests/blas/nodes/ger_test.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
]
| null | null | null | tests/blas/nodes/ger_test.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | [
"BSD-3-Clause"
]
| 1 | 2021-03-04T13:01:48.000Z | 2021-03-04T13:01:48.000Z | #!/usr/bin/env python3
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.dataflow.streaming_memory import StreamingMemory
from dace.transformation.interstate.sdfg_nesting import InlineSDFG
from dace.transformation.interstate.fpga_transform_sdfg import FPGATransformSDFG
import numpy as np
import argparse
import scipy
import dace
from dace.memlet import Memlet
import dace.libraries.blas as blas
from dace.libraries.standard.memory import aligned_ndarray
def pure_graph(implementation, dtype, veclen):
m = dace.symbol("m")
n = dace.symbol("n")
vtype = dace.vector(dtype, veclen)
sdfg = dace.SDFG("ger_test")
state = sdfg.add_state("ger")
sdfg.add_symbol("alpha", dtype)
sdfg.add_array("x", shape=[m], dtype=dtype)
sdfg.add_array("y", shape=[n / veclen], dtype=vtype)
sdfg.add_array("A", shape=[m, n / veclen], dtype=vtype)
sdfg.add_array("res", shape=[m, n / veclen], dtype=vtype)
x = state.add_read("x")
y = state.add_read("y")
A = state.add_read("A")
res = state.add_write("res")
ger_node = blas.Ger(name="ger")
ger_node.implementation = implementation
state.add_memlet_path(x, ger_node, dst_conn="_x", memlet=Memlet("x[0:m]"))
state.add_memlet_path(y,
ger_node,
dst_conn="_y",
memlet=Memlet(f"y[0:n/{veclen}]"))
state.add_memlet_path(A,
ger_node,
dst_conn="_A",
memlet=Memlet(f"A[0:m, 0:n/{veclen}]"))
state.add_memlet_path(ger_node,
res,
src_conn="_res",
memlet=Memlet(f"res[0:m, 0:n/{veclen}]"))
return ger_node, state, sdfg
def fpga_graph(dtype, veclen, tile_size_x, tile_size_y):
ger_node, state, sdfg = pure_graph("FPGA", dtype, veclen)
ger_node.expand(sdfg, state, tile_size_x=tile_size_x, tile_size_y=tile_size_y)
sdfg.apply_transformations_repeated([FPGATransformSDFG, InlineSDFG])
sdfg.expand_library_nodes()
sdfg.apply_transformations_repeated(
[InlineSDFG, StreamingMemory], [{}, {
"storage": dace.StorageType.FPGA_Local
}])
return sdfg
def run_test(ger, target):
x = np.ndarray(m, dtype=np.float32)
y = np.ndarray(n, dtype=np.float32)
A = np.ndarray((m, n), dtype=np.float32)
res = A.copy()
ref = res.copy()
x[:] = np.random.rand(m).astype(np.float32)
y[:] = np.random.rand(n).astype(np.float32)
A[:] = np.random.rand(m, n).astype(np.float32)
ger(alpha=alpha, x=x, y=y, A=A, res=res, m=m, n=n)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=A)
diff = np.linalg.norm(np.subtract(res, ref))
if diff >= args.eps * n * m:
raise RuntimeError(
"Unexpected result returned from ger rank 1 operation: "
"got:\n{}\nexpected:\n{} on {}".format(A, ref, target))
else:
print("Ok")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int, nargs="?", default=256)
parser.add_argument("M", type=int, nargs="?", default=512)
parser.add_argument("tile_size_x", type=int, nargs="?", default=16)
parser.add_argument("tile_size_y", type=int, nargs="?", default=32)
parser.add_argument("alpha", type=np.float32, nargs="?", default=1.0)
parser.add_argument("--target", dest="target", default="pure")
parser.add_argument("--eps", type=float, default=1e-6)
parser.add_argument("--veclen", type=int, default=8)
args = parser.parse_args()
n = args.N
m = args.M
tile_size_x = args.tile_size_x
tile_size_y = args.tile_size_y
alpha = args.alpha
veclen = args.veclen
if args.target == "pure":
ger_node, state, sdfg = pure_graph("pure", dace.float32, veclen)
ger_node.expand(sdfg, state)
sdfg.apply_transformations_repeated([InlineSDFG])
elif args.target == "fpga":
sdfg = fpga_graph(dace.float32, veclen, tile_size_x, tile_size_y)
else:
print("Unsupported target")
exit(-1)
x = aligned_ndarray(np.random.rand(m).astype(np.float32), alignment=4*veclen)
y = aligned_ndarray(np.random.rand(n).astype(np.float32), alignment=4*veclen)
A = aligned_ndarray(np.random.rand(m, n).astype(np.float32), alignment=4*veclen)
res = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
ref = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=4*veclen)
res[:] = A[:]
ref[:] = A[:]
sdfg(x=x, y=y, A=A, res=res, m=dace.int32(m), n=dace.int32(n), alpha=alpha)
ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=ref)
diff = np.linalg.norm(res - ref)
if diff >= args.eps * n * m:
raise RuntimeError(f"Validation failed: {diff}")
else:
print("Validation successful.")
| 33.849315 | 84 | 0.633347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.111493 |
1be16c8b647df2316a1c8f8f394a926e8273c86d | 1,925 | py | Python | spp.py | ninfueng/torch-cifar | f829c3375a9d9823cef4659f8bdfbd3800d51e80 | [
"MIT"
]
| null | null | null | spp.py | ninfueng/torch-cifar | f829c3375a9d9823cef4659f8bdfbd3800d51e80 | [
"MIT"
]
| null | null | null | spp.py | ninfueng/torch-cifar | f829c3375a9d9823cef4659f8bdfbd3800d51e80 | [
"MIT"
]
| null | null | null | import math
from typing import List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
@torch.jit.script
def spatial_pyramid_pool(
input: Tensor, bins: Union[int, List[int]], mode: str = "max"
) -> Tensor:
"""Spatial Pyramid Pooling: https://arxiv.org/pdf/1406.4729.pdf
Args:
input (Tensor): an input tensor expected from the convolutional layer.
bins (List[int]): a list of integer of preferred size of outputs.
mode (str): how to reduce the spatial space.
Returns:
outputs (Tensor): a flatten tensor with size (batch, bins[0] * bins[0] + bins[1]
* bins[1] + ...)
"""
assert mode in ["max", "mean", "average", "avg"]
b, _, h, w = input.shape
bins = [bins] if isinstance(bins, int) else bins
outputs = []
for bin_ in bins:
h_kernel = math.ceil(h / bin_)
w_kernel = math.ceil(w / bin_)
h_stride = math.floor(h / bin_)
w_stride = math.floor(w / bin_)
if mode == "max":
output = F.max_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
else:
output = F.avg_pool2d(
input, kernel_size=(h_kernel, w_kernel), stride=(h_stride, w_stride)
)
output = output.view(b, -1)
outputs.append(output)
outputs = torch.cat(outputs, dim=-1)
return outputs
class SpatialPyramidPool(nn.Module):
def __init__(self, bins: Union[int, List[int]], mode: str = "max") -> None:
super().__init__()
self.bins = bins
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return spatial_pyramid_pool(input, bins=self.bins, mode=self.mode)
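# Illustrative sketch (layer sizes are arbitrary): wrapping SPP as a module lets
# it sit between a convolutional trunk and a fully connected head, so inputs of
# any spatial size map to a fixed-length vector (1*1 + 2*2 + 3*3 = 14 bins per
# channel with the bins chosen below).
class _SppClassifierSketch(nn.Module):
    def __init__(self, num_classes: int = 10) -> None:
        super().__init__()
        self.conv = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.spp = SpatialPyramidPool(bins=[1, 2, 3], mode="max")
        self.fc = nn.Linear(32 * (1 + 4 + 9), num_classes)
    def forward(self, input: Tensor) -> Tensor:
        return self.fc(self.spp(F.relu(self.conv(input))))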
if __name__ == "__main__":
input = torch.zeros(1, 512, 13, 13)
output = spatial_pyramid_pool(input, [1, 2, 3], "max")
print(output.shape)
| 29.166667 | 88 | 0.603636 | 317 | 0.164675 | 0 | 0 | 1,314 | 0.682597 | 0 | 0 | 471 | 0.244675 |
1be1d0ad6c2cd6a6b3082cd64ad7f9633b3033de | 21,417 | py | Python | src/SparseSC/utils/AzureBatch/azure_batch_client.py | wofein/SparseSC | fd8125015c65829458bfee2ae94c24981112d2d8 | [
"MIT"
]
| null | null | null | src/SparseSC/utils/AzureBatch/azure_batch_client.py | wofein/SparseSC | fd8125015c65829458bfee2ae94c24981112d2d8 | [
"MIT"
]
| null | null | null | src/SparseSC/utils/AzureBatch/azure_batch_client.py | wofein/SparseSC | fd8125015c65829458bfee2ae94c24981112d2d8 | [
"MIT"
]
| null | null | null | """
usage requires these additional modules
pip install azure-batch azure-storage-blob jsonschema pyyaml && pip install git+https://github.com/microsoft/SparseSC.git@ad4bf27edb28f517508f6934f21eb65d17fb6543 && scgrad start
usage:
from SparseSC import fit, aggregate_batch_results
from SparseSC.utils.azure_batch_client import BatchConfig, run
_TIMESTAMP = datetime.utcnow().strftime("%Y%m%d%H%M%S")
BATCH_DIR= "path/to/my/batch_config/"
fit(x=x,..., batchDir=BATCH_DIR)
my_config = BatchConfig(
BATCH_ACCOUNT_NAME="MySecret",
BATCH_ACCOUNT_KEY="MySecret",
BATCH_ACCOUNT_URL="MySecret",
STORAGE_ACCOUNT_NAME="MySecret",
STORAGE_ACCOUNT_KEY="MySecret",
POOL_ID="my-compute-pool",
POOL_NODE_COUNT=0,
POOL_LOW_PRIORITY_NODE_COUNT=20,
POOL_VM_SIZE="STANDARD_A1_v2",
DELETE_POOL_WHEN_DONE=False,
JOB_ID="my-job" + _TIMESTAMP,
DELETE_JOB_WHEN_DONE=False,
CONTAINER_NAME="my-blob-container",
BATCH_DIRECTORY=BATCH_DIR,
)
run(my_config)
fitted_model = aggregate_batch_results("path/to/my/batch_config")
"""
# pylint: disable=differing-type-doc, differing-param-doc, missing-param-doc, missing-raises-doc, missing-return-doc
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import pathlib
import importlib
from collections import defaultdict
import azure.storage.blob as azureblob
from azure.storage.blob.models import ContainerPermissions
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth
import azure.batch.models as models
from SparseSC.cli.stt import get_config
from ..print_progress import print_progress
from .BatchConfig import BatchConfig, validate_config
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from .constants import (
_STANDARD_OUT_FILE_NAME,
_CONTAINER_OUTPUT_FILE,
_CONTAINER_INPUT_FILE,
_BATCH_CV_FILE_NAME,
)
FOLD_FILE_PATTERN = "fold_{}.yaml"
# pylint: disable=bad-continuation, invalid-name, protected-access, line-too-long, fixme
sys.path.append(".")
sys.path.append("..")
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def build_output_sas_url(config, _blob_client):
"""
build a sas token for the output container
"""
sas_token = _blob_client.generate_container_shared_access_signature(
config.CONTAINER_NAME,
ContainerPermissions.READ
+ ContainerPermissions.WRITE
+ ContainerPermissions.DELETE
+ ContainerPermissions.LIST,
datetime.datetime.utcnow() + datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS),
start=datetime.datetime.utcnow(),
)
_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(
config.STORAGE_ACCOUNT_NAME, config.CONTAINER_NAME, sas_token
)
return _sas_url
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
print("-------------------------------------------")
print("Exception encountered:")
if (
batch_exception.error
and batch_exception.error.message
and batch_exception.error.message.value
):
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print("{}:\t{}".format(mesg.key, mesg.value))
print("-------------------------------------------")
def build_output_file(container_sas_url, fold_number):
"""
Uploads a local file to an Azure Blob storage container.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
# where to store the outputs
container_dest = models.OutputFileBlobContainerDestination(
container_url=container_sas_url, path=FOLD_FILE_PATTERN.format(fold_number)
)
dest = models.OutputFileDestination(container=container_dest)
# under what conditions should you attempt to extract the outputs?
upload_options = models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
)
# https://docs.microsoft.com/en-us/azure/batch/batch-task-output-files#specify-output-files-for-task-output
return models.OutputFile(
file_pattern=_CONTAINER_OUTPUT_FILE,
destination=dest,
upload_options=upload_options,
)
def upload_file_to_container(block_blob_client, container_name, file_path, duration_hours=24):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print("Uploading file {} to container [{}]...".format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name, blob_name, file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=duration_hours),
)
sas_url = block_blob_client.make_blob_url(
container_name, blob_name, sas_token=sas_token
)
return models.ResourceFile(http_url=sas_url, file_path=_CONTAINER_INPUT_FILE)
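# Illustrative sketch (account names/keys come from the BatchConfig and the
# staged file name is taken from the module constants; treat this as an
# assumption about how the helpers above are wired together, not as the
# canonical entry point): create the blob client the helpers expect, upload the
# batch input file, and build the SAS URL for the output container.
def _stage_inputs_sketch(config):
    blob_client = azureblob.BlockBlobService(
        account_name=config.STORAGE_ACCOUNT_NAME,
        account_key=config.STORAGE_ACCOUNT_KEY,
    )
    blob_client.create_container(config.CONTAINER_NAME, fail_on_exist=False)
    input_file = upload_file_to_container(
        blob_client, config.CONTAINER_NAME,
        os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME),
    )
    return blob_client, input_file, build_output_sas_url(config, blob_client)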
def create_pool(config, batch_service_client):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
"""
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
image_ref_to_use = models.ImageReference(
publisher="microsoft-azure-batch",
offer="ubuntu-server-container",
sku="16-04-lts",
version="latest",
)
if config.REGISTRY_USERNAME:
registry = batch.models.ContainerRegistry(
user_name=config.REGISTRY_USERNAME,
password=config.REGISTRY_PASSWORD,
registry_server=config.REGISTRY_SERVER,
)
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER],
container_registries=[registry],
)
else:
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER]
)
new_pool = batch.models.PoolAddParameter(
id=config.POOL_ID,
virtual_machine_configuration=batch.models.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
container_configuration=container_conf,
node_agent_sku_id="batch.node.ubuntu 16.04",
),
vm_size=config.POOL_VM_SIZE,
target_dedicated_nodes=config.POOL_NODE_COUNT,
target_low_priority_nodes=config.POOL_LOW_PRIORITY_NODE_COUNT,
)
batch_service_client.pool.add(new_pool)
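# Illustrative sketch (the `batch_url` keyword assumes a recent azure-batch SDK;
# credentials are placeholders from the BatchConfig): the pool/job helpers here
# are driven through a BatchServiceClient built from shared-key credentials.
def _build_batch_client_sketch(config):
    credentials = batch_auth.SharedKeyCredentials(
        config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
    )
    batch_client = batch.BatchServiceClient(
        credentials, batch_url=config.BATCH_ACCOUNT_URL
    )
    create_pool(config, batch_client)
    create_job(batch_client, config.JOB_ID, config.POOL_ID)
    return batch_client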
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print("Creating job [{}]...".format(job_id))
job_description = batch.models.JobAddParameter(
id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)
)
batch_service_client.job.add(job_description)
def add_tasks(
config,
_blob_client,
batch_service_client,
container_sas_url,
job_id,
_input_file,
count,
):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
:param list input_files: The input files
:param output_container_sas_token: A SAS token granting write access to
the specified Azure Blob storage container.
"""
print("Adding {} tasks to job [{}]...".format(count, job_id))
tasks = list()
for fold_number in range(count):
output_file = build_output_file(container_sas_url, fold_number)
# command_line = '/bin/bash -c \'echo "Hello World" && echo "hello: world" > output.yaml\''
command_line = "/bin/bash -c 'stt {} {} {}'".format(
_CONTAINER_INPUT_FILE, _CONTAINER_OUTPUT_FILE, fold_number
)
task_container_settings = models.TaskContainerSettings(
image_name=config.DOCKER_CONTAINER
)
tasks.append(
batch.models.TaskAddParameter(
id="Task_{}".format(fold_number),
command_line=command_line,
resource_files=[_input_file],
output_files=[output_file],
container_settings=task_container_settings,
)
)
batch_service_client.task.add_collection(job_id, tasks)
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job whose tasks should be monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
_start_time = datetime.datetime.now()
timeout_expiration = _start_time + timeout
# print( "Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end="",)
while datetime.datetime.now() < timeout_expiration:
sys.stdout.flush()
tasks = [t for t in batch_service_client.task.list(job_id)]
incomplete_tasks = [
task for task in tasks if task.state != models.TaskState.completed
]
hours, remainder = divmod((datetime.datetime.now() - _start_time).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print_progress(
len(tasks) - len(incomplete_tasks),
len(tasks),
prefix="Time elapsed {:02}:{:02}:{:02}".format(
int(hours), int(minutes), int(seconds)
),
decimals=1,
bar_length=min(len(tasks), 50),
)
        error_codes = [
            t.execution_info.exit_code
            for t in tasks
            if t.execution_info and t.execution_info.exit_code
        ]
        if error_codes:
            codes = defaultdict(int)
            for cd in error_codes:
                codes[cd] += 1
            raise RuntimeError(
                "\nSome tasks have exited with a non-zero exit code including: "
                + ", ".join("{}({})".format(k, v) for k, v in codes.items())
            )
if not incomplete_tasks:
print()
return True
time.sleep(1)
print()
raise RuntimeError(
"ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout)
)
def print_task_output(batch_service_client, job_id, encoding=None):
"""Prints the stdout.txt file for each task in the job.
    :param batch_service_client: The Batch service client to use.
    :type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job with task output files to print.
"""
print("Printing task output...")
tasks = batch_service_client.task.list(job_id)
for task in tasks:
node_id = batch_service_client.task.get(job_id, task.id).node_info.node_id
print("Task: {}".format(task.id))
print("Node: {}".format(node_id))
stream = batch_service_client.file.get_from_task(
job_id, task.id, _STANDARD_OUT_FILE_NAME
)
file_text = _read_stream_as_string(stream, encoding)
print("Standard output:")
print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
    output = io.BytesIO()
    try:
        for data in stream:
            output.write(data)
        if encoding is None:
            encoding = "utf-8"
        return output.getvalue().decode(encoding)
    except Exception:
        raise RuntimeError("could not write data to stream or decode bytes")
    finally:
        output.close()
def _download_files(config, _blob_client, out_path, count):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
for i in range(count):
blob_name = FOLD_FILE_PATTERN.format(i)
        if blob_name not in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
_blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
def _download_results(config, _blob_client, out_path, count, ptrn=FOLD_FILE_PATTERN):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
results = []
    for i in range(count):
        blob_name = ptrn.format(i)
        if blob_name not in blob_names:
            raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
        out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
        with _blob_client.get_blob_to_stream(
            config.CONTAINER_NAME, blob_name, out_path
        ) as blob:
            results.append(load(blob, Loader=Loader))
return results
def run(config: BatchConfig, wait=True) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:param boolean wait: If true, wait for the batch to complete and then
download the results to file
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print(
'Synthetic Controls Run "{}" start time: {}'.format(config.JOB_ID, start_time)
)
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
blob_client.create_container(config.CONTAINER_NAME, fail_on_exist=False)
CONTAINER_SAS_URL = build_output_sas_url(config, blob_client)
# The collection of data files that are to be processed by the tasks.
input_file_path = os.path.join(sys.path[0], _LOCAL_INPUT_FILE)
# Upload the data files.
input_file = upload_file_to_container(
blob_client, config.CONTAINER_NAME, input_file_path, config.STORAGE_ACCESS_DURATION_HRS
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
try:
create_pool(config, batch_client)
print("Created pool: ", config.POOL_ID)
except models.BatchErrorException:
print("Using pool: ", config.POOL_ID)
# Create the job that will run the tasks.
create_job(batch_client, config.JOB_ID, config.POOL_ID)
# Add the tasks to the job.
add_tasks(
config,
blob_client,
batch_client,
CONTAINER_SAS_URL,
config.JOB_ID,
input_file,
n_folds,
)
if not wait:
return
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
def load_results(config: BatchConfig) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print('Load result for job "{}" start time: {}'.format(config.JOB_ID, start_time))
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
if __name__ == "__main__":
# TODO: this is not an ideal API
    config_module = importlib.import_module("config")
run(config_module.config)
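    # Editor's sketch (not part of the original API): the imported "config"
    # module is assumed to expose a module-level attribute named `config`
    # holding a BatchConfig instance, roughly along these lines (field values
    # below are hypothetical; only the attribute names are used in this file):
    #   config = BatchConfig(
    #       POOL_ID="stt-pool", JOB_ID="stt-job", POOL_VM_SIZE="STANDARD_A1_v2",
    #       POOL_NODE_COUNT=0, POOL_LOW_PRIORITY_NODE_COUNT=5,
    #       BATCH_DIRECTORY=".batch", DOCKER_CONTAINER="myregistry/stt:latest",
    #   )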
| 34.487923 | 178 | 0.693561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,273 | 0.386282 |
1be2bb16aca1a3770cbb4668f10786667f95971a | 63 | py | Python | src/vilbert/datasets/__init__.py | NoOneUST/COMP5212 | 171b564f08841e426545f58e3b52870c0e090586 | [
"MIT"
]
| 3 | 2020-04-05T06:50:46.000Z | 2020-04-05T08:20:33.000Z | src/vilbert/datasets/__init__.py | NoOneUST/COMP5212Project | 171b564f08841e426545f58e3b52870c0e090586 | [
"MIT"
]
| 2 | 2021-05-21T16:24:54.000Z | 2022-02-10T01:21:54.000Z | src/vilbert/datasets/__init__.py | NoOneUST/COMP5212Project | 171b564f08841e426545f58e3b52870c0e090586 | [
"MIT"
]
| 1 | 2020-06-15T16:22:20.000Z | 2020-06-15T16:22:20.000Z | from .visual_entailment_dataset import VisualEntailmentDataset
| 31.5 | 62 | 0.920635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1be2fe74c868aa22cedb699484c807fd62b32107 | 14,174 | py | Python | Dungeoneer/Treasure.py | jameslemon81/Dungeoneer | 8a2a1bfea06ae09f1898583999bf449c82ba4ce9 | [
"BSD-3-Clause"
]
| 12 | 2015-01-29T17:15:46.000Z | 2022-02-23T05:58:49.000Z | Dungeoneer/Treasure.py | jameslemon81/Dungeoneer | 8a2a1bfea06ae09f1898583999bf449c82ba4ce9 | [
"BSD-3-Clause"
]
| null | null | null | Dungeoneer/Treasure.py | jameslemon81/Dungeoneer | 8a2a1bfea06ae09f1898583999bf449c82ba4ce9 | [
"BSD-3-Clause"
]
| 8 | 2016-07-04T18:09:50.000Z | 2022-02-23T05:58:48.000Z | # Basic Fantasy RPG Dungeoneer Suite
# Copyright 2007-2012 Chris Gonnerman
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, self list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, self list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from self software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# Treasure.py -- generate treasures for Basic Fantasy RPG
###############################################################################
import Gems, Art, Coins, Magic, Unknown
import Dice
import string
def combine(lst):
lst.sort()
hits = 1
while hits:
hits = 0
for i in range(len(lst) - 1):
if lst[i] is not None and lst[i+1] is not None:
if lst[i].cat == lst[i+1].cat \
and lst[i].name == lst[i+1].name \
and lst[i].value == lst[i+1].value:
lst[i].qty += lst[i+1].qty
lst[i+1] = None
hits += 1
if hits:
lst = filter(lambda x: x is not None, lst)
return lst
def _gen_coins(argtup):
kind, n, s, b, mul = argtup
return [ Coins.Coin(kind, (Dice.D(n, s, b) * mul)) ]
def _gen_gems(argtup):
n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Gems.Gem() ]
return lst
def _gen_art(argtup):
n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Art.Art() ]
return lst
def __gen_magic(argtup):
kind, n, s, b, mul = argtup
lst = []
qty = Dice.D(n, s, b) * mul
for i in range(qty):
lst = lst + [ Magic.Magic(kind) ]
return lst
def _gen_magic(argtup):
if type(argtup) is type([]):
lst = []
for i in argtup:
lst = lst + __gen_magic(i)
return lst
else:
return __gen_magic(argtup)
_treasure_table = {
# lair treasure
'A': [
(50, _gen_coins, ("cp", 5, 6, 0, 100)),
(60, _gen_coins, ("sp", 5, 6, 0, 100)),
(40, _gen_coins, ("ep", 5, 4, 0, 100)),
(70, _gen_coins, ("gp", 10, 6, 0, 100)),
(50, _gen_coins, ("pp", 1, 10, 0, 100)),
(50, _gen_gems, (6, 6, 0, 1)),
(50, _gen_art, (6, 6, 0, 1)),
(30, _gen_magic, ("Any", 0, 0, 3, 1)),
],
'B': [
(75, _gen_coins, ("cp", 5, 10, 0, 100)),
(50, _gen_coins, ("sp", 5, 6, 0, 100)),
(50, _gen_coins, ("ep", 5, 4, 0, 100)),
(50, _gen_coins, ("gp", 3, 6, 0, 100)),
(25, _gen_gems, (1, 6, 0, 1)),
(25, _gen_art, (1, 6, 0, 1)),
(10, _gen_magic, ("AW", 0, 0, 1, 1)),
],
'C': [
(60, _gen_coins, ("cp", 6, 6, 0, 100)),
(60, _gen_coins, ("sp", 5, 4, 0, 100)),
(30, _gen_coins, ("ep", 2, 6, 0, 100)),
(25, _gen_gems, (1, 4, 0, 1)),
(25, _gen_art, (1, 4, 0, 1)),
(15, _gen_magic, ("Any", 1, 2, 0, 1)),
],
'D': [
(30, _gen_coins, ("cp", 4, 6, 0, 100)),
(45, _gen_coins, ("sp", 6, 6, 0, 100)),
(90, _gen_coins, ("gp", 5, 8, 0, 100)),
(30, _gen_gems, (1, 8, 0, 1)),
(30, _gen_art, (1, 8, 0, 1)),
(20, _gen_magic, [
("Any", 1, 2, 0, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'E': [
(30, _gen_coins, ("cp", 2, 8, 0, 100)),
(60, _gen_coins, ("sp", 6, 10, 0, 100)),
(50, _gen_coins, ("ep", 3, 8, 0, 100)),
(50, _gen_coins, ("gp", 4, 10, 0, 100)),
(10, _gen_gems, (1, 10, 0, 1)),
(10, _gen_art, (1, 10, 0, 1)),
(30, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
]
),
],
'F': [
(40, _gen_coins, ("sp", 3, 8, 0, 100)),
(50, _gen_coins, ("ep", 4, 8, 0, 100)),
(85, _gen_coins, ("gp", 6, 10, 0, 100)),
(70, _gen_coins, ("pp", 2, 8, 0, 100)),
(20, _gen_gems, (2, 12, 0, 1)),
(20, _gen_art, (1, 12, 0, 1)),
(35, _gen_magic, [
("Non-Weapon", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'G': [
(90, _gen_coins, ("gp", 4, 6, 0, 1000)),
(75, _gen_coins, ("pp", 5, 8, 0, 100)),
(25, _gen_gems, (3, 6, 0, 1)),
(25, _gen_art, (1, 10, 0, 1)),
(50, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
]
),
],
'H': [
(75, _gen_coins, ("cp", 8, 10, 0, 100)),
(75, _gen_coins, ("sp", 6, 10, 0, 1000)),
(75, _gen_coins, ("ep", 3, 10, 0, 1000)),
(75, _gen_coins, ("gp", 5, 8, 0, 1000)),
(75, _gen_coins, ("pp", 9, 8, 0, 100)),
(50, _gen_gems, ( 1, 100, 0, 1)),
(50, _gen_art, (10, 4, 0, 1)),
(20, _gen_magic, [
("Any", 1, 4, 0, 1),
("Scroll", 0, 0, 1, 1),
("Potion", 0, 0, 1, 1),
]
),
],
'I': [
(80, _gen_coins, ("pp", 3, 10, 0, 100)),
(50, _gen_gems, (2, 6, 0, 1)),
(50, _gen_art, (2, 6, 0, 1)),
(15, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'J': [
(45, _gen_coins, ("cp", 3, 8, 0, 100)),
(45, _gen_coins, ("sp", 1, 8, 0, 100)),
],
'K': [
(90, _gen_coins, ("cp", 2, 10, 0, 100)),
(35, _gen_coins, ("sp", 1, 8, 0, 100)),
],
'L': [
(50, _gen_gems, (1, 4, 0, 1)),
],
'M': [
(90, _gen_coins, ("gp", 4, 10, 0, 100)),
(90, _gen_coins, ("pp", 2, 8, 0, 1000)),
],
'N': [
(40, _gen_magic, ("Potion", 2, 4, 0, 1)),
],
'O': [
(50, _gen_magic, ("Scroll", 1, 4, 0, 1)),
],
# personal treasure
'P': [
(100, _gen_coins, ("cp", 3, 8, 0, 1)),
],
'Q': [
(100, _gen_coins, ("sp", 3, 6, 0, 1)),
],
'R': [
(100, _gen_coins, ("ep", 2, 6, 0, 1)),
],
'S': [
(100, _gen_coins, ("gp", 2, 4, 0, 1)),
],
'T': [
(100, _gen_coins, ("pp", 1, 6, 0, 1)),
],
'U': [
( 50, _gen_coins, ("cp", 1, 20, 0, 1)),
( 50, _gen_coins, ("sp", 1, 20, 0, 1)),
( 25, _gen_coins, ("gp", 1, 20, 0, 1)),
( 5, _gen_gems, (1, 4, 0, 1)),
( 5, _gen_art, (1, 4, 0, 1)),
( 2, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'V': [
( 25, _gen_coins, ("sp", 1, 20, 0, 1)),
( 25, _gen_coins, ("ep", 1, 20, 0, 1)),
( 50, _gen_coins, ("gp", 1, 20, 0, 1)),
( 25, _gen_coins, ("pp", 1, 20, 0, 1)),
( 10, _gen_gems, (1, 4, 0, 1)),
( 10, _gen_art, (1, 4, 0, 1)),
( 5, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U1': [
( 75, _gen_coins, ("cp", 1, 8, 0, 100)),
( 50, _gen_coins, ("sp", 1, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 4, 0, 100)),
( 7, _gen_coins, ("gp", 1, 4, 0, 100)),
( 1, _gen_coins, ("pp", 1, 4, 0, 100)),
( 7, _gen_gems, (1, 4, 0, 1)),
( 3, _gen_art, (1, 4, 0, 1)),
( 2, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U2': [
( 50, _gen_coins, ("cp", 1, 10, 0, 100)),
( 50, _gen_coins, ("sp", 1, 8, 0, 100)),
( 25, _gen_coins, ("ep", 1, 6, 0, 100)),
( 20, _gen_coins, ("gp", 1, 6, 0, 100)),
( 2, _gen_coins, ("pp", 1, 4, 0, 100)),
( 10, _gen_gems, (1, 6, 0, 1)),
( 7, _gen_art, (1, 4, 0, 1)),
( 5, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U3': [
( 30, _gen_coins, ("cp", 2, 6, 0, 100)),
( 50, _gen_coins, ("sp", 1, 10, 0, 100)),
( 25, _gen_coins, ("ep", 1, 8, 0, 100)),
( 50, _gen_coins, ("gp", 1, 6, 0, 100)),
( 4, _gen_coins, ("pp", 1, 4, 0, 100)),
( 15, _gen_gems, (1, 6, 0, 1)),
( 7, _gen_art, (1, 6, 0, 1)),
( 8, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U45': [
( 20, _gen_coins, ("cp", 3, 6, 0, 100)),
( 50, _gen_coins, ("sp", 2, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 10, 0, 100)),
( 50, _gen_coins, ("gp", 2, 6, 0, 100)),
( 8, _gen_coins, ("pp", 1, 4, 0, 100)),
( 20, _gen_gems, (1, 8, 0, 1)),
( 10, _gen_art, (1, 6, 0, 1)),
( 12, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U67': [
( 15, _gen_coins, ("cp", 4, 6, 0, 100)),
( 50, _gen_coins, ("sp", 3, 6, 0, 100)),
( 25, _gen_coins, ("ep", 1, 12, 0, 100)),
( 70, _gen_coins, ("gp", 2, 8, 0, 100)),
( 15, _gen_coins, ("pp", 1, 4, 0, 100)),
( 30, _gen_gems, (1, 8, 0, 1)),
( 15, _gen_art, (1, 6, 0, 1)),
( 16, _gen_magic, ("Any", 0, 0, 1, 1)),
],
'U8': [
( 10, _gen_coins, ("cp", 5, 6, 0, 100)),
( 50, _gen_coins, ("sp", 5, 6, 0, 100)),
( 25, _gen_coins, ("ep", 2, 8, 0, 100)),
( 75, _gen_coins, ("gp", 4, 6, 0, 100)),
( 30, _gen_coins, ("pp", 1, 4, 0, 100)),
( 40, _gen_gems, (1, 8, 0, 1)),
( 30, _gen_art, (1, 8, 0, 1)),
( 20, _gen_magic, ("Any", 0, 0, 1, 1)),
],
# coinage
'cp': [
(100, _gen_coins, ("cp", 0, 0, 1, 1)),
],
'sp': [
(100, _gen_coins, ("sp", 0, 0, 1, 1)),
],
'ep': [
(100, _gen_coins, ("ep", 0, 0, 1, 1)),
],
'gp': [
(100, _gen_coins, ("gp", 0, 0, 1, 1)),
],
'pp': [
(100, _gen_coins, ("pp", 0, 0, 1, 1)),
],
# magic classes
'MAGIC': [ (100, _gen_magic, ("Any", 0, 0, 1, 1)), ],
'POTION': [ (100, _gen_magic, ("Potion", 0, 0, 1, 1)), ],
'SCROLL': [ (100, _gen_magic, ("Scroll", 0, 0, 1, 1)), ],
'RING': [ (100, _gen_magic, ("Ring", 0, 0, 1, 1)), ],
'WSR': [ (100, _gen_magic, ("WSR", 0, 0, 1, 1)), ],
'MISC': [ (100, _gen_magic, ("Misc", 0, 0, 1, 1)), ],
'ARMOR': [ (100, _gen_magic, ("Armor", 0, 0, 1, 1)), ],
'WEAPON': [ (100, _gen_magic, ("Weapon", 0, 0, 1, 1)), ],
}
_treasure_table['U4'] = _treasure_table['U45']
_treasure_table['U5'] = _treasure_table['U45']
_treasure_table['U6'] = _treasure_table['U67']
_treasure_table['U7'] = _treasure_table['U67']
def Types():
types = _treasure_table.keys()
ones = filter(lambda x: len(x) == 1, types)
mults = filter(lambda x: len(x) > 1, types)
ones.sort()
mults.sort()
return ones + mults
def Treasure(typ):
tr = []
try:
tbl = _treasure_table[string.upper(typ)]
for i in tbl:
if Dice.D(1, 100, 0) <= i[0]:
tr = tr + i[1](i[2])
except:
tr = [ Unknown.Unknown(typ) ]
return tr
def Factory(args):
types = []
tr = []
mult = 1
for i in args:
if type(i) is tuple:
i = Dice.D(*i)
try:
nmult = int(i)
mult = nmult
types.append("%d" % mult)
continue
except:
pass
types.append(i + ",")
for n in range(mult):
tr += Treasure(i)
types = string.join(types, " ")
if types[-1] == ',':
types = types[:-1]
return (types.upper(), combine(tr))
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print "Usage: Treasure.py treasuretype [ treasuretype ... ]"
sys.exit(0)
types, tr = Factory(sys.argv[1:])
print "Treasure Type " + string.upper(types)
vtot = 0.0
ocat = ''
qty_len = 1
for t in tr:
qty_len = max(len(str(t.qty)), qty_len)
qty_fmt = "%" + str(qty_len) + "d"
for t in tr:
if t.cat != ocat:
print t.cat
ocat = t.cat
if t.value != 0:
print " ", qty_fmt % t.qty, t.name, t.value, "GP ea.", \
t.value * t.qty, "GP total"
else:
print " ", qty_fmt % t.qty, t.name
for i in t.desc:
print " ", i
vtot = vtot + (t.qty * t.value)
print "----- Total Value", vtot, "GP\n"
# end of script.
| 32.734411 | 79 | 0.417172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,726 | 0.192324 |
1be31bb2955f81221fbda20bbf33d2351c12d6c3 | 20,773 | py | Python | covid19/COVID19/code/controller/main.py | zhanqingheng/COVID-19 | d050ad2effedb9090865d1104ccd5c5d04343f53 | [
"MIT"
]
| 16 | 2020-06-08T10:14:13.000Z | 2022-03-30T02:44:04.000Z | covid19/COVID19/code/controller/main.py | zhanqingheng/COVID-19 | d050ad2effedb9090865d1104ccd5c5d04343f53 | [
"MIT"
]
| 1 | 2021-11-18T10:03:42.000Z | 2021-11-18T10:03:42.000Z | covid19/COVID19/code/controller/main.py | zhanqingheng/COVID-19 | d050ad2effedb9090865d1104ccd5c5d04343f53 | [
"MIT"
]
| 4 | 2021-03-06T04:44:03.000Z | 2021-12-09T07:22:50.000Z | from flask import Flask, current_app
from flask import render_template
from flask import jsonify
from jieba.analyse import extract_tags
import string
from DB import chinaSQL
from DB import worldSQL
app = Flask(__name__, template_folder='../../web', static_folder='../../static')
@app.route('/', methods=["get", "post"])
def hello_world():
return render_template("china.html")
@app.route('/china', methods=["get", "post"])
def china():
return render_template("china.html")
@app.route('/world', methods=["get", "post"])
def world():
return render_template("world.html")
@app.route('/favicon.ico')
def favicon():
return current_app.send_static_file('image/favicon-32x32-sun.ico')
@app.route("/time")
def time():
data = chinaSQL.time()
return str(data[0])
@app.route("/chinaEightNumber")
def chinaEightNumber():
data = chinaSQL.chinaEightNumber()
return jsonify({"confirmTotal": data[0],
"healTotal": data[1],
"deadTotal": data[2],
"nowConfirmTotal": data[3],
"suspectTotal": data[4],
"nowSevereTotal": data[5],
"importedCaseTotal": data[6],
"noInfectTotal": data[7],
"confirmAdd": data[8],
"healAdd": data[9],
"deadAdd": data[10],
"nowConfirmAdd": data[11],
"suspectAdd": data[12],
"nowSevereAdd": data[13],
"importedCaseAdd": data[14],
"noInfectAdd": data[15]
})
@app.route('/chinaMap', methods=['GET'])
def chinaMap():
data = chinaSQL.chinaMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a, "value": b})
nowConfirmTotal.append({"name": a, "value": c})
confirmTotal.append({"name": a, "value": d})
healTotal.append({"name": a, "value": e})
deadTotal.append({"name": a, "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route('/chinaProvinceMap', methods=['GET'])
def chinaProvinceMap():
data = chinaSQL.chinaProvinceMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a + "市", "value": b})
nowConfirmTotal.append({"name": a + "市", "value": c})
confirmTotal.append({"name": a + "市", "value": d})
healTotal.append({"name": a + "市", "value": e})
deadTotal.append({"name": a + "市", "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route("/nationalTotal")
def nationalTotal():
data = chinaSQL.nationalTotal()
day, \
confirmChinaDayList, \
healChinaDayList, \
deadChinaDayList, \
importedCaseChinaDayList = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirmChinaDayList.append(b)
healChinaDayList.append(c)
deadChinaDayList.append(d)
importedCaseChinaDayList.append(e)
return jsonify({"day": day,
"confirmChinaDayList": confirmChinaDayList,
"healChinaDayList": healChinaDayList,
"deadChinaDayList": deadChinaDayList,
"importedCaseChinaDayList": importedCaseChinaDayList
})
@app.route("/dailyAdditionsNationwide")
def dailyAdditionsNationwide():
data = chinaSQL.dailyAdditionsNationwide()
day, \
confirmChinaDayAddList, \
healChinaDayAddList, \
deadChinaDayAddList, \
importedCaseChinaDayAddList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
confirmChinaDayAddList.append(b)
healChinaDayAddList.append(c)
deadChinaDayAddList.append(d)
importedCaseChinaDayAddList.append(e)
return jsonify({"day": day,
"confirmChinaDayAddList": confirmChinaDayAddList,
"healChinaDayAddList": healChinaDayAddList,
"deadChinaDayAddList": deadChinaDayAddList,
"importedCaseChinaDayAddList": importedCaseChinaDayAddList
})
@app.route("/dailyCasesNationwide")
def dailyCasesNationwide():
data = chinaSQL.dailyCasesNationwide()
day, \
suspectChinaDayList, \
noInfectChinaDayList, \
nowConfirmChinaDayList, \
nowSevereChinaDayList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
suspectChinaDayList.append(b)
noInfectChinaDayList.append(c)
nowConfirmChinaDayList.append(d)
nowSevereChinaDayList.append(e)
return jsonify({"day": day,
"suspectChinaDayList": suspectChinaDayList,
"noInfectChinaDayList": noInfectChinaDayList,
"nowConfirmChinaDayList": nowConfirmChinaDayList,
"nowSevereChinaDayList": nowSevereChinaDayList
})
@app.route("/nationalCumulativeCureMortalityRate")
def nationalCumulativeCureMortalityRate():
data = chinaSQL.nationalCumulativeCureMortalityRate()
day, \
healRateChinaDayList, \
deadRateChinaDayList = [], [], []
for a, b, c in data[7:]:
day.append(a.strftime("%m-%d"))
healRateChinaDayList.append(b)
deadRateChinaDayList.append(c)
return jsonify({"day": day,
"healRateChinaDayList": healRateChinaDayList,
"deadRateChinaDayList": deadRateChinaDayList
})
@app.route("/detailedDataByProvince")
def detailedDataByProvince():
data = chinaSQL.detailedDataByProvince()
provinceName, \
confirmTotal, \
healTotal, \
deadTotal, \
healRateTotal, \
deadRateTotal = [], [], [], [], [], []
for a, b, c, d, e, f in data:
provinceName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
healRateTotal.append(e)
deadRateTotal.append(f)
return jsonify({"provinceName": provinceName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal,
"healRateTotal": healRateTotal,
"deadRateTotal": deadRateTotal
})
@app.route("/cumulativeNumberOfConfirmedCasesInAllProvinces")
def cumulativeNumberOfConfirmedCasesInAllProvinces():
data = chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces()
provincedetails = []
for provinceName, confirmTotal in data:
provincedetails.append({"name": provinceName, "value": confirmTotal})
return jsonify({"data": provincedetails})
@app.route("/currentConfirmedDataInAllProvinces")
def currentConfirmedDataInAllProvinces():
data = chinaSQL.currentConfirmedDataInAllProvinces()
provinceName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
provinceName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"provinceName": provinceName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/existingDiagnosticClassificationInChina")
def existingDiagnosticClassificationInChina():
data = chinaSQL.existingDiagnosticClassificationInChina()
nowconfirmstatis = []
nowconfirmstatis.append({"name": '港澳台现存确诊', "value": data[0][0]})
nowconfirmstatis.append({"name": '境外输入现存确诊', "value": data[0][1]})
nowconfirmstatis.append({"name": '31省本土现有确诊', "value": data[0][2]})
return jsonify({"data": nowconfirmstatis})
@app.route("/totalNumberOfOverseasImportsFromTop10Provinces")
def totalNumberOfOverseasImportsFromTop10Provinces():
data = chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces()
importstatis = []
for province, importedCase in data:
importstatis.append({"name": province, "value": importedCase})
return jsonify({"data": importstatis})
@app.route("/eachProvinceComparesYesterdayData")
def eachProvinceComparesYesterdayData():
data = chinaSQL.eachProvinceComparesYesterdayData()
province, \
nowConfirm, \
confirmAdd, \
heal, \
dead, \
zero = [], [], [], [], [], []
for a, b, c, d, e, f in data:
province.append(a)
nowConfirm.append(b)
confirmAdd.append(c)
heal.append(d)
dead.append(e)
zero.append(f)
return jsonify({"province": province,
"nowConfirm": nowConfirm,
"confirmAdd": confirmAdd,
"heal": heal,
"dead": dead,
"zero": zero
})
@app.route("/hubeiNonHubeiNationalCumulativeData")
def hubeiNonHubeiNationalCumulativeData():
data = chinaSQL.hubeiNonHubeiNationalCumulativeData()
day, \
hubeiNowConfirm, \
hubeiHeal, \
hubeiDead, \
notHubeiNowConfirm, \
notHubeiHeal, \
notHubeiDead, \
countryNowConfirm, \
countryHeal, \
countryDead = [], [], [], [], [], [], [], [], [], []
for a, b, c, d, e, f, g, h, i, j in data:
day.append(a.strftime("%m-%d"))
hubeiNowConfirm.append(b)
hubeiHeal.append(c)
hubeiDead.append(d)
notHubeiNowConfirm.append(e)
notHubeiHeal.append(f)
notHubeiDead.append(g)
countryNowConfirm.append(h)
countryHeal.append(i)
countryDead.append(j)
return jsonify({"day": day,
"hubeiNowConfirm": hubeiNowConfirm,
"hubeiHeal": hubeiHeal,
"hubeiDead": hubeiDead,
"notHubeiNowConfirm": notHubeiNowConfirm,
"notHubeiHeal": notHubeiHeal,
"notHubeiDead": notHubeiDead,
"countryNowConfirm": countryNowConfirm,
"countryHeal": countryHeal,
"countryDead": countryDead
})
@app.route("/hubeiNonHubeiNationalCureMortalityRate")
def hubeiNonHubeiNationalCureMortalityRate():
data = chinaSQL.hubeiNonHubeiNationalCureMortalityRate()
day, \
hubeiHealRate, \
hubeiDeadRate, \
notHubeiHealRate, \
notHubeiDeadRate, \
countryHealRate, \
countryDeadRate = [], [], [], [], [], [], []
for a, b, c, d, e, f, g in data:
day.append(a.strftime("%m-%d"))
hubeiHealRate.append(b)
hubeiDeadRate.append(c)
notHubeiHealRate.append(d)
notHubeiDeadRate.append(e)
countryHealRate.append(f)
countryDeadRate.append(g)
return jsonify({"day": day,
"hubeiHealRate": hubeiHealRate,
"hubeiDeadRate": hubeiDeadRate,
"notHubeiHealRate": notHubeiHealRate,
"notHubeiDeadRate": notHubeiDeadRate,
"countryHealRate": countryHealRate,
"countryDeadRate": countryDeadRate
})
@app.route("/hubeiNonHubeiNationalDailyNew")
def hubeiNonHubeiNationalDailyNew():
data = chinaSQL.hubeiNonHubeiNationalDailyNew()
day, \
hubei, \
notHubei, \
country = [], [], [], []
for a, b, c, d in data[7:]:
day.append(a.strftime("%m-%d"))
hubei.append(b)
notHubei.append(c)
country.append(d)
return jsonify({"day": day,
"hubei": hubei,
"notHubei": notHubei,
"country": country
})
@app.route("/wuhanNotWuhanNotHubeiNewlyConfirmed")
def wuhanNotWuhanNotHubeiNewlyConfirmed():
data = chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed()
day, \
wuhan, \
notWuhan, \
notHubei = [], [], [], []
for a, b, c, d in data:
day.append(a.strftime("%m-%d"))
wuhan.append(b)
notWuhan.append(c)
notHubei.append(d)
return jsonify({"day": day,
"wuhan": wuhan,
"notWuhan": notWuhan,
"notHubei": notHubei
})
@app.route("/totalConfirmedTop20UrbanAreas")
def totalConfirmedTop20UrbanAreas():
data = chinaSQL.totalConfirmedTop20UrbanAreas()
cityName, \
deadRateTotal, \
healRateTotal = [], [], []
for a, b, c in data:
cityName.append(a)
deadRateTotal.append(b)
healRateTotal.append(c)
return jsonify({"cityName": cityName,
"deadRateTotal": deadRateTotal,
"healRateTotal": healRateTotal
})
@app.route("/existingConfirmedTop20UrbanAreas")
def existingConfirmedTop20UrbanAreas():
data = chinaSQL.existingConfirmedTop20UrbanAreas()
cityName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"cityName": cityName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/urbanDataOfHubeiProvince")
def urbanDataOfHubeiProvince():
data = chinaSQL.urbanDataOfHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/accumulativeDataExceptHubeiProvince")
def accumulativeDataExceptHubeiProvince():
data = chinaSQL.accumulativeDataExceptHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/provincesWithFatalCasesNationwide")
def provincesWithFatalCasesNationwide():
data = chinaSQL.provincesWithFatalCasesNationwide()
provincedetails = []
provincedetails.append({"name": "无死亡病例省份数量", "value": data[0][0]})
provincedetails.append({"name": "有死亡病例省份数量", "value": data[0][1]})
return jsonify({"data": provincedetails})
@app.route("/numberOfDeathsInCities")
def numberOfDeathsInCities():
data = chinaSQL.numberOfDeathsInCities()
dataCityCount = []
dataCityCount.append({"name": "无死亡病例城市数量", "value": data[0][0]})
dataCityCount.append({"name": "有死亡病例城市数量", "value": data[0][1]})
return jsonify({"data": dataCityCount})
@app.route("/outbreakOut")
def outbreakOut():
data = chinaSQL.outbreakOut()
d = []
for i in data:
k = i[0].rstrip(string.digits)
v = i[0][len(k):]
ks = extract_tags(k)
for j in ks:
if not j.isdigit():
d.append({"name": j, "value": v})
return jsonify({"kws": d})
@app.route("/worldFourNumber")
def worldFourNumber():
data = worldSQL.worldFourNumber()
return jsonify({"nowConfirm": data[0],
"confirm": data[1],
"heal": data[2],
"dead": data[3],
"nowConfirmAdd": data[4],
"confirmAdd": data[5],
"healAdd": data[6],
"deadAdd": data[7]
})
@app.route('/worldMapNoChina', methods=['GET'])
def worldMapNoChina():
data = worldSQL.worldMapNoChina()
nowConfirm, confirm, heal, dead = [], [], [], []
for a, b, c, d, e in data:
nowConfirm.append({"name": a, "value": b})
confirm.append({"name": a, "value": c})
heal.append({"name": a, "value": d})
dead.append({"name": a, "value": e})
data1 = worldSQL.worldMapChina()
nowConfirm.append({"name": "中国", "value": data1[0][0]})
confirm.append({"name": "中国", "value": data1[0][1]})
heal.append({"name": "中国", "value": data1[0][2]})
dead.append({"name": "中国", "value": data1[0][3]})
return jsonify({"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/globalCumulativeTrend")
def globalCumulativeTrend():
data = worldSQL.globalCumulativeTrend()
day, \
confirm, \
heal, \
dead, \
newAddConfirm = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirm.append(b)
heal.append(c)
dead.append(d)
newAddConfirm.append(e)
return jsonify({"day": day,
"confirm": confirm,
"heal": heal,
"dead": dead,
"newAddConfirm": newAddConfirm
})
@app.route("/globalCumulativeCureMortality")
def globalCumulativeCureMortality():
data = worldSQL.globalCumulativeCureMortality()
day, \
healRate, \
deadRate = [], [], []
for a, b, c in data:
day.append(a.strftime("%m-%d"))
healRate.append(b)
deadRate.append(c)
return jsonify({"day": day,
"healRate": healRate,
"deadRate": deadRate
})
@app.route("/foreignCumulativeDiagnosisTop10Countries")
def foreignCumulativeDiagnosisTop10Countries():
data = worldSQL.foreignCumulativeDiagnosisTop10Countries()
name, \
nowConfirm, \
confirm, \
heal, \
dead = [], [], [], [], []
for a, b, c, d, e in data:
name.append(a)
nowConfirm.append(b)
confirm.append(c)
heal.append(d)
dead.append(e)
return jsonify({"name": name,
"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/theTop10CountriesGrewFastestInSevenDays")
def theTop10CountriesGrewFastestInSevenDays():
data = worldSQL.theTop10CountriesGrewFastestInSevenDays()
nation, \
day7, \
day, \
rate = [], [], [], []
for a, b, c, d in data:
nation.append(a)
day7.append(b)
day.append(c)
rate.append(d)
return jsonify({"nation": nation,
"day7": day7,
"day0": day,
"rate": rate
})
@app.route("/overseasCountriesWithMoreThan10000ConfirmedCases")
def overseasCountriesWithMoreThan10000ConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases()
foreignlist = []
for name, confirm in data:
foreignlist.append({"name": name, "value": confirm})
return jsonify({"data": foreignlist})
@app.route("/overseasCountriesWithMoreThan10000HaveBeenConfirmedCases")
def overseasCountriesWithMoreThan10000HaveBeenConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases()
foreignlist = []
for name, nowConfirm in data:
foreignlist.append({"name": name, "value": nowConfirm})
return jsonify({"data": foreignlist})
@app.route("/newCasesInTheTop10CountriesWithin24Hours")
def newCasesInTheTop10CountriesWithin24Hours():
data = worldSQL.newCasesInTheTop10CountriesWithin24Hours()
nationAddConfirm = []
for nation, addConfirm in data:
nationAddConfirm.append({"name": nation, "value": addConfirm})
return jsonify({"data": nationAddConfirm})
@app.route("/theNumberOfForeignCountriesWithConfirmedCases")
def theNumberOfForeignCountriesWithConfirmedCases():
data = worldSQL.theNumberOfForeignCountriesWithConfirmedCases()
foreignlist = []
for continent, count in data:
foreignlist.append({"name": continent, "value": count})
return jsonify({"data": foreignlist})
if __name__ == '__main__':
app.run()
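    # Editor's note: templates are loaded from ../../web and static assets from
    # ../../static (see the Flask(...) constructor at the top of this file).
    # A typical local start (assumed invocation) is simply `python main.py`,
    # which serves on Flask's default http://127.0.0.1:5000.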
| 33.078025 | 98 | 0.588264 | 0 | 0 | 0 | 0 | 20,475 | 0.978962 | 0 | 0 | 3,714 | 0.177576 |
1be38ec637c07219a45f7c7ba15326a16a343d58 | 396 | py | Python | T2API/migrations/0008_product_weight.py | hackhb18-T2/api | c42be466492d07d6451ff3145985cd8cc0927257 | [
"Apache-2.0"
]
| null | null | null | T2API/migrations/0008_product_weight.py | hackhb18-T2/api | c42be466492d07d6451ff3145985cd8cc0927257 | [
"Apache-2.0"
]
| null | null | null | T2API/migrations/0008_product_weight.py | hackhb18-T2/api | c42be466492d07d6451ff3145985cd8cc0927257 | [
"Apache-2.0"
]
| null | null | null | # Generated by Django 2.0.2 on 2018-02-17 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('T2API', '0007_apiuser_deviceuser'),
]
operations = [
migrations.AddField(
model_name='product',
name='weight',
field=models.IntegerField(default=None, null=True),
),
]
| 20.842105 | 63 | 0.60101 | 303 | 0.765152 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.242424 |
1be41a8ed3e94194a6131c0c94be533e83696d98 | 3,402 | py | Python | contrib/cirrus/podbot.py | juhp/libpod | bc7afd6d71da4173e4894ff352667a25987fa2ea | [
"Apache-2.0"
]
| 2 | 2021-09-20T00:29:06.000Z | 2021-11-28T08:36:20.000Z | contrib/cirrus/podbot.py | juhp/libpod | bc7afd6d71da4173e4894ff352667a25987fa2ea | [
"Apache-2.0"
]
| 2 | 2020-01-04T03:31:18.000Z | 2021-05-17T09:54:03.000Z | contrib/cirrus/podbot.py | juhp/libpod | bc7afd6d71da4173e4894ff352667a25987fa2ea | [
"Apache-2.0"
]
| 1 | 2019-04-08T21:58:07.000Z | 2019-04-08T21:58:07.000Z | #!/usr/bin/env python3
# Simple and dumb script to send a message to the #podman IRC channel on freenode
# Based on example from: https://pythonspot.com/building-an-irc-bot/
import os
import time
import random
import errno
import socket
import sys
class IRC:
response_timeout = 10 # seconds
irc = socket.socket()
def __init__(self, server, nickname, channel):
self.server = server
self.nickname = nickname
self.channel = channel
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _send(self, cmdstr):
self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
def message(self, msg):
data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
print(data)
self._send(data)
@staticmethod
def fix_newlines(bufr):
return bufr.replace('\\r\\n', '\n')
def _required_response(self, needle, haystack):
start = time.time()
end = start + self.response_timeout
while time.time() < end:
if haystack.find(needle) != -1:
return (False, haystack)
time.sleep(0.1)
try:
haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
except socket.error as serr:
if serr.errno == errno.EWOULDBLOCK:
continue
raise # can't handle this
return (True, haystack) # Error
def connect(self, username, password):
# This is ugly as sin, but seems to be a working send/expect sequence
print("connecting to: {0}".format(self.server))
self.irc.connect((self.server, 6667)) #connects to the server
self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
self._send("NICK {0}".format(self.nickname))
        err, haystack = self._required_response('End of /MOTD command.', "")
if err:
print(self.fix_newlines(haystack))
print("Error connecting to {0}".format(self.server))
return True
print("Logging in as {0}".format(username))
self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
err, _ = self._required_response("You are now identified for", "")
if err:
print("Error logging in to {0} as {1}".format(self.server, username))
return True
print("Joining {0}".format(self.channel))
self._send("JOIN {0}".format(self.channel))
err, haystack = self._required_response("{0} {1} :End of /NAMES list."
"".format(self.nickname, self.channel),
haystack)
print(self.fix_newlines(haystack))
if err:
print("Error joining {0}".format(self.channel))
return True
return False
def quit(self):
print("Quitting")
self._send("QUIT :my work is done here")
self.irc.close()
if len(sys.argv) < 3:
print("Error: Must pass desired nick and message as parameters")
else:
irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
if not err:
irc.message(" ".join(sys.argv[2:]))
time.sleep(5.0) # avoid join/quit spam
irc.quit()
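    # Editor's note: usage inferred from the argument handling above (the
    # IRCID value shown is a placeholder):
    #   IRCID="nickservuser nickservpass" ./podbot.py mybot "build finished"
    # argv[1] is the IRC nick, argv[2:] form the message, and IRCID supplies
    # the NickServ username/password pair split on the first space.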
| 34.363636 | 87 | 0.569959 | 2,794 | 0.821282 | 0 | 0 | 85 | 0.024985 | 0 | 0 | 811 | 0.238389 |
1be5b77cc2bbea8d65329992b137d52e24f4e227 | 441 | py | Python | changes/api/build_coverage.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
]
| 443 | 2015-01-03T16:28:39.000Z | 2021-04-26T16:39:46.000Z | changes/api/build_coverage.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
]
| 12 | 2015-07-30T19:07:16.000Z | 2016-11-07T23:11:21.000Z | changes/api/build_coverage.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | [
"Apache-2.0"
]
| 47 | 2015-01-09T10:04:00.000Z | 2020-11-18T17:58:19.000Z | from changes.api.base import APIView
from changes.lib.coverage import get_coverage_by_build_id, merged_coverage_data
from changes.models.build import Build
class BuildTestCoverageAPIView(APIView):
def get(self, build_id):
build = Build.query.get(build_id)
if build is None:
return '', 404
coverage = merged_coverage_data(get_coverage_by_build_id(build.id))
return self.respond(coverage)
| 25.941176 | 79 | 0.730159 | 280 | 0.634921 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.004535 |
1be723fadb484c2875b98748f51d456625b23262 | 5,251 | py | Python | topopt/mechanisms/problems.py | arnavbansal2764/topopt | 74d8f17568a9d3349632e23840a9dc5b0d6c4d1f | [
"MIT"
]
| 53 | 2020-04-14T10:13:04.000Z | 2022-02-24T03:16:57.000Z | topopt/mechanisms/problems.py | arnavbansal2764/topopt | 74d8f17568a9d3349632e23840a9dc5b0d6c4d1f | [
"MIT"
]
| 5 | 2020-11-12T23:56:30.000Z | 2021-09-30T19:24:06.000Z | topopt/mechanisms/problems.py | arnavbansal2764/topopt | 74d8f17568a9d3349632e23840a9dc5b0d6c4d1f | [
"MIT"
]
| 15 | 2020-02-12T01:32:07.000Z | 2022-02-20T02:44:55.000Z | """Compliant mechanism synthesis problems using topology optimization."""
import numpy
import scipy.sparse
from ..problems import ElasticityProblem
from .boundary_conditions import MechanismSynthesisBoundaryConditions
from ..utils import deleterowcol
class MechanismSynthesisProblem(ElasticityProblem):
r"""
Topology optimization problem to generate compliant mechanisms.
:math:`\begin{aligned}
\max_{\boldsymbol{\rho}} \quad &
\{u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}\}\\
\textrm{subject to}: \quad & \mathbf{K}\mathbf{u} =
\mathbf{f}_\text{in}\\
& \sum_{e=1}^N v_e\rho_e \leq V_\text{frac},
\quad 0 < \rho_\min \leq \rho_e \leq 1,
\quad e=1, \dots, N.\\
\end{aligned}`
where :math:`\mathbf{l}` is a vector with the value 1 at the degree(s) of
freedom corresponding to the output point and with zeros at all other
places.
Attributes
----------
spring_stiffnesses: numpy.ndarray
The spring stiffnesses of the
actuator and output displacement.
Emin: float
The minimum stiffness of elements.
Emax: float
The maximum stiffness of elements.
"""
@staticmethod
def lk(E: float = 1.0, nu: float = 0.3) -> numpy.ndarray:
"""
Build the element stiffness matrix.
Parameters
----------
E:
Young's modulus of the material.
nu:
Poisson's ratio of the material.
Returns
-------
The element stiffness matrix for the material.
"""
return ElasticityProblem.lk(1e0, nu)
def __init__(
self, bc: MechanismSynthesisBoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
        bc:
            Boundary conditions of the problem.
        penalty:
            Penalty value used to penalize fractional densities in SIMP.
"""
super().__init__(bc, penalty)
self.Emin = 1e-6 # Minimum stiffness of elements
self.Emax = 1e2 # Maximum stiffness of elements
# Spring stiffnesses for the actuator and output displacement
self.spring_stiffnesses = numpy.full(
numpy.nonzero(self.f)[0].shape, 10.0)
def build_K(self, xPhys: numpy.ndarray, remove_constrained: bool = True
) -> scipy.sparse.coo.coo_matrix:
"""
Build the stiffness matrix for the problem.
Parameters
----------
xPhys:
The element densisities used to build the stiffness matrix.
remove_constrained:
Should the constrained nodes be removed?
Returns
-------
The stiffness matrix for the mesh.
"""
# Build the stiffness matrix using inheritance
K = super().build_K(xPhys, remove_constrained=False).tocsc()
# Add spring stiffnesses
spring_ids = numpy.nonzero(self.f)[0]
K[spring_ids, spring_ids] += self.spring_stiffnesses
# K = (K.T + K) / 2. # Make sure the stiffness matrix is symmetric
# Remove constrained dofs from matrix and convert to coo
if remove_constrained:
K = deleterowcol(K, self.fixed, self.fixed)
return K.tocoo()
def compute_objective(self, xPhys: numpy.ndarray, dobj: numpy.ndarray
) -> float:
r"""
Compute the objective and gradient of the mechanism synthesis problem.
The objective is :math:`u_{\text{out}}=\mathbf{l}^{T} \mathbf{u}`
where :math:`\mathbf{l}` is a vector with the value 1 at
the degree(s) of freedom corresponding to the output point and with
zeros at all other places. The gradient of the objective is
:math:`\begin{align}
u_\text{out} &= \mathbf{l}^T\mathbf{u} = \mathbf{l}^T\mathbf{u} +
\boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\
\frac{\partial u_\text{out}}{\partial \rho_e} &=
(\mathbf{K}\boldsymbol{\lambda} + \mathbf{l})^T
\frac{\partial \mathbf u}{\partial \rho_e} +
\boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
= \boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
\end{align}`
where :math:`\mathbf{K}\boldsymbol{\lambda} = -\mathbf{l}`.
Parameters
----------
xPhys:
The density design variables.
dobj:
The gradient of the objective to compute.
Returns
-------
The objective of the compliant mechanism synthesis problem.
"""
# Setup and solve FE problem
self.update_displacements(xPhys)
u = self.u[:, 0][self.edofMat].reshape(-1, 8) # Displacement
λ = self.u[:, 1][self.edofMat].reshape(-1, 8) # Fixed vector (Kλ = -l)
obj = self.f[:, 1].T @ self.u[:, 0]
self.obje[:] = (λ @ self.KE * u).sum(1)
self.compute_young_moduli(xPhys, dobj) # Stores the derivative in dobj
dobj *= -self.obje
return obj
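# Editor's sketch of intended usage (constructor arguments for the boundary
# conditions and the shape of xPhys are assumptions, not taken from this file):
#   bc = MechanismSynthesisBoundaryConditions(...)  # from .boundary_conditions
#   problem = MechanismSynthesisProblem(bc, penalty=3.0)
#   dobj = numpy.zeros(xPhys.shape)  # gradient buffer filled in place
#   u_out = problem.compute_objective(xPhys, dobj)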
| 33.234177 | 79 | 0.591316 | 4,998 | 0.951275 | 0 | 0 | 433 | 0.082413 | 0 | 0 | 3,645 | 0.693757 |
1be7ab6f787e652d44d15533e2b5246954d6801d | 932 | py | Python | tests/test_parse_icao24bit.py | Collen-Roller/arp | 08eaa2dda3adb1dbd600597a6d03603669c8e06d | [
"MIT"
]
| 2 | 2020-10-28T17:03:14.000Z | 2021-01-27T10:44:33.000Z | tests/test_parse_icao24bit.py | Collen-Roller/arp | 08eaa2dda3adb1dbd600597a6d03603669c8e06d | [
"MIT"
]
| 8 | 2020-12-08T16:42:43.000Z | 2020-12-29T00:41:33.000Z | tests/test_parse_icao24bit.py | Collen-Roller/arp | 08eaa2dda3adb1dbd600597a6d03603669c8e06d | [
"MIT"
]
| 1 | 2020-12-09T20:35:52.000Z | 2020-12-09T20:35:52.000Z | import unittest
from flydenity import Parser
class TestParseIcao24Bit(unittest.TestCase):
def setUp(self):
self.parser = Parser()
def test_parse_simple(self):
match = self.parser.parse("3D2591", icao24bit=True)
self.assertEqual(match, {"nation": "Germany", "description": "general", "iso2": "DE", "iso3": "DEU"})
def test_parse_strict(self):
sloppy_reg_sloppy_parser = self.parser.parse("3DX", icao24bit=True, strict=False)
sloppy_reg_strict_parser = self.parser.parse("3DX", icao24bit=True, strict=True)
strict_reg_sloppy_parser = self.parser.parse("3D2591", icao24bit=True, strict=False)
strict_reg_strict_parser = self.parser.parse("3D2591", icao24bit=True, strict=True)
self.assertTrue(sloppy_reg_sloppy_parser == strict_reg_sloppy_parser == strict_reg_strict_parser != sloppy_reg_strict_parser)
if __name__ == "__main__":
unittest.main()
| 35.846154 | 133 | 0.713519 | 834 | 0.89485 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.111588 |
1be82da5cbe879b6b36fe90dd23217980058a69e | 465 | py | Python | ever/util/_main.py | Bobholamovic/ever | f38060674a40ed53072b9d9be99cc656a830398f | [
"Apache-2.0"
]
| 22 | 2021-08-21T00:13:18.000Z | 2022-03-28T19:38:10.000Z | ever/util/_main.py | Bobholamovic/ever | f38060674a40ed53072b9d9be99cc656a830398f | [
"Apache-2.0"
]
| 2 | 2021-09-01T06:28:38.000Z | 2021-12-06T07:17:57.000Z | ever/util/_main.py | Bobholamovic/ever | f38060674a40ed53072b9d9be99cc656a830398f | [
"Apache-2.0"
]
| 6 | 2021-08-21T06:32:47.000Z | 2022-02-10T07:41:29.000Z | import os
def create_project(path):
dirs = ['configs', 'module', 'data']
dirs = [os.path.join(path, d) for d in dirs]
for d in dirs:
os.makedirs(d)
train_script = r"""
import ever as er
def train(trainer_name):
trainer = er.trainer.get_trainer(trainer_name)()
trainer.run()
"""
with open(os.path.join(path, 'train.py'), 'w') as f:
f.write(train_script)
print('created project in {}'.format(path))
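# Editor's note: an example call (path is hypothetical) is
# create_project('./my_experiment'), which creates the configs/, module/ and
# data/ folders under that path and writes a minimal train.py that dispatches
# to er.trainer.get_trainer(<trainer_name>).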
| 19.375 | 56 | 0.597849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.421505 |
1be88ec83f0700967a841e75b99ce2ed109f8dfc | 473 | py | Python | src/app/services/metrics_service.py | chrisbpoint/the-app | 960307718c2cef7cdd86d348d380381aafaab10a | [
"MIT"
]
| null | null | null | src/app/services/metrics_service.py | chrisbpoint/the-app | 960307718c2cef7cdd86d348d380381aafaab10a | [
"MIT"
]
| null | null | null | src/app/services/metrics_service.py | chrisbpoint/the-app | 960307718c2cef7cdd86d348d380381aafaab10a | [
"MIT"
]
| null | null | null | class MetricsService:
def __init__(self, adc_data, metrics_data):
self._adc_data = adc_data
self._metrics_data = metrics_data
@property
def metrics_data(self):
return self._metrics_data
def update(self):
self._metrics_data.is_new_data_available = False
if self._adc_data.is_new_data_available:
self._metrics_data.update(self._adc_data.trace)
self._metrics_data.is_new_data_available = True
| 29.5625 | 59 | 0.69556 | 472 | 0.997886 | 0 | 0 | 71 | 0.150106 | 0 | 0 | 0 | 0 |
1bea69b9a810613a8cdcc7d4cd5f8e74e2b87b61 | 687 | py | Python | resthelper/tests/test_build_url.py | rklonner/resthelper | c129a7ff3efb5447aeb9794142c4d640261d962d | [
"MIT"
]
| null | null | null | resthelper/tests/test_build_url.py | rklonner/resthelper | c129a7ff3efb5447aeb9794142c4d640261d962d | [
"MIT"
]
| null | null | null | resthelper/tests/test_build_url.py | rklonner/resthelper | c129a7ff3efb5447aeb9794142c4d640261d962d | [
"MIT"
]
| null | null | null | import unittest
from resthelper.utils import build_restful_url
class TestBuildUrl(unittest.TestCase):
def test_is_restful_https_url(self):
url = build_restful_url('https://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'https://[email protected]/rest/1.0/request')
def test_is_restful_http_url(self):
url = build_restful_url('http://jenkins1.tttech.com',
'testuser', '/rest/1.0/request')
self.assertEqual(url,
'http://[email protected]/rest/1.0/request')
if __name__ == '__main__':
unittest.main() | 32.714286 | 68 | 0.622999 | 572 | 0.832606 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.340611 |
1beb0ef06d9c6f7de745f499f7af1a9f705e4a88 | 929 | py | Python | sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 8944b7d276f91b019ad6aa2e7e29324fa107fa01 | [
"MIT"
]
| null | null | null | sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 8944b7d276f91b019ad6aa2e7e29324fa107fa01 | [
"MIT"
]
| null | null | null | sendsms/backends/rq.py | this-is-the-bard/django-sendsms | 8944b7d276f91b019ad6aa2e7e29324fa107fa01 | [
"MIT"
]
| null | null | null | """ python-rq based backend
This backend will send your messages asynchronously with python-rq.
Before using this backend, make sure that django-rq is installed and
configured.
Usage
-----
In settings.py
SENDSMS_BACKEND = 'sendsms.backends.rq.SmsBackend'
RQ_SENDSMS_BACKEND = 'actual.backend.to.use.SmsBackend'
"""
from sendsms.api import get_connection
from sendsms.backends.base import BaseSmsBackend
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django_rq import job
RQ_SENDSMS_BACKEND = getattr(settings, 'RQ_SENDSMS_BACKEND', None)
if not RQ_SENDSMS_BACKEND:
raise ImproperlyConfigured('Set RQ_SENDSMS_BACKEND')
@job
def send_messages(messages):
connection = get_connection(RQ_SENDSMS_BACKEND)
connection.send_messages(messages)
class SmsBackend(BaseSmsBackend):
def send_messages(self, messages):
send_messages.delay(messages)
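# Editor's note: with the settings shown in the module docstring, messages sent
# through sendsms are handed to django-rq and delivered asynchronously by a
# worker process (started with `python manage.py rqworker`, the standard
# django-rq command) using the backend named in RQ_SENDSMS_BACKEND.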
| 22.119048 | 68 | 0.787944 | 111 | 0.119483 | 0 | 0 | 124 | 0.133477 | 0 | 0 | 374 | 0.402583 |
1bed3f78be12183f03bd98f78582fb16d8457339 | 2,435 | py | Python | venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
]
| 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
]
| 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
]
| 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #Autogenerated schema
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
String,
Bool,
Sequence,
)
from openpyxl.descriptors.excel import CellRange
class Extension(Serialisable):
tagname = "extension"
uri = String(allow_none=True)
def __init__(self,
uri=None,
):
self.uri = uri
class ExtensionList(Serialisable):
tagname = "extensionList"
# uses element group EG_ExtensionList
ext = Sequence(expected_type=Extension)
__elements__ = ('ext',)
def __init__(self,
ext=(),
):
self.ext = ext
class IgnoredError(Serialisable):
tagname = "ignoredError"
sqref = CellRange
evalError = Bool(allow_none=True)
twoDigitTextYear = Bool(allow_none=True)
numberStoredAsText = Bool(allow_none=True)
formula = Bool(allow_none=True)
formulaRange = Bool(allow_none=True)
unlockedFormula = Bool(allow_none=True)
emptyCellReference = Bool(allow_none=True)
listDataValidation = Bool(allow_none=True)
calculatedColumn = Bool(allow_none=True)
def __init__(self,
sqref=None,
evalError=False,
twoDigitTextYear=False,
numberStoredAsText=False,
formula=False,
formulaRange=False,
unlockedFormula=False,
emptyCellReference=False,
listDataValidation=False,
calculatedColumn=False,
):
self.sqref = sqref
self.evalError = evalError
self.twoDigitTextYear = twoDigitTextYear
self.numberStoredAsText = numberStoredAsText
self.formula = formula
self.formulaRange = formulaRange
self.unlockedFormula = unlockedFormula
self.emptyCellReference = emptyCellReference
self.listDataValidation = listDataValidation
self.calculatedColumn = calculatedColumn
class IgnoredErrors(Serialisable):
tagname = "ignoredErrors"
ignoredError = Sequence(expected_type=IgnoredError)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('ignoredError', 'extLst')
def __init__(self,
ignoredError=(),
extLst=None,
):
self.ignoredError = ignoredError
self.extLst = extLst
| 25.904255 | 64 | 0.631622 | 2,208 | 0.906776 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.057495 |
1bee0a3b08699aa37d40800889d795e3cdf9fb23 | 2,918 | py | Python | cwbot/kolextra/request/ItemDescriptionRequest.py | zeryl/RUcwbot | 734716506066da599fcbc96d0a815a5e30f6e077 | [
"BSD-3-Clause"
]
| null | null | null | cwbot/kolextra/request/ItemDescriptionRequest.py | zeryl/RUcwbot | 734716506066da599fcbc96d0a815a5e30f6e077 | [
"BSD-3-Clause"
]
| 1 | 2019-04-15T02:48:19.000Z | 2019-04-15T03:02:36.000Z | cwbot/kolextra/request/ItemDescriptionRequest.py | rlbond86/cwbot | 2432a9c9d048b7600b53d5cb8f7ef608c6613258 | [
"BSD-3-Clause"
]
| null | null | null | from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import re
class ItemDescriptionRequest(GenericRequest):
"Gets the description of an item and then parses various information from the response."
_itemIdPattern = re.compile(r'(?i)<!--\s*itemid:\s*(\d+)\s*-->')
def __init__(self, session, descId):
super(ItemDescriptionRequest, self).__init__(session)
self.url = session.serverURL + "desc_item.php?whichitem=%s" % descId
def parseResponse(self):
# Get the item name.
itemNamePattern = PatternManager.getOrCompilePattern("itemName")
match = itemNamePattern.search(self.responseText)
self.responseData["name"] = match.group(1)
# Get the item image.
imagePattern = PatternManager.getOrCompilePattern("itemImage")
match = imagePattern.search(self.responseText)
self.responseData["image"] = match.group(1)
# Get the item type.
typePattern = PatternManager.getOrCompilePattern("itemType")
match = typePattern.search(self.responseText)
if match:
self.responseData["type"] = match.group(1).rstrip()
# Get the autosell value.
autosellPattern = PatternManager.getOrCompilePattern("itemAutosell")
match = autosellPattern.search(self.responseText)
if match:
self.responseData["autosell"] = int(match.group(1))
else:
self.responseData["autosell"] = 0
# See if this is a cooking ingredient.
cookingPattern = PatternManager.getOrCompilePattern("isCookingIngredient")
match = cookingPattern.search(self.responseText)
if match:
self.responseData["isCookingIngredient"] = True
# See if the item is a cocktailcrafting ingredient.
cocktailcraftingPattern = PatternManager.getOrCompilePattern("isCocktailcraftingIngredient")
match = cocktailcraftingPattern.search(self.responseText)
if match:
self.responseData["isCocktailcraftingIngredient"] = True
# See if the item is a meatsmithing component.
meatsmithingPattern = PatternManager.getOrCompilePattern("isMeatsmithingComponent")
match = meatsmithingPattern.search(self.responseText)
if match:
self.responseData["isMeatsmithingComponent"] = True
# See if the item is a jewelrymaking component.
jewelrymakingPattern = PatternManager.getOrCompilePattern("isJewelrymakingComponent")
match = jewelrymakingPattern.search(self.responseText)
if match:
self.responseData["isJewelrymakingComponent"] = True
# See if the itemId is listed
match = self._itemIdPattern.search(self.responseText)
if match:
self.responseData["id"] = int(match.group(1))
else:
self.responseData["id"] = None
| 42.911765 | 100 | 0.675805 | 2,801 | 0.959904 | 0 | 0 | 0 | 0 | 0 | 0 | 744 | 0.254969 |
1beeb9bf708d482300442a926d31325bbdca0e33 | 619 | py | Python | SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 074c6e1a854fdfc21fb292e575a869719d56c5d5 | [
"Apache-2.0"
]
| 24 | 2018-03-15T09:00:51.000Z | 2022-03-17T05:19:47.000Z | SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 074c6e1a854fdfc21fb292e575a869719d56c5d5 | [
"Apache-2.0"
]
| 8 | 2020-01-20T15:44:42.000Z | 2021-10-18T05:39:04.000Z | SmartMove/SmartConnector/cpapi/utils.py | themichaelasher/SmartMove | 074c6e1a854fdfc21fb292e575a869719d56c5d5 | [
"Apache-2.0"
]
| 22 | 2018-06-04T20:36:41.000Z | 2022-03-16T17:10:44.000Z | import json
import sys
def compatible_loads(json_data):
"""
    Function json.loads in python 3.0 - 3.5 can't handle bytes, so this function handles it.
:param json_data:
:return: unicode (str if it's python 3)
"""
if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
json_data = json_data.decode("utf-8")
return json.loads(json_data)
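# Example (illustrative): both payload types decode identically, which is the
# point on Python 3.0 - 3.5 where json.loads() rejects bytes.
#   compatible_loads(b'{"ok": true}')  # -> {'ok': True}
#   compatible_loads('{"ok": true}')   # -> {'ok': True}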
def get_massage_from_io_error(error):
"""
:param: IOError
:return: error message
"""
if sys.version_info >= (3, 0):
return error.strerror
else:
return error.message
| 24.76 | 92 | 0.610662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.389338 |
1bef48d1f47271bb3d6c33f78c3cf6b32220029d | 3,578 | py | Python | VokeScan.py | DaduVoke/VokeScan | a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c | [
"MIT"
]
| 2 | 2021-12-05T04:00:50.000Z | 2022-03-24T17:53:26.000Z | VokeScan.py | DaduVoke/VokeScan | a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c | [
"MIT"
]
| null | null | null | VokeScan.py | DaduVoke/VokeScan | a80c8e99ab74dd15a4f9bc3ba7e01abd81840f2c | [
"MIT"
]
| null | null | null | import sys,time
def sprint(str):
for c in str + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(3./90)
from colorama import Fore, Back, Style
sprint (Fore.RED + "გამარჯობა. tool-ი შექმინლია ლევან ყიფიანი-DaduVoke-ის მიერ @2021")
import socket
import _thread
import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Core(object):
ipurl=0
mode=1024
menu1=False
f=None
network_speed="სიჩქარე"
menu2=False
def GetData(self, url):
self.url = url
try:
self.ipurl = socket.gethostbyname(self.url)
except Exception as e:
print ("თქვენ არასწორად შეიყვანეთ IP ან URL")
exit(0)
Core.ipurl=self.ipurl
print (22*" ",bcolors.OKGREEN,"=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=/VokeScaner=/=\=\=/=\=/=\=/=\=/=\=/=\=/=\=/=\=",bcolors.OKGREEN)
sprint('გთხოვთ აირჩიოთ 1 ან 2')
while Core.menu1 is not True:
choice = input("\n1 - მოკლე\n2 - გრძელი\n")
if choice == "1":
Core.mode=1024
                Core.menu1 = True
break
elif choice == "2":
Core.mode=64000
                Core.menu1 = True
break
else:
sprint("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
while Core.menu2 is not True:
sprint("მეორე ეტაპი! გთხოვთ აირჩიოთ გამოყენებული ინტერნეტის სიჩქარე (0.05(1) 0.03(2))")
choice = input("\n1 - მოკლე \n2 - გრძელი\n")
if choice == "1":
Core.network_speed=0.05
                Core.menu2 = True
break
elif choice == "2":
Core.network_speed=0.3
                Core.menu2 = True
break
else:
print("გთხოვთ აირჩიოთ პირველი ან მეორე. პროგრამის გასაშვებად ტერმინალში გამოიყენეთ ბრძანება 1 ან 2")
def Start_Scan(self, port_start, port_end):
Core.f = open(Core.ipurl, "a")
try:
for x in range(port_start,port_end):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
res = sock.connect_ex((Core.ipurl,x))
                if res == 0:
tmp="პორტი",x,"გახსნილია", socket.getservbyport(x)
tmp1=str(tmp[0])+" "+str(tmp[1])+" "+str(tmp[2])+" "+str(tmp[3])
print(bcolors.OKGREEN,tmp1)
Core.f.write(str(tmp)+"\n")
            Core.f.close()  # close the results file once, after the whole range was scanned
except Exception as e:
print (e)
try:
scan = Core()
scan.GetData(input("ჩაწერეთ IP ან მისამართი URL\n"))
print(bcolors.WARNING,"სიხშირე:",Core.mode,"\n სამიზნე:",Core.ipurl,"\n სკანერის სიჩქარე:",Core.network_speed,bcolors.ENDC)
print(bcolors.BOLD,"გთხოვთ დაიცადოთ რამდენიმე წამი...",bcolors.ENDC)
for count in range(0,Core.mode):
time.sleep(Core.network_speed)
_thread.start_new_thread(scan.Start_Scan, (count,count+1))
if count > Core.mode:
exit(0)
except Exception as e:
print (e)
| 18.162437 | 139 | 0.488262 | 3,213 | 0.734568 | 0 | 0 | 0 | 0 | 0 | 0 | 1,575 | 0.360082 |
1bef4c913e56949ae48100d1d528ebecb2bb01d8 | 53,296 | py | Python | agent/src/clacks/agent/objects/object.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
]
| 2 | 2015-01-26T07:15:19.000Z | 2015-11-09T13:42:11.000Z | agent/src/clacks/agent/objects/object.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
]
| null | null | null | agent/src/clacks/agent/objects/object.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
]
| null | null | null | # This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
The object base class.
"""
import copy
import zope.event
import pkg_resources
import os
from lxml import etree
from lxml.builder import E
from logging import getLogger
from zope.interface import Interface, implements
from clacks.common import Environment
from clacks.common.utils import N_, is_uuid
from clacks.common.components import PluginRegistry
from clacks.common.error import ClacksErrorHandler as C
from clacks.agent.objects.backend.registry import ObjectBackendRegistry
from clacks.agent.exceptions import ObjectException
# Status
STATUS_OK = 0
STATUS_CHANGED = 1
# Register the errors handled by us
C.register_codes(dict(
CREATE_NEEDS_BASE=N_("Creation of '%(location)s' lacks a base DN"),
READ_BACKEND_PROPERTIES=N_("Error reading properties for backend '%(backend)s'"),
ATTRIBUTE_BLOCKED_BY=N_("Attribute is blocked by %(source)s==%(value)s"),
ATTRIBUTE_READ_ONLY=N_("Attribute is read only"),
ATTRIBUTE_MANDATORY=N_("Attribute is mandatory"),
ATTRIBUTE_INVALID_CONSTANT=N_("Value is invalid - expected one of %(elements)s"),
ATTRIBUTE_INVALID_LIST=N_("Value is invalid - expected a list"),
ATTRIBUTE_INVALID=N_("Value is invalid - expected value of type '%(type)s'"),
ATTRIBUTE_CHECK_FAILED=N_("Value is invalid"),
ATTRIBUTE_NOT_UNIQUE=N_("Value is not unique (%(value)s)"),
ATTRIBUTE_NOT_FOUND=N_("Attribute not found"),
OBJECT_MODE_NOT_AVAILABLE=N_("Mode '%(mode)s' is not available for base objects"),
OBJECT_MODE_BASE_AVAILABLE=N_("Mode '%(mode)s' is only available for base objects"),
OBJECT_NOT_SUB_FOR=N_("Object of type '%(ext)s' cannot be added as to the '%(base)s' container"),
OBJECT_REMOVE_NON_BASE_OBJECT=N_("Cannot remove non base object"),
OBJECT_MOVE_NON_BASE_OBJECT=N_("Cannot move non base object"),
OBJECT_BASE_NO_RETRACT=N_("Base object cannot be retracted"),
FILTER_INVALID_KEY=N_("Invalid key '%(key)s' for filter '%(filter)s'"),
FILTER_MISSING_KEY=N_("Missing key '%(key)s' after processing filter '%(filter)s'"),
FILTER_NO_LIST=N_("Filter '%(filter)s' did not return a %(type)s value - a list was expected"),
ATTRIBUTE_DEPEND_LOOP=N_("Potential loop in attribute dependencies")
))
class Object(object):
"""
This class is the base class for all objects.
It contains getter and setter methods for the object
attributes and it is able to initialize itself by reading data from
backends.
It also contains the ability to execute the in- and out-filters for the
object properties.
    All meta-classes for objects, created by the XML definitions, will inherit this class.
"""
_reg = None
_backend = None
_mode = False
_propsByBackend = {}
uuid = None
dn = None
orig_dn = None
log = None
createTimestamp = None
modifyTimestamp = None
myProperties = None
env = None
parent = None
owner = None
attributesInSaveOrder = None
def __saveOrder(self):
"""
Returns a list containing all attributes in the correct
save-order.
        Because some attributes depend on others, the attributes they depend
        on have to be saved first.
"""
data = self.__saveOrderHelper()
attrs = []
for level in sorted(data.keys(), reverse=True):
for attr in data[level]:
if attr not in attrs:
attrs.append(attr)
return attrs
def __saveOrderHelper(self, res=None, item=None, level=0):
"""
Helper method for '__saveOrder' to detect the dependency
depth (level) for an attribute
"""
if not res:
res = {}
if not level in res:
res[level] = []
if level == 10:
raise ValueError(C.make_error('ATTRIBUTE_DEPEND_LOOP'))
if not item:
for key in self.myProperties:
self.__saveOrderHelper(res, key, level + 1)
else:
if len(self.myProperties[item]['depends_on']):
for key in self.myProperties[item]['depends_on']:
self.__saveOrderHelper(res, key, level + 1)
res[level].append(item)
return res
def __init__(self, where=None, mode="update"):
self.env = Environment.getInstance()
# Instantiate Backend-Registry
self._reg = ObjectBackendRegistry.getInstance()
self.log = getLogger(__name__)
self.log.debug("new object instantiated '%s'" % type(self).__name__)
# Group attributes by Backend
propsByBackend = {}
props = getattr(self, '__properties')
self.myProperties = copy.deepcopy(props)
self.attributesInSaveOrder = self.__saveOrder()
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Load dynamic dropdown-values
if self.myProperties[key]['values_populate']:
cr = PluginRegistry.getInstance('CommandRegistry')
values = cr.call(self.myProperties[key]['values_populate'])
if type(values).__name__ == "dict":
self.myProperties[key]['values'] = values
else:
self.myProperties[key]['values'] = atypes['String'].convert_to(self.myProperties[key]['type'], values)
# Initialize an empty array for each backend
for be in self.myProperties[key]['backend']:
if be not in propsByBackend:
propsByBackend[be] = []
# Append property
propsByBackend[be].append(key)
self._propsByBackend = propsByBackend
self._mode = mode
# Initialize object using a DN
if where:
if mode == "create":
if is_uuid(where):
raise ValueError(C.make_error('CREATE_NEEDS_BASE', "base", location=where))
self.orig_dn = self.dn = where
else:
self._read(where)
# Set status to modified for attributes that do not have a value but are
# mandatory and have a default.
# This ensures that default values are passed to the out_filters and get saved
# afterwards.
# (Defaults will be passed to in-filters too, if they are not overwritten by _read())
for key in self.myProperties:
if not(self.myProperties[key]['value']) and self.myProperties[key]['default'] is not None and \
len(self.myProperties[key]['default']):
self.myProperties[key]['value'] = copy.deepcopy(self.myProperties[key]['default'])
if self.myProperties[key]['mandatory']:
self.myProperties[key]['status'] = STATUS_CHANGED
def set_foreign_value(self, attr, original):
self.myProperties[attr]['value'] = original['value']
self.myProperties[attr]['in_value'] = original['in_value']
self.myProperties[attr]['orig_value'] = original['orig_value']
def listProperties(self):
return self.myProperties.keys()
def getProperties(self):
return copy.deepcopy(self.myProperties)
def listMethods(self):
methods = getattr(self, '__methods')
return methods.keys()
def hasattr(self, attr):
return attr in self.myProperties
def _read(self, where):
"""
        This method tries to initialize an object instance by reading data
from the defined backend.
Attributes will be grouped by their backend to ensure that only one
request per backend will be performed.
"""
# Generate missing values
if is_uuid(where):
#pylint: disable=E1101
if self._base_object:
self.dn = self._reg.uuid2dn(self._backend, where)
else:
self.dn = None
self.uuid = where
else:
self.dn = where
self.uuid = self._reg.dn2uuid(self._backend, where)
# Get last change timestamp
self.orig_dn = self.dn
if self.dn:
self.createTimestamp, self.modifyTimestamp = self._reg.get_timestamps(self._backend, self.dn)
# Load attributes for each backend.
# And then assign the values to the properties.
self.log.debug("object uuid: %s" % self.uuid)
for backend in self._propsByBackend:
try:
# Create a dictionary with all attributes we want to fetch
# {attribute_name: type, name: type}
info = dict([(k, self.myProperties[k]['backend_type']) for k in self._propsByBackend[backend]])
self.log.debug("loading attributes for backend '%s': %s" % (backend, str(info)))
be = ObjectBackendRegistry.getBackend(backend)
be_attrs = self._backendAttrs[backend] if backend in self._backendAttrs else None
attrs = be.load(self.uuid, info, be_attrs)
except ValueError as e:
raise ObjectException(C.make_error('READ_BACKEND_PROPERTIES', backend=backend))
# Assign fetched value to the properties.
for key in self._propsByBackend[backend]:
if key not in attrs:
self.log.debug("attribute '%s' was not returned by load" % key)
continue
# Keep original values, they may be overwritten in the in-filters.
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = attrs[key]
self.log.debug("%s: %s" % (key, self.myProperties[key]['value']))
# Once we've loaded all properties from the backend, execute the
# in-filters.
for key in self.myProperties:
# Skip loading in-filters for None values
if self.myProperties[key]['value'] is None:
self.myProperties[key]['in_value'] = self.myProperties[key]['value'] = []
continue
# Execute defined in-filters.
if len(self.myProperties[key]['in_filter']):
self.log.debug("found %s in-filter(s) for attribute '%s'" % (str(len(self.myProperties[key]['in_filter'])), key))
# Execute each in-filter
for in_f in self.myProperties[key]['in_filter']:
self.__processFilter(in_f, key, self.myProperties)
# Convert the received type into the target type if not done already
#pylint: disable=E1101
atypes = self._objectFactory.getAttributeTypes()
for key in self.myProperties:
# Convert values from incoming backend-type to required type
if self.myProperties[key]['value']:
a_type = self.myProperties[key]['type']
be_type = self.myProperties[key]['backend_type']
# Convert all values to required type
if not atypes[a_type].is_valid_value(self.myProperties[key]['value']):
try:
self.myProperties[key]['value'] = atypes[a_type].convert_from(be_type, self.myProperties[key]['value'])
except Exception as e:
self.log.error("conversion of '%s' from '%s' to type '%s' failed: %s" % (key, be_type, a_type, str(e)))
else:
self.log.debug("converted '%s' from type '%s' to type '%s'!" % (key, be_type, a_type))
# Keep the initial value
self.myProperties[key]['last_value'] = self.myProperties[key]['orig_value'] = copy.deepcopy(self.myProperties[key]['value'])
def _delattr_(self, name):
"""
Deleter method for properties.
"""
if name in self.attributesInSaveOrder:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow to write to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Do not allow remove mandatory attributes
if self.myProperties[name]['mandatory']:
raise AttributeError(C.make_error('ATTRIBUTE_MANDATORY', name))
# If not already in removed state
if len(self.myProperties[name]['value']) != 0:
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = copy.deepcopy(self.myProperties[name]['value'])
self.myProperties[name]['value'] = []
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _setattr_(self, name, value):
"""
This is the setter method for object attributes.
Each given attribute value is validated with the given set of
validators.
"""
# Store non property values
try:
object.__getattribute__(self, name)
self.__dict__[name] = value
return
except AttributeError:
pass
# A none value was passed to clear the value
if value is None:
self._delattr_(name)
return
# Try to save as property value
if name in self.myProperties:
# Check if this attribute is blocked by another attribute and its value.
for bb in self.myProperties[name]['blocked_by']:
if bb['value'] in self.myProperties[bb['name']]['value']:
raise AttributeError(C.make_error(
'ATTRIBUTE_BLOCKED_BY', name,
source=bb['name'], value=bb['value']))
# Do not allow to write to read-only attributes.
if self.myProperties[name]['readonly']:
raise AttributeError(C.make_error('ATTRIBUTE_READ_ONLY', name))
# Check if the given value has to match one out of a given list.
if len(self.myProperties[name]['values']) and value not in self.myProperties[name]['values']:
raise TypeError(C.make_error(
'ATTRIBUTE_INVALID_CONSTANT', name,
elements=", ".join(self.myProperties[name]['values'])))
# Set the new value
if self.myProperties[name]['multivalue']:
# Check if the new value is s list.
if type(value) != list:
raise TypeError(C.make_error('ATTRIBUTE_INVALID_LIST', name))
new_value = value
else:
new_value = [value]
# Eventually fixup value from incoming JSON string
s_type = self.myProperties[name]['type']
try:
new_value = self._objectFactory.getAttributeTypes()[s_type].fixup(new_value)
except Exception:
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Check if the new value is valid
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[s_type].is_valid_value(new_value):
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Validate value
if self.myProperties[name]['validator']:
props_copy = copy.deepcopy(self.myProperties)
res, error = self.__processValidator(self.myProperties[name]['validator'], name, new_value, props_copy)
if not res:
if len(error):
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED',
name, details=error))
else:
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED', name))
# Ensure that unique values stay unique. Let the backend test this.
#if self.myProperties[name]['unique']:
# backendI = ObjectBackendRegistry.getBackend(self.myProperties[name]['backend'])
# if not backendI.is_uniq(name, new_value):
# raise ObjectException(C.make_error('ATTRIBUTE_NOT_UNIQUE', name, value=value))
# Assign the properties new value.
self.myProperties[name]['value'] = new_value
self.log.debug("updated property value of [%s|%s] %s:%s" % (type(self).__name__, self.uuid, name, new_value))
# Update status if there's a change
t = self.myProperties[name]['type']
current = copy.deepcopy(self.myProperties[name]['value'])
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[t].values_match(self.myProperties[name]['value'], self.myProperties[name]['orig_value']):
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = current
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def _getattr_(self, name):
"""
        The getter method for object attributes.
(It differentiates between object attributes and class-members)
"""
methods = getattr(self, '__methods')
# If the requested property exists in the object-attributes, then return it.
if name in self.myProperties:
# We can have single and multivalues, return the correct type here.
value = None
if self.myProperties[name]['multivalue']:
value = self.myProperties[name]['value']
else:
if len(self.myProperties[name]['value']):
value = self.myProperties[name]['value'][0]
return value
# The requested property-name seems to be a method, return the method reference.
elif name in methods:
def m_call(*args, **kwargs):
return methods[name]['ref'](self, *args, **kwargs)
return m_call
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def getTemplate(self, theme="default"):
"""
Return the template data - if any. Else None.
"""
return Object.getNamedTemplate(self.env, self._templates, theme)
@staticmethod
def getNamedTemplate(env, templates, theme="default"):
"""
Return the template data - if any. Else None.
"""
ui = []
# If there's a template file, try to find it
if templates:
for template in templates:
path = None
# Absolute path
if template.startswith(os.path.sep):
path = template
# Relative path
else:
# Find path
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', theme, template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', theme, template)
if not os.path.exists(path):
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', "default", template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', "default", template)
if not os.path.exists(path):
return None
with open(path, "r") as f:
_ui = f.read()
# Build new merged resource element
root = etree.fromstring(_ui)
new_resources = []
resources = root.find("resources")
for include in resources.findall("include"):
rc = include.get("location")
location = os.path.join(os.path.dirname(path), rc)
if not os.path.exists(location):
raise IOError(C.make_error("NO_SUCH_RESOURCE", resource=location))
res = ""
with open(location, "r") as f:
res = f.read()
for resource in etree.fromstring(res).findall("qresource"):
files = []
prefix = resource.get("prefix")
for f in resource.findall("file"):
files.append(E.file(os.path.join(prefix, unicode(f.text))))
new_resources.append(E.resource(*files, location=rc))
root.replace(root.find("resources"), E.resources(*new_resources))
ui.append(etree.tostring(root))
return ui
def getAttrType(self, name):
"""
Return the type of a given object attribute.
"""
if name in self.myProperties:
return self.myProperties[name]['type']
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def check(self, propsFromOtherExtensions=None):
"""
Checks whether everything is fine with the extension and its given values or not.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Check if _mode matches with the current object type
#pylint: disable=E1101
if self._base_object and not self._mode in ['create', 'remove', 'update']:
raise ObjectException(C.make_error('OBJECT_MODE_NOT_AVAILABLE', mode=self._mode))
if not self._base_object and self._mode in ['create', 'remove']:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
# Check if we are allowed to create this base object on the given base
if self._base_object and self._mode == "create":
base_type = self.get_object_type_by_dn(self.dn)
if not base_type:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
if self.__class__.__name__ not in self._objectFactory.getAllowedSubElementsForObject(base_type):
raise ObjectException(C.make_error('OBJECT_NOT_SUB_FOR',
ext=self.__class__.__name__,
base=base_type))
        # Transfer values from other commit processes into ourselves
for key in self.attributesInSaveOrder:
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Transfer status into commit status
props[key]['commit_status'] = props[key]['status']
# Collect values by store and process the property filters
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Check if this attribute is blocked by another attribute and its value.
is_blocked = False
for bb in props[key]['blocked_by']:
if bb['value'] in props[bb['name']]['value']:
is_blocked = True
break
            # Check if all required attributes are set. (Skip blocked ones, they cannot be set!)
if not is_blocked and props[key]['mandatory'] and not len(props[key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', key))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Ensure that mandatory values are set
if props[prop_key]['mandatory'] and not len(props[prop_key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', prop_key))
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
return props
def commit(self, propsFromOtherExtensions=None):
"""
Commits changes of an object to the corresponding backends.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
self.check(propsFromOtherExtensions)
self.log.debug("saving object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Transfer status into commit status
for key in self.attributesInSaveOrder:
props[key]['commit_status'] = props[key]['status']
            # Transfer values from other commit processes into ourselves
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Adapt property states
# Run this once - If any state was adapted, then run again to ensure
# that all dependencies are processed.
first = True
_max = 5
required = False
while (first or required) and _max:
first = False
required = False
_max -= 1
for key in self.attributesInSaveOrder:
# Adapt status from dependent properties.
for propname in props[key]['depends_on']:
old = props[key]['commit_status']
props[key]['commit_status'] |= props[propname]['status'] & STATUS_CHANGED
props[key]['commit_status'] |= props[propname]['commit_status'] & STATUS_CHANGED
if props[key]['commit_status'] != old:
required = True
# Collect values by store and process the property filters
collectedAttrs = {}
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Do not save untouched values
if not props[key]['commit_status'] & STATUS_CHANGED:
continue
# Get the new value for the property and execute the out-filter
self.log.debug("changed: %s" % (key,))
# Process each and every out-filter with a clean set of input values,
# to avoid that return-values overwrite themselves.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
collectedAttrs[prop_key] = props[prop_key]
# Create a backend compatible list of all changed attributes.
toStore = {}
for prop_key in collectedAttrs:
# Collect properties by backend
for be in props[prop_key]['backend']:
if not be in toStore:
toStore[be] = {}
                # Convert the property's type to the required format - if it's not of the expected type.
be_type = collectedAttrs[prop_key]['backend_type']
s_type = collectedAttrs[prop_key]['type']
if not self._objectFactory.getAttributeTypes()[be_type].is_valid_value(collectedAttrs[prop_key]['value']):
collectedAttrs[prop_key]['value'] = self._objectFactory.getAttributeTypes()[s_type].convert_to(
be_type, collectedAttrs[prop_key]['value'])
# Append entry to the to-be-stored list
toStore[be][prop_key] = {'foreign': collectedAttrs[prop_key]['foreign'],
'orig': collectedAttrs[prop_key]['in_value'],
'value': collectedAttrs[prop_key]['value'],
'type': collectedAttrs[prop_key]['backend_type']}
        # We may have a plugin without any attributes, like the group asterisk extension. In
        # this case we have to update the object despite the lack of properties.
if not len(toStore) and self._backend:
toStore[self._backend] = {}
# Leave the show if there's nothing to do
tmp = {}
for key, value in toStore.items():
# Skip NULL backend. Nothing to save, anyway.
if key == "NULL":
continue
tmp[key] = value
toStore = tmp
# Skip the whole process if there's no change at all
if not toStore:
return {}
# Update references using the toStore information
changes = {}
for be in toStore:
changes.update(toStore[be])
self.update_refs(changes)
# Handle by backend
p_backend = getattr(self, '_backend')
obj = self
zope.event.notify(ObjectChanged("pre %s" % self._mode, obj))
# Call pre-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PreCreate")
if self._mode in ["update"]:
self.__execute_hook("PreModify")
# First, take care about the primary backend...
if p_backend in toStore:
beAttrs = self._backendAttrs[p_backend] if p_backend in self._backendAttrs else {}
be = ObjectBackendRegistry.getBackend(p_backend)
if self._mode == "create":
obj.uuid = be.create(self.dn, toStore[p_backend], self._backendAttrs[p_backend])
elif self._mode == "extend":
be.extend(self.uuid, toStore[p_backend],
self._backendAttrs[p_backend],
self.getForeignProperties())
else:
be.update(self.uuid, toStore[p_backend], beAttrs)
# Eventually the DN has changed
if self._base_object:
dn = be.uuid2dn(self.uuid)
# Take DN for newly created objects
if self._mode == "create":
if self._base_object:
obj.dn = dn
elif dn != obj.dn:
self.update_dn_refs(dn)
obj.dn = dn
if self._base_object:
zope.event.notify(ObjectChanged("post move", obj))
obj.orig_dn = dn
        # ... then walk through the remaining ones
for backend, data in toStore.items():
# Skip primary backend - already done
if backend == p_backend:
continue
be = ObjectBackendRegistry.getBackend(backend)
beAttrs = self._backendAttrs[backend] if backend in self._backendAttrs else {}
if self._mode == "create":
be.create(self.dn, data, beAttrs)
elif self._mode == "extend":
be.extend(self.uuid, data, beAttrs, self.getForeignProperties())
else:
be.update(self.uuid, data, beAttrs)
zope.event.notify(ObjectChanged("post %s" % self._mode, obj))
# Call post-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PostCreate")
if self._mode in ["update"] and "PostModify":
self.__execute_hook("PostModify")
return props
def revert(self):
"""
Reverts all changes made to this object since it was loaded.
"""
for key in self.myProperties:
self.myProperties[key]['value'] = self.myProperties[key]['last_value']
self.log.debug("reverted object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
def getExclusiveProperties(self):
return [x for x, y in self.myProperties.items() if not y['foreign']]
def getForeignProperties(self):
return [x for x, y in self.myProperties.items() if y['foreign']]
def __processValidator(self, fltr, key, value, props_copy):
"""
This method processes a given process-list (fltr) for a given property (prop).
        It returns True if the value matches the validator set and False if
        it does not.
"""
# This is our process-line pointer it points to the process-list line
# we're executing at the moment
lptr = 0
# Our filter result stack
stack = list()
self.log.debug(" validator started (%s)" % key)
self.log.debug(" value: %s" % (value, ))
# Process the list till we reach the end..
lasterrmsg = ""
errormsgs = []
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
if 'condition' in curline:
# Build up argument list
args = [props_copy, key, value] + curline['params']
# Process condition and keep results
fname = type(curline['condition']).__name__
v, errors = (curline['condition']).process(*args)
                # Log what happened!
self.log.debug(" %s: [Filter] %s(%s) called and returned: %s" % (
lptr, fname, ", ".join(["\"" + x + "\"" for x in curline['params']]), v))
# Append the result to the stack.
stack.append(v)
if not v:
if len(errors):
lasterrmsg = errors.pop()
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
v1 = stack.pop()
v2 = stack.pop()
fname = type(curline['operator']).__name__
res = (curline['operator']).process(v1, v2)
stack.append(res)
# Add last error message
if not res:
errormsgs.append(lasterrmsg)
lasterrmsg = ""
                # Log what happened!
self.log.debug(" %s: [OPERATOR] %s(%s, %s) called and returned: %s" % (
lptr, fname, v1, v2, res))
# Attach last error message
res = stack.pop()
if not res and lasterrmsg != "":
errormsgs.append(lasterrmsg)
self.log.debug(" <- VALIDATOR ENDED (%s)" % key)
return res, errormsgs
def __processFilter(self, fltr, key, prop):
"""
This method processes a given process-list (fltr) for a given property (prop).
For example: When a property has to be stored in the backend, it will
run through the out-filter-process-list and thus will be transformed into a storable
key, value pair.
"""
# Search for replaceable patterns in the process-list.
fltr = self.__fillInPlaceholders(fltr, prop)
# This is our process-line pointer it points to the process-list line
# we're executing at the moment
lptr = 0
# Our filter result stack
stack = list()
# Log values
self.log.debug(" -> FILTER STARTED (%s)" % key)
# Process the list till we reach the end..
while (lptr + 1) in fltr:
# Get the current line and increase the process list pointer.
lptr += 1
curline = fltr[lptr]
# A filter is used to manipulate the 'value' or the 'key' or maybe both.
if 'filter' in curline:
# Build up argument list
args = [self, key, prop]
fname = type(curline['filter']).__name__
for entry in curline['params']:
args.append(entry)
# Process filter and keep results
key, prop = (curline['filter']).process(*args)
# Ensure that the processed data is still valid.
# Filter may mess things up and then the next cannot process correctly.
if key not in prop:
raise ObjectException(C.make_error('FILTER_INVALID_KEY',
key=key, filter=fname))
# Check if the filter returned all expected property values.
for pk in prop:
if not all(k in prop[pk] for k in ('backend', 'value', 'type')):
missing = ", ".join({'backend', 'value', 'type'} - set(prop[pk].keys()))
raise ObjectException(C.make_error('FILTER_MISSING_KEY', key=missing, filter=fname))
# Check if the returned value-type is list or None.
if type(prop[pk]['value']) not in [list, type(None)]:
raise ObjectException(C.make_error('FILTER_NO_LIST',
key=pk, filter=fname, type=type(prop[pk]['value'])))
self.log.debug(" %s: [Filter] %s(%s) called " % (lptr, fname,
", ".join(["\"" + x + "\"" for x in curline['params']])))
# A condition matches for something and returns a boolean value.
# We'll put this value on the stack for later use.
elif 'condition' in curline:
# Build up argument list
args = [key] + curline['params']
# Process condition and keep results
stack.append((curline['condition']).process(*args))
fname = type(curline['condition']).__name__
self.log.debug(" %s: [Condition] %s(%s) called " % (lptr, fname, ", ".join(curline['params'])))
# Handle jump, for example if a condition has failed, jump over its filter-chain.
elif 'jump' in curline:
# Jump to <line> -1 because we will increase the line ptr later.
olptr = lptr
if curline['jump'] == 'conditional':
if stack.pop():
lptr = curline['onTrue'] - 1
else:
lptr = curline['onFalse'] - 1
else:
lptr = curline['to'] - 1
self.log.debug(" %s: [Goto] %s ()" % (olptr, lptr))
# A comparator compares two values from the stack and then returns a single
# boolean value.
elif 'operator' in curline:
a = stack.pop()
b = stack.pop()
stack.append((curline['operator']).process(a, b))
fname = type(curline['operator']).__name__
self.log.debug(" %s: [Condition] %s(%s, %s) called " % (lptr, fname, a, b))
# Log current values
#self.log.debug(" result")
#for pkey in prop:
# self.log.debug(" %s: %s" % (pkey, prop[pkey]['value']))
self.log.debug(" <- FILTER ENDED")
return prop
def __fillInPlaceholders(self, fltr, props):
"""
        This method fills in placeholders for in- and out-filter parameters.
"""
# Collect all property values
propList = {}
for key in props:
if props[key]['multivalue']:
propList[key] = props[key]['value']
else:
if props[key]['value'] and len(props[key]['value']):
propList[key] = props[key]['value'][0]
else:
propList[key] = None
# An inline function which replaces format string tokens
def _placeHolder(x):
try:
x = x % propList
except KeyError:
pass
return x
        # Walk through each line of the process list and replace placeholders.
for line in fltr:
if 'params' in fltr[line]:
fltr[line]['params'] = map(_placeHolder,
fltr[line]['params'])
return fltr
def get_object_type_by_dn(self, dn):
"""
Returns the objectType for a given DN
"""
index = PluginRegistry.getInstance("ObjectIndex")
res = index.search({'dn': dn}, {'_type': 1})
return res[0]['_type'] if res.count() == 1 else None
def get_references(self, override=None):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for ref, info in self._objectFactory.getReferences(override or self.__class__.__name__).items():
for ref_attribute, dsc in info.items():
for idsc in dsc:
if self.myProperties[idsc[1]]['orig_value'] and len(self.myProperties[idsc[1]]['orig_value']):
oval = self.myProperties[idsc[1]]['orig_value'][0]
else:
oval = None
dns = index.search({'_type': ref, ref_attribute: oval}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
idsc[1],
getattr(self, idsc[1]),
dns or [],
self.myProperties[idsc[1]]['multivalue']))
return res
def update_refs(self, data):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
                # Next iteration if there's no change for the relevant
# attribute
if not self_attr in data:
continue
# Load object and change value to the new one
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
o_value = data[self_attr]['orig']
if type(c_value) == list:
if type(o_value) == list:
c_value = filter(lambda x: x not in o_value, c_value)
else:
c_value = filter(lambda x: x != o_value, c_value)
if multivalue:
c_value.append(data[self_attr]['value'])
else:
c_value.append(data[self_attr]['value'][0])
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, data[self_attr]['value'][0])
c_obj.commit()
def remove_refs(self):
for ref_attr, self_attr, value, refs, multivalue in self.get_references(): #@UnusedVariable
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
if type(value) == list:
c_value = filter(lambda x: x not in value, c_value)
else:
c_value = filter(lambda x: x != value, c_value)
setattr(c_obj, ref_attr, c_value)
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def get_dn_references(self):
res = []
index = PluginRegistry.getInstance("ObjectIndex")
for info in self._objectFactory.getReferences("*", "dn").values():
for ref_attribute in info.keys():
dns = index.search({ref_attribute: self.dn}, {'dn': 1})
if dns.count():
dns = [x['dn'] for x in dns]
res.append((
ref_attribute,
map(lambda s: s.decode('utf-8'), dns if dns else [])
))
return res
def update_dn_refs(self, new_dn):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
c_value.append(new_dn)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, new_dn)
c_obj.commit()
def remove_dn_refs(self):
for ref_attr, refs in self.get_dn_references():
for ref in refs:
c_obj = ObjectProxy(ref)
c_value = getattr(c_obj, ref_attr)
if type(c_value) == list:
c_value = filter(lambda x: x != self.dn, c_value)
setattr(c_obj, ref_attr, list(set(c_value)))
else:
setattr(c_obj, ref_attr, None)
c_obj.commit()
def remove(self):
"""
        Removes this object - and possibly its containments.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_REMOVE_NON_BASE_OBJECT'))
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Remove for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre remove", obj))
# Call pre-remove now
self.__execute_hook("PreRemove")
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Remove all non exclusive properties
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.remove(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post remove", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def simulate_move(self, orig_dn):
"""
        Simulate a move for this object
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
obj = self
zope.event.notify(ObjectChanged("pre move", obj, dn=self.dn, orig_dn=orig_dn))
# Update the DN refs which have most probably changed
self.update_dn_refs(self.dn)
zope.event.notify(ObjectChanged("post move", obj, dn=self.dn, orig_dn=orig_dn))
def move(self, new_base):
"""
        Moves this object - and possibly its containments.
"""
#pylint: disable=E1101
if not self._base_object:
raise ObjectException(C.make_error('OBJECT_MOVE_NON_BASE_OBJECT'))
# Collect backends
backends = [getattr(self, '_backend')]
# Collect all other backends
for info in self.myProperties.values():
for be in info['backend']:
if not be in backends:
backends.append(be)
obj = self
zope.event.notify(ObjectChanged("pre move", obj))
# Move for primary backend
be = ObjectBackendRegistry.getBackend(backends[0])
be.move(self.uuid, new_base)
# Update the DN refs which have most probably changed
p_backend = getattr(self, '_backend')
be = ObjectBackendRegistry.getBackend(p_backend)
dn = be.uuid2dn(self.uuid)
self.update_dn_refs(dn)
zope.event.notify(ObjectChanged("post move", obj, dn=dn))
def retract(self):
"""
Removes this object extension
"""
#pylint: disable=E1101
if self._base_object:
raise ObjectException(C.make_error('OBJECT_BASE_NO_RETRACT'))
# Call pre-remove now
self.__execute_hook("PreRemove")
# Remove all references to ourselves
self.remove_refs()
# Collect backends
backends = [getattr(self, '_backend')]
be_attrs = {getattr(self, '_backend'): {}}
for prop, info in self.myProperties.items():
for backend in info['backend']:
if not backend in backends:
backends.append(backend)
if not backend in be_attrs:
be_attrs[backend] = {}
if self.is_attr_set(prop):
be_attrs[backend][prop] = {'foreign': info['foreign'],
'orig': info['in_value'],
'value': info['value'],
'type': info['backend_type']}
# Retract for all backends, removing the primary one as the last one
backends.reverse()
obj = self
zope.event.notify(ObjectChanged("pre retract", obj))
for backend in backends:
be = ObjectBackendRegistry.getBackend(backend)
r_attrs = self.getExclusiveProperties()
# Remove all non exclusive properties
remove_attrs = {}
for attr in be_attrs[backend]:
if attr in r_attrs:
remove_attrs[attr] = be_attrs[backend][attr]
self.remove_refs()
self.remove_dn_refs()
#pylint: disable=E1101
be.retract(self.uuid, remove_attrs, self._backendAttrs[backend] \
if backend in self._backendAttrs else None)
zope.event.notify(ObjectChanged("post retract", obj))
# Call post-remove now
self.__execute_hook("PostRemove")
def is_attr_set(self, name):
return len(self.myProperties[name]['in_value'])
def is_attr_using_default(self, name):
return not self.is_attr_set(name) and self.myProperties[name]['default']
def __execute_hook(self, hook_type):
# Call post-remove now
hooks = getattr(self, '__hooks')
if hook_type in hooks:
for hook in hooks[hook_type]:
hook["ref"](self)
class IObjectChanged(Interface):
def __init__(self, obj):
pass
class IAttributeChanged(Interface):
def __init__(self, attr, value):
pass
class ObjectChanged(object):
implements(IObjectChanged)
def __init__(self, reason, obj=None, dn=None, uuid=None, orig_dn=None, o_type=None):
self.reason = reason
self.uuid = uuid or obj.uuid
self.dn = dn or obj.dn
self.orig_dn = orig_dn or obj.orig_dn
self.o_type = o_type or obj.__class__.__name__
class AttributeChanged(object):
implements(IAttributeChanged)
def __init__(self, reason, obj, target):
self.reason = reason
self.target = target
self.uuid = obj.uuid
from clacks.agent.objects.proxy import ObjectProxy
| 38.287356 | 158 | 0.564001 | 50,723 | 0.951722 | 0 | 0 | 2,558 | 0.047996 | 0 | 0 | 16,055 | 0.301242 |
1bef7a1aa389a58d40ce648d1ed75a0579e889d3 | 8,752 | py | Python | tests/test_benchmark.py | fossabot/BIRL | 62e91523ac5797a13a7b78b9869ccfdf61cc60d8 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_benchmark.py | fossabot/BIRL | 62e91523ac5797a13a7b78b9869ccfdf61cc60d8 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_benchmark.py | fossabot/BIRL | 62e91523ac5797a13a7b78b9869ccfdf61cc60d8 | [
"BSD-3-Clause"
]
| null | null | null | """
Testing default benchmarks in single thred and parallel configuration
Check whether it generates correct outputs and resulting values
Copyright (C) 2017-2019 Jiri Borovec <[email protected]>
"""
import argparse
import logging
import os
import shutil
import sys
import unittest
try: # python 3
from unittest.mock import patch
except ImportError: # python 2
from mock import patch
import numpy as np
import pandas as pd
from numpy.testing import assert_raises, assert_array_almost_equal
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.utilities.data_io import update_path, save_config_yaml
from birl.utilities.dataset import args_expand_parse_images
from birl.utilities.experiments import parse_arg_params, try_decorator
from birl.benchmark import ImRegBenchmark
from birl.bm_template import BmTemplate
PATH_ROOT = os.path.dirname(update_path('birl'))
PATH_DATA = update_path('data-images')
PATH_CSV_COVER_MIX = os.path.join(PATH_DATA, 'pairs-imgs-lnds_mix.csv')
PATH_CSV_COVER_ANHIR = os.path.join(PATH_DATA, 'pairs-imgs-lnds_histol.csv')
# logging.basicConfig(level=logging.INFO)
class TestBmRegistration(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.basicConfig(level=logging.INFO)
cls.path_out = os.path.join(PATH_ROOT, 'output-testing')
shutil.rmtree(cls.path_out, ignore_errors=True)
os.mkdir(cls.path_out)
def _remove_default_experiment(self, bm_name):
path_expt = os.path.join(self.path_out, bm_name)
shutil.rmtree(path_expt, ignore_errors=True)
    def test_benchmark_invalid_inputs(self):
# test missing some parameters
params = {
'path_table': 'x',
'path_out': 'x',
'nb_workers': 0,
'unique': False,
}
# try a missing params
for miss in ['path_table', 'path_out', 'unique']:
params_miss = params.copy()
del params_miss[miss]
assert_raises(AssertionError, ImRegBenchmark, params_miss)
# not defined output folder
assert_raises(Exception, ImRegBenchmark, params)
def test_benchmark_failing(self):
""" test run in parallel with failing experiment """
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_dataset': PATH_DATA,
'path_out': self.path_out,
'preprocessing': 'nothing',
'nb_workers': 4,
'visual': True,
'unique': True,
}
benchmark = ImRegBenchmark(params)
benchmark.run()
        # no landmarks were copied and no experiment results were produced
list_csv = [
len([csv for csv in files if os.path.splitext(csv)[-1] == '.csv'])
for _, _, files in os.walk(benchmark.params['path_exp'])
]
self.assertEqual(sum(list_csv), 0)
del benchmark
def test_benchmark_parallel(self):
""" test run in parallel (2 threads) """
self._remove_default_experiment(ImRegBenchmark.__name__)
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_out': self.path_out,
'preprocessing': ['gray', 'matching-rgb'],
'nb_workers': 2,
'visual': True,
'unique': False,
}
benchmark = ImRegBenchmark(params)
# run it for the first time, complete experiment
benchmark.run()
        # rerun the experiment to simulate repeating an unfinished benchmark
benchmark.run()
self.check_benchmark_results(benchmark, final_means=[0., 0., 0., 0., 0.], final_stds=[0., 0., 0., 0., 0.])
del benchmark
def test_benchmark_simple(self):
""" test run in sequence (1 thread) """
self._remove_default_experiment(ImRegBenchmark.__name__)
params = {
'path_table': PATH_CSV_COVER_ANHIR,
'path_dataset': PATH_DATA,
'path_out': self.path_out,
'preprocessing': ['matching-hsv', 'gray'],
'nb_workers': 1,
'visual': True,
'unique': False,
}
benchmark = ImRegBenchmark(params)
benchmark.run()
self.check_benchmark_results(benchmark, final_means=[0., 0.], final_stds=[0., 0.])
del benchmark
def test_benchmark_template(self):
""" test run in single thread """
path_config = os.path.join(self.path_out, 'sample_config.yaml')
save_config_yaml(path_config, {})
params = {
'path_table': PATH_CSV_COVER_MIX,
'path_out': self.path_out,
'path_config': path_config,
'nb_workers': 2,
'unique': False,
'visual': True,
}
benchmark = BmTemplate(params)
benchmark.run()
self.check_benchmark_results(
benchmark, final_means=[28., 68., 73., 76., 95.], final_stds=[1., 13., 28., 28., 34.]
)
os.remove(path_config)
del benchmark
def check_benchmark_results(self, benchmark, final_means, final_stds):
""" check whether the benchmark folder contains all required files
and compute statistic correctly """
bm_name = benchmark.__class__.__name__
path_bm = os.path.join(self.path_out, bm_name)
self.assertTrue(os.path.exists(path_bm), msg='Missing benchmark: %s' % bm_name)
# required output files
for file_name in [
benchmark.NAME_CSV_REGISTRATION_PAIRS, benchmark.NAME_RESULTS_CSV, benchmark.NAME_RESULTS_TXT
]:
self.assertTrue(
os.path.isfile(os.path.join(path_bm, file_name)),
msg='Missing "%s" file in the BM experiment' % file_name
)
# load registration file
path_csv = os.path.join(path_bm, benchmark.NAME_CSV_REGISTRATION_PAIRS)
df_regist = pd.read_csv(path_csv, index_col=0)
# only two items in the benchmark
self.assertEqual(
len(df_regist),
len(benchmark._df_overview),
msg='Found only %i records instead of %i' % (len(df_regist), len(benchmark._df_overview))
)
# test presence of particular columns
for col in list(benchmark.COVER_COLUMNS) + [benchmark.COL_IMAGE_MOVE_WARP]:
self.assertIn(col, df_regist.columns, msg='Missing column "%s" in result table' % col)
cols_lnds_warp = [
col in df_regist.columns for col in [benchmark.COL_POINTS_REF_WARP, benchmark.COL_POINTS_MOVE_WARP]
]
self.assertTrue(any(cols_lnds_warp), msg='Missing any column of warped landmarks')
col_lnds_warp = benchmark.COL_POINTS_REF_WARP if cols_lnds_warp[0] \
else benchmark.COL_POINTS_MOVE_WARP
# check existence of all mentioned files
for _, row in df_regist.iterrows():
self.assertTrue(
os.path.isfile(os.path.join(path_bm, row[benchmark.COL_IMAGE_MOVE_WARP])),
msg='Missing image "%s"' % row[benchmark.COL_IMAGE_MOVE_WARP]
)
self.assertTrue(
os.path.isfile(os.path.join(path_bm, row[col_lnds_warp])),
msg='Missing landmarks "%s"' % row[col_lnds_warp]
)
# check existence of statistical results
for stat_name in ['Mean', 'STD', 'Median', 'Min', 'Max']:
self.assertTrue(
any(stat_name in col for col in df_regist.columns), msg='Missing statistics "%s"' % stat_name
)
# test specific results
assert_array_almost_equal(sorted(df_regist['TRE Mean'].values), np.array(final_means), decimal=0)
assert_array_almost_equal(sorted(df_regist['TRE STD'].values), np.array(final_stds), decimal=0)
def test_try_wrap(self):
self.assertIsNone(try_wrap())
def test_argparse(self):
with patch('argparse._sys.argv', ['script.py']):
args = parse_arg_params(argparse.ArgumentParser())
self.assertIsInstance(args, dict)
def test_argparse_images(self):
with patch('argparse._sys.argv', ['script.py', '-i', 'an_image.png']):
args = args_expand_parse_images(argparse.ArgumentParser())
self.assertIsInstance(args, dict)
def test_fail_visual(self):
fig = ImRegBenchmark._visual_image_move_warp_lnds_move_warp({ImRegBenchmark.COL_POINTS_MOVE_WARP: 'abc'})
self.assertIsNone(fig)
fig = ImRegBenchmark._visual_image_move_warp_lnds_ref_warp({ImRegBenchmark.COL_POINTS_REF_WARP: 'abc'})
self.assertIsNone(fig)
fig = ImRegBenchmark.visualise_registration((0, {}))
self.assertIsNone(fig)
@try_decorator
def try_wrap():
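    # '%i' % '42' raises a TypeError; try_decorator is expected to swallow it and return None (see test_try_wrap).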
return '%i' % '42'
| 38.725664 | 114 | 0.63654 | 7,546 | 0.862203 | 0 | 0 | 860 | 0.098263 | 0 | 0 | 1,969 | 0.224977 |
1bf02d45108f641ace7558443cc9e030c46ebd2f | 65 | py | Python | python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | b6c5494196842f3c273965063815ad222a18b4da | [
"MIT"
]
| 9 | 2021-09-03T06:20:48.000Z | 2022-03-19T12:43:30.000Z | python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | b6c5494196842f3c273965063815ad222a18b4da | [
"MIT"
]
| null | null | null | python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py | pradyotprksh/development_learning | b6c5494196842f3c273965063815ad222a18b4da | [
"MIT"
]
| 6 | 2021-08-16T01:13:36.000Z | 2022-03-19T12:44:10.000Z | from .errors_exception_handling import errors_exception_handling
| 32.5 | 64 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1bf0738223b67b02abba6e6aa0c92e93cd84b652 | 470 | py | Python | mtstub.py | shimniok/rockblock | 7a84d7da7df34c2dbe1a288fb6de24558eb4485f | [
"MIT"
]
| 1 | 2020-05-30T01:29:06.000Z | 2020-05-30T01:29:06.000Z | mtstub.py | shimniok/rockblock | 7a84d7da7df34c2dbe1a288fb6de24558eb4485f | [
"MIT"
]
| 1 | 2017-10-16T03:30:55.000Z | 2018-01-14T19:05:43.000Z | mtstub.py | shimniok/rockblock | 7a84d7da7df34c2dbe1a288fb6de24558eb4485f | [
"MIT"
]
| 1 | 2019-08-05T10:31:46.000Z | 2019-08-05T10:31:46.000Z | #!/usr/bin/env python
##################################################################################################
## mtstub.py
##
## emulates rockblock api so I don't have to burn credits testing...
##################################################################################################
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import config
print "Content-type: plain/text"
print
form = cgi.FieldStorage()
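# Always answer with a canned success line (status plus a fake message id), regardless of what was posted.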
print "OK,12345"
| 24.736842 | 98 | 0.417021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.823404 |
1bf2d4c209e500db17a5c6d33e7442b5b858b75b | 343 | py | Python | sum.py | PraghadeshManivannan/Built-in-Functions-Python | a3120641e03e7be8e1408dd467997ad6fdf04d87 | [
"MIT"
]
| null | null | null | sum.py | PraghadeshManivannan/Built-in-Functions-Python | a3120641e03e7be8e1408dd467997ad6fdf04d87 | [
"MIT"
]
| null | null | null | sum.py | PraghadeshManivannan/Built-in-Functions-Python | a3120641e03e7be8e1408dd467997ad6fdf04d87 | [
"MIT"
]
| null | null | null |
#sum(iterable, start=0, /)
#Return the sum of a 'start' value (default: 0) plus an iterable of numbers
#When the iterable is empty, return the start value.
'''This function is intended specifically for use with numeric values and may
reject non-numeric types.'''
a = [1,3,5,7,9,4,6,2,8]
print(sum(a))
print(sum(a,start = 4))
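# Expected output: 45 (sum of the list) and 49 (same sum with start=4); passing start by keyword requires Python 3.8+.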
| 24.5 | 78 | 0.676385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.766764 |
1bf4cd25d9e85b2b0cb4131798b2cd2ef33b36d7 | 10,926 | py | Python | idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | [
"RSA-MD"
]
| 112 | 2019-02-11T23:16:36.000Z | 2022-03-23T20:59:57.000Z | idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | [
"RSA-MD"
]
| 621 | 2019-03-01T14:44:12.000Z | 2022-03-31T19:49:25.000Z | idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | [
"RSA-MD"
]
| 154 | 2019-02-01T23:46:33.000Z | 2022-03-23T15:07:10.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
np.array([-0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, -0.25]),
np.array([0.25, -0.25, -0.25])],
[np.array([-0.25, -0.25, -0.25]),
np.array([0.25, 0.25, -0.25]),
np.array([0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, 0.25])]],
[[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])],
[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])]]]
self._typeDict = {0: 0, 3: 1}
self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}
# === CONSTRUCTOR - Aligned with {100}
@classmethod
def alignedWith100(cls, IAD):
return cls(IAD) # Default implementation
# === CONSTRUCTOR - Aligned with {110}
@classmethod
def aligndWith110(cls, IAD):
result = cls(IAD)
thetaX = 0
thetaY = np.pi * 0.25
thetaZ = 0
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {111}
@classmethod
def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
result = cls(IAD)
thetaX = -np.pi * 0.25
thetaY = -np.arctan2(-sqrt(2), 2)
thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {xyz}
@classmethod
def alignedWith(cls, IAD, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return cls(IAD)
elif MI in ['110', '101', '011']:
return cls.aligndWith110(IAD)
elif MI == '111':
return cls.alignedWith111(IAD)
else:
result = cls(IAD)
a = np.array([0.0, 0.0, 1.0])
b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
axis = np.cross(a, b)
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
return result
        raise ValueError('DiamondLattice.alignedWith: Input direction is not correct.')
# === MANIPULATION METHODS
def applyTransF(self, TransF):
if isinstance(TransF, ScaleFunc):
if TransF.isIsometric:
self._IAD *= TransF.Scale[0]
else:
raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
UnitCellLattice.applyTransF(self, TransF)
# === AUXILIARY METHODS
def _getPointType(self, P):
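        """Classify a point in the reference unit-cell frame by 4*(x+y+z) mod 4; lattice sites give 0 (A sub-lattice) or 3 (B sub-lattice)."""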
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
def areNeighbors(self, P1, P2):
return np.linalg.norm(P2 - P1) <= self.IAD
def getNeighbors(self, P, layer=1):
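        """Return the `layer`-th neighbor shell of P (layer=1 gives the nearest neighbors), converted back from the reference cell frame."""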
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
if PType not in self._typeDict.keys():
raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
if layer > len(self._NthNeighbors):
self._calculateNeighbors(layer)
NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
for NeighP in NBs:
NeighP += RefP
self._convertFromReference(NeighP)
return NBs
def _calculateNeighbors(self, layer):
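        """Extend the cached neighbor shells up to `layer` by composing the outermost known shell with the first-neighbor offsets, skipping points already recorded."""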
NList = []
for k, v in self._typeDict.items():
tmp = [np.array([0, 0, 0], dtype=float)]
for nb in self._NthNeighbors:
tmp.extend(nb[v])
NList.append(tmp)
for _ in range(layer - len(self._NthNeighbors)):
tmp = [[] for _ in self._typeDict.keys()]
for k, v in self._typeDict.items():
for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
PType = self._getPointType(P + self._relativePositions[k])
for Q in self._NthNeighbors[0][self._typeDict[PType]]:
N = P + Q
if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
tmp[v].append(N)
NList[v].append(N)
self._NthNeighbors.append(tmp)
def isASite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 0
def isBSite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 3
def setDesign(self, D, AType, BType):
for i, P in enumerate(D.Canvas.Points):
if self.isASite(P):
D.setContent(i, AType)
elif self.isBSite(P):
D.setContent(i, BType)
else:
raise ValueError('setDesign can not set site not on lattice')
# === BASIC QUERY METHODS
@property
def IAD(self):
return self._IAD
@property
def Diamond100LayerSpacing(self):
return self.IAD / sqrt(3)
@property
def Diamond110LayerSpacing(self):
return self.IAD * sqrt(2) / sqrt(3)
@property
def Diamond111LayerSpacing(self):
return self.IAD * 4 / 3
@property
def Diamond112LayerSpacing(self):
return self.IAD * sqrt(2) / 3
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
        raise ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
        raise ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
        raise ValueError('The input direction is not correct.')
| 43.185771 | 114 | 0.507139 | 9,873 | 0.903624 | 0 | 0 | 1,930 | 0.176643 | 0 | 0 | 1,928 | 0.17646 |
1bf4f3ec8b611663d899f073f4f41ae66286507f | 12,055 | py | Python | elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 8e17212a26539dfd79b414ffe8f243a906d32149 | [
"MIT"
]
| null | null | null | elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 8e17212a26539dfd79b414ffe8f243a906d32149 | [
"MIT"
]
| null | null | null | elateridae_baits.py | AAFC-BICoE/elateridae-ortholog-baitset | 8e17212a26539dfd79b414ffe8f243a906d32149 | [
"MIT"
]
| null | null | null | # coding: utf8
"""
Ortholog Based Bait Design Script for creating Elateridae ortholog based baits suitable for submission to myBaits
Compares t_coffee AA alignment scores with nucleotide tranalignments to find conserved blocks
Author Jackson Eyres [email protected]
License: MIT
Copyright: Government of Canada
"""
import glob
import os
from Bio import AlignIO, SeqIO
import time
import argparse
import random
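# Example invocation (paths are illustrative, not part of the repository):
#   python elateridae_baits.py -o results/ -i tcoffee_score_ascii_dir/ -n tranalign_nt_dir/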
def main():
"""
    Main function to run the Elateridae ortholog bait designer
:return:
"""
    parser = argparse.ArgumentParser(description='Processes T_Coffee AA alignments to generate an ortholog bait set')
parser.add_argument('-o', type=str, required=True,
help='Output Directory')
parser.add_argument('-i', type=str, required=True,
help='T_Coffee Directory containing aa based .score_ascii files')
parser.add_argument('-n', type=str, required=True,
help='Directory containing tranalign nucleotide alignments')
# parser.add_argument('-p', type=str, required=True,
# help='Priorities File for Staphylinidae')
args = parser.parse_args()
print("Starting Staphylinidae Ortholog Bait Design".format(args.o))
print(args.o, args.i, args.n)
dict_of_max_sums = longest_exon_length(args.i)
sum_file = write_sums(args.o, dict_of_max_sums)
blocks_dir = extract_conserved_blocks(sum_file, args.n, args.o)
window_ranges = [600]
for window in window_ranges:
filtered_blocks_dir = filter_blocks(blocks_dir, args.o, window)
processed_blocks_dir = filtered_blocks_dir
# Original was going to stagger tile the baits, but bait manufacturer inherently does this
# tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window)
# processed_blocks_dir = tiled_blocks_dir
merge_baits(processed_blocks_dir, args.o, "Elateridae", window)
def extract_conserved_blocks(sum_file, alignment_directory, results_directory):
"""
Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file to
    extract a conserved block
:param sum_file:
:param alignment_directory:
:param results_directory:
:return: Output Directory of conserved blocks
"""
output_directory = os.path.join(results_directory, "conserved_blocks")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with open(sum_file) as f:
lines = f.readlines()
lines.pop(0)
for line in lines:
list_of_seqs = []
split = line.rstrip().split(",")
name = split[0].replace(".aa.summarized.score_ascii", "_tranaligned.fa")
window_range = int(split[2])*3
index = int(split[3])*3
file_path = os.path.join(alignment_directory, name)
if os.path.isfile(file_path):
with open(file_path) as g:
alignments = AlignIO.read(g, "fasta")
for alignment in alignments:
list_of_seqs.append(alignment[index:index + window_range])
orthogroup = split[0].split(".")[0]
file_name = "{}_block.fasta".format(orthogroup)
file_path = os.path.join(output_directory, file_name)
with open(file_path, "w") as h:
for seq in list_of_seqs:
h.write(seq.format("fasta"))
return output_directory
def longest_exon_length(directory):
"""
Scans t_coffee alignments in score_ascii format for a region of between 75-2000 positions in length that is
highly conserved, and sorts by the degree of conservation into an output file
:param directory: Directory of T_coffee results (containing score_ascii and aln files)
:return: Dictionary of Orthogroups with a 300bp region TCS scores above 2400
"""
increments = [150, 200]
increments_rev = increments[::-1]
dict_of_max_sums = {}
files = glob.glob(os.path.join(directory, "*.score_ascii"))
count = 0
for file in files:
count += 1
if count % 100 == 0:
print(count)
# Scans an alignment and converts the cons string of numbers into a continous list of numbers
number_string = ""
with open(file) as f:
number_of_specimens = f.read().count(":") - 4
f.seek(0)
if number_of_specimens < 5:
print("Skipping {} Due to Low Specimen Count".format(file))
continue
for line in f:
if line.startswith("cons") and ":" not in line:
number = line.rstrip().split(" ")[-1]
number_string += number
number_list = [int(i) for i in number_string]
# Scans number list for sequence containing the highest window range of conserved bases within 95% of max
# TCS score for said window range aka 9*Window Range
# Sort the list so the highest score block within the window range is first. If the window range
# has 95% quality or higher, add it to dictionary and move on to next file, otherwise decrease
# window range and try again
for window_range in increments_rev:
list_of_sums = []
if len(number_list) > window_range:
for i in range(0, len(number_list) - window_range):
the_sum = sum(number_list[i:i + window_range])
list_of_sums.append((the_sum, window_range, i))
sorted_list = sorted(list_of_sums, reverse=True, key=lambda element: (element[0]))
if float(sorted_list[0][0]) >= float(9 * window_range * .95):
if os.path.basename(file) not in dict_of_max_sums:
dict_of_max_sums[os.path.basename(file)] = sorted_list[0]
break
return dict_of_max_sums
def write_sums(directory, dict_of_max_sums):
"""
Writes the dictionary of all ortholog T_coffee scores/sums to csv file
:param directory:
:param dict_of_max_sums:
:return:
"""
if not os.path.exists(directory):
os.makedirs(directory)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = "Conserved_Exons_Sums_{}.csv".format(timestr)
file_path = os.path.join(directory, file_name)
# Sorts dictionary into a list by score sum and then window length
sorted_x = sorted(dict_of_max_sums.items(), reverse=True, key=lambda x: (x[1][0], x[1][1]))
print("Writing T_Coffee score analysis to {}".format(file_path))
with open(file_path, "w") as f:
f.write("Orthogroup,Sum,Window,Index\n")
for entry in sorted_x:
f.write("{},{},{},{}\n".format(entry[0], entry[1][0], entry[1][1], entry[1][2]))
return file_path
def filter_blocks(directory, results_dir, window):
"""
Filters blocks generated by longest exon length and write sum functions based on various criteria
:param directory: Directory of fasta blocks to filter
:param results_dir: Parent Result Folder
:param window: Minimum length of a conserved block in basepairs
:return: Output Directory of filtered blocks
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "filtered_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
total_seq_length = 0
total_after_gap_removal = 0
total_sequences = 0
gene_count = 0
# For each block/file extract out sequences that meet the following critiera:
# Part of Priority List = 1
# Minimum Length of Window size in basepairs
# Gaps represent less than 20% of sequence
    # Block contains at least 5 sequences from priority list = 1
for fasta in fastas:
seqs = []
with open(fasta) as f:
file_name = os.path.basename(fasta).replace(".fasta", "_filtered.fasta")
for seq in SeqIO.parse(f, 'fasta'):
gaps = seq.seq.count("-")
gap_percent = float(gaps / len(seq.seq))
if gap_percent > 0.20:
pass
else:
if len(seq.seq) >= window:
seqs.append(seq)
if len(seqs) < 5:
pass
else:
gene_count += 1
# Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs
random.shuffle(seqs)
seqs = seqs[:3]
total_sequences += len(seqs)
for seq in seqs:
total_seq_length += len(seq.seq)
seq.seq = seq.seq.ungap(gap="-")
total_after_gap_removal += len(seq.seq)
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
print("Total Genes: {}, "
"Total Sequences: {}, "
"Total Length in bp: {}, "
"After Gap Removal: {}".format(gene_count, total_sequences, total_seq_length, total_after_gap_removal))
return output_dir
def tile_blocks(directory, results_dir, window):
"""
Takes a prefiltered block generated by the filtered_blocks function and tiles each bait
The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other
:param directory:
:param results_dir:
:param window:
:return:
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "tiled_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for fasta in fastas:
seqs = []
with open(fasta) as f:
count = 0
for seq in SeqIO.parse(f, 'fasta'):
seq.description = ""
# Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time
count += 1
if count == 1:
pass
if count == 2:
seq.seq = seq.seq[40:]
if count == 3:
seq.seq = seq.seq[80:]
count = 0
seqs.append(seq)
file_name = os.path.basename(fasta).replace("_block_filtered", "_block_tiled")
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
def merge_baits(directory, results_dir, prefix, window):
"""
Merges multifastas in the input directory into a single multi fasta file. Can be accomplished with bash cat, but
using biopython ensures each fasta entry is formatted correctly
:param directory: Input directory of fastas
:param results_dir: Output Parent directory
:param prefix: Name of the output file
:param window:
:return:
"""
output_dir = os.path.join(results_dir, "final_baits")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fastas = glob.glob(os.path.join(directory, "*.fasta"))
seqs = []
total_dna = 0
total_seqs = 0
total_orthologs = 0
for fasta in fastas:
if total_dna > 3900000:
break
total_orthologs += 1
with open(fasta) as f:
for seq in SeqIO.parse(f, 'fasta'):
total_seqs += 1
total_dna += len(seq.seq)
seq.description = ""
seqs.append(seq)
file_name = "{}-{}-final-baits.fasta".format(prefix, window)
new_file = os.path.join(output_dir, file_name)
print("Bait File {} "
"with Total Orthologs {}, "
"Total Seqs {}, Total_Dna {} bp".format(new_file, total_orthologs, total_seqs, total_dna))
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
if __name__ == "__main__":
main()
| 36.41994 | 117 | 0.616093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,435 | 0.367897 |
1bf638f00910b809a7d45e1aeabdb75e4e5aef9c | 1,361 | py | Python | poilab.py | octeufer/Annotate_Optimize | 32d9cecc0159882d3f962990aba07168c4a023f5 | [
"Apache-2.0"
]
| null | null | null | poilab.py | octeufer/Annotate_Optimize | 32d9cecc0159882d3f962990aba07168c4a023f5 | [
"Apache-2.0"
]
| null | null | null | poilab.py | octeufer/Annotate_Optimize | 32d9cecc0159882d3f962990aba07168c4a023f5 | [
"Apache-2.0"
]
| null | null | null | import sys
import numpy as np
sys.path.append("d:/data/annooptimize")
import triangle
import time
tinternal = list()
def labstart():
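    """Triangulate three POI datasets of increasing size (poise, POIhalf, POIall), build their label conflict graphs, run the global tabu optimisation on each and record elapsed times."""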
points,tri = triangle.gentri("d:/data/annooptimize/Annodata/200600/poise.shp")
plabels = triangle.dynamicSize(points)
conflictg = triangle.conflictgraphdy(points,tri,plabels)
acg = triangle.accesssubg(conflictg)
len(acg)
allsolve = np.zeros((len(points),4,2),np.float64)
points2,tri2 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIhalf.shp")
plabels2 = triangle.dynamicSize(points2)
conflictg2 = triangle.conflictgraphdy(points2,tri2,plabels2)
acg2 = triangle.accesssubg(conflictg2)
points3,tri3 = triangle.gentri("d:/data/annooptimize/Annodata/200600/POIall.shp")
plabels3 = triangle.dynamicSize(points3)
conflictg3 = triangle.conflictgraphdy(points3,tri3,plabels3)
acg3 = triangle.accesssubg(conflictg3)
time.clock()
costs,tabucs= triangle.globaltabuiter2dy(acg,points,1,plabels)
tinternal.append(time.clock())
costs2,tabucs2= triangle.globaltabuiter2dy(acg2,points2,1,plabels2)
tinternal.append(time.clock())
costs3,tabucs3= triangle.globaltabuiter2dy(acg3,points3,1,plabels3)
tinternal.append(time.clock())
return tinternal,(costs,tabucs),(costs2,tabucs2),(costs3,tabucs3)
| 38.885714 | 87 | 0.722998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.124173 |
1bf69ac1479d462fb413d5e64a7b2f979173894e | 5,091 | py | Python | t_core/tc_python/xrule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
]
| 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | t_core/tc_python/xrule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
]
| 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | t_core/tc_python/xrule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
]
| 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z |
from util.infinity import INFINITY
from tc_python.arule import ARule
from t_core.rollbacker import Rollbacker
from t_core.resolver import Resolver
class XRule(ARule):
'''
Applies the transformation on one match with roll-back capability.
'''
def __init__(self, LHS, RHS, max_iterations=INFINITY):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
'''
# external_matches_only=True because further matches of this rule are only processed after a roll-back
super(XRule, self).__init__(LHS, RHS)
self.M.max = max_iterations
self.I.max_iterations = max_iterations
self.B = Rollbacker(condition=LHS, max_iterations=max_iterations)
def packet_in(self, packet):
self.exception = None
self.is_success = False
# Checkpoint the original packet
self.B.packet_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Match
packet = self.M.packet_in(packet)
if not self.M.is_success:
packet = self.B.restore(packet)
if self.M.exception:
self.exception = self.M.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Choose one match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.restore(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.restore(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
self.is_success = True
return packet
def next_in(self, packet):
self.exception = None
self.is_success = False
packet = self.B.next_in(packet)
if not self.B.is_success:
self.exception = self.B.exception
return packet
# Choose the next match
packet = self.I.packet_in(packet)
if not self.I.is_success:
packet = self.B.next_in(packet)
if self.I.exception:
self.exception = self.I.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
packet = self.B.next_in(packet)
if self.W.exception:
self.exception = self.W.exception
elif self.B.exception:
self.exception = self.B.exception
return packet
# Output success packet
self.is_success = True
return packet
class XRule_r(XRule):
'''
    Applies the transformation on one match with roll-back capability and conflict resolution (via a Resolver).
'''
def __init__(self, LHS, RHS, external_matches_only=False, custom_resolution=lambda packet: False):
'''
Applies the transformation on one match with roll-back capability.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
@param external_matches_only: Resolve conflicts ignoring the matches found in this ARule.
@param custom_resolution: Override the default resolution function.
'''
        super(XRule_r, self).__init__(LHS, RHS)
self.R = Resolver(external_matches_only=external_matches_only,
custom_resolution=custom_resolution)
def packet_in(self, packet):
packet = super(XRule_r, self).packet_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
def next_in(self, packet):
packet = super(XRule_r, self).next_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
| 37.433824 | 111 | 0.575329 | 4,924 | 0.967197 | 0 | 0 | 0 | 0 | 0 | 0 | 1,195 | 0.234728 |
1bf74b762d2902af1c8ee402ce83c52345c29025 | 5,266 | py | Python | tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | [
"Apache-2.0"
]
| 84 | 2018-06-02T02:00:53.000Z | 2022-03-13T12:17:42.000Z | tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | [
"Apache-2.0"
]
| 3 | 2018-10-31T00:28:31.000Z | 2020-05-12T01:06:53.000Z | tests/commonsense/semantic_lexicon_knowledge/ai2_lexicon_test.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | [
"Apache-2.0"
]
| 13 | 2018-09-14T20:37:51.000Z | 2021-03-23T09:24:49.000Z | from unittest import TestCase
from propara.commonsense.semantic_lexicon_knowledge.ai2_lexicon import AI2Lexicon, AI2LexiconPredicate, AI2LexiconArg, AI2LexiconIndications, \
AI2LexiconPattern
class TestAI2Lexicon(TestCase):
def setUp(self):
self.lexicon_fp = "tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv"
def testLoads(self):
self.lexicon = AI2Lexicon(self.lexicon_fp)
# print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate', has_agent=True, has_patient=False)}")
# print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate', has_agent=True, has_patient=False)}")
#
# print(f"evaporate.subj: {self.lexicon.what_happens_to_subj('evaporate')}")
# print(f"evaporate.obj: {self.lexicon.what_happens_to_obj('evaporate')}")
# v2 doesn't contain size, temperature, phase attributes
# infile = "tests/fixtures/ie/ai2-lexicon-v2.tsv"
# the following path is useful when debugging from browser.
# self.lexicon = AI2Lexicon("tests/fixtures/ie/TheSemanticLexicon-v3.0_withadj.tsv")
assert self.lexicon._after_subj(("blend in", AI2LexiconPattern.SO)) == {
AI2LexiconPredicate.IS_AT: AI2LexiconArg.OBJECT,
AI2LexiconPredicate.NOT_IS_AT: AI2LexiconArg.PREP_SRC,
}
assert self.lexicon._after_obj(("absorb", AI2LexiconPattern.SO))[
AI2LexiconPredicate.IS_AT] == AI2LexiconArg.SUBJECT
# assert self.lexicon._after_obj(("absorbs", AI2LexiconPattern.SO)).get(AI2LexiconPredicate.IS_AT, "") == AI2LexiconArg.SUBJECT
assert len(self.lexicon._after_obj(("blend in", AI2LexiconPattern.SO))) == 0
assert len(self.lexicon._after_obj(("blend blend2", AI2LexiconPattern.SO))) == 0
assert AI2LexiconIndications.MOVED not in self.lexicon.what_happens_to_subj("absorbs")
assert AI2LexiconIndications.MOVED in self.lexicon.what_happens_to_obj("absorbs")
assert AI2LexiconIndications.CREATED in self.lexicon.what_happens_to_obj("sprout")
assert AI2LexiconIndications.CREATED in self.lexicon.what_happens_to_subj("sprout", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.DESTROYED not in self.lexicon.what_happens_to_subj("sprout")
assert AI2LexiconIndications.DESTROYED not in self.lexicon.what_happens_to_obj("sprout")
assert AI2LexiconIndications.TEMPERATURE_INC not in self.lexicon.what_happens_to_obj("turn")
assert AI2LexiconIndications.TEMPERATURE_INC in self.lexicon.what_happens_to_subj("gets hot")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("gets bigger")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("become bigger")
assert AI2LexiconIndications.SIZE_INC in self.lexicon.what_happens_to_subj("turned bigger")
assert AI2LexiconIndications.SIZE_INC not in self.lexicon.what_happens_to_obj("turns into bigger")
assert AI2LexiconIndications.MOVED not in self.lexicon.what_happens_to_subj("turned")
assert AI2LexiconIndications.PHASE_UNK_GAS in self.lexicon.what_happens_to_subj("turned gaseous")
assert AI2LexiconIndications.PHASE_LIQUID_SOLID in self.lexicon.what_happens_to_subj("solidify", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.PHASE_LIQUID_SOLID in self.lexicon.what_happens_to_obj("solidify", has_agent=True,
has_patient=True)
assert AI2LexiconIndications.PHASE_UNK_SOLID not in self.lexicon.what_happens_to_subj("solidifies")
assert AI2LexiconIndications.PHASE_SOLID_GAS in self.lexicon.what_happens_to_subj("sublime", has_agent=True,
has_patient=False)
assert AI2LexiconIndications.PHASE_SOLID_GAS in self.lexicon.what_happens_to_obj("sublime", has_agent=True,
has_patient=True)
# if agent and patient both are present or only 1
# the difference is whether object is given or not
# this happens for all verbs that can be both transitive/intransitive
# they will have 2 entries.
#
# A big rock stops the stream of water from uphill => stream of water moved from uphill to rock
# car stops at the intersection ==> car moved to intersection
# we have removed lots of fine details in the patterns (VerbNet had much more info there)
def test_type_of_pattern(self):
input = "SUBJECT VERB OBJECT PREP-SRC PREP-DEST"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.SO
input = "SUBJECT VERB OBJECT"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.SO
input = "SUBJECT VERB PREP-SRC PREP-DEST"
assert AI2Lexicon.type_of_pattern(input) == AI2LexiconPattern.S
| 71.162162 | 143 | 0.681732 | 5,066 | 0.962021 | 0 | 0 | 0 | 0 | 0 | 0 | 1,629 | 0.309343 |
1bf7f1bc739f582663b9e33d97b9d4189cae0d04 | 473 | py | Python | fitbit/__init__.py | erichilarysmithsr/python-fitbit | 38cf916d0318aedc91b31d15431fa9c49a13d15f | [
"Apache-2.0"
]
| null | null | null | fitbit/__init__.py | erichilarysmithsr/python-fitbit | 38cf916d0318aedc91b31d15431fa9c49a13d15f | [
"Apache-2.0"
]
| null | null | null | fitbit/__init__.py | erichilarysmithsr/python-fitbit | 38cf916d0318aedc91b31d15431fa9c49a13d15f | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Fitbit API Library
------------------
:copyright: 2012-2015 ORCAS.
:license: BSD, see LICENSE for more details.
"""
from .api import Fitbit, FitbitOauthClient, FitbitOauth2Client
# Meta.
__title__ = 'fitbit'
__author__ = 'Issac Kelly and ORCAS'
__author_email__ = '[email protected]'
__copyright__ = 'Copyright 2012-2015 ORCAS'
__license__ = 'Apache 2.0'
__version__ = '0.1.3'
__release__ = '0.1.3'
# Module namespace.
all_tests = []
| 18.192308 | 62 | 0.684989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.58351 |
1bf7f576395a0ca86f448e1c60010a3d363f6af6 | 468 | py | Python | bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | [
"MIT"
]
| 1 | 2021-09-08T16:39:07.000Z | 2021-09-08T16:39:07.000Z | bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | [
"MIT"
]
| null | null | null | bitcoinExchange/exchange/api/urls.py | pogginicolo98/start2impact_exchange | 559c42cdeb2dec890d4b1145ed66a1a2f7c362cb | [
"MIT"
]
| null | null | null | from django.urls import include, path
from exchange.api.views import LatestOrdersListAPIView, OrderViewSet, ProfileAPIView
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'orders', OrderViewSet, basename='orders')
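# Exposed endpoints: profile/, orders/latest/, plus the router-generated orders/ list and detail routes.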
urlpatterns = [
path('profile/', ProfileAPIView.as_view(), name='profile-detail'),
path('orders/latest/', LatestOrdersListAPIView.as_view(), name='orders-latest'),
path('', include(router.urls))
]
| 36 | 84 | 0.767094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.162393 |
1bf850828538bccd6c9bdc855d6c93aec2b25061 | 52 | py | Python | python/testData/formatter/indentInGenerator_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
]
| 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/formatter/indentInGenerator_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
]
| 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/formatter/indentInGenerator_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
]
| 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def dbl():
return (
(a, a) for a in [])
| 13 | 27 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1bf8ddafa4dc0ba6cd6a406c255c3270696943bb | 848 | py | Python | kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | a0c7bad675bd3fff97f99c3e2b49f19a1fef7640 | [
"MIT"
]
| null | null | null | kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | a0c7bad675bd3fff97f99c3e2b49f19a1fef7640 | [
"MIT"
]
| 5 | 2021-03-18T22:18:49.000Z | 2022-03-11T23:40:56.000Z | kevin/aggregate/process_html.py | toddoh/thisisallabout_backend | a0c7bad675bd3fff97f99c3e2b49f19a1fef7640 | [
"MIT"
]
| 1 | 2019-10-16T19:29:12.000Z | 2019-10-16T19:29:12.000Z | from bs4 import BeautifulSoup
import requests
import re
def retrieveText():
print("Parsing text from online target")
url = "https://www.whitehouse.gov/the-press-office/2017/10/16/remarks-president-trump-and-senate-majority-leader-mitch-mcconnell-joint"
response = requests.get(url)
soup = BeautifulSoup(response.content, "lxml")
textwrapper = soup.find("div", { "class" : "field-item" })
textel = textwrapper.find_all("p", { "class" : None })
textstripped = []
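    # Tag the speakers ("Q " -> reporter, "THE PRESIDENT:" -> president) and drop the A.M./P.M. time-stamp lines.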
for element in textel:
stripped = element.text.replace("\r", "\n").replace("\r", "").replace("\n", "").replace("Q ", "0002reporter: ").replace("THE PRESIDENT: ", "0001president: ").strip()
if "P.M." not in stripped and "A.M." not in stripped:
textstripped.append(stripped)
# print(textstripped)
    return textstripped
| 38.545455 | 173 | 0.660377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.364387 |
1bf9ff44f1b06f0e0c18c710168ee340dcb2a97f | 869 | py | Python | cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 9c546b7930a54a9b44efffdf87401726981e1b2a | [
"MIT"
]
| null | null | null | cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 9c546b7930a54a9b44efffdf87401726981e1b2a | [
"MIT"
]
| 1 | 2019-07-30T08:49:20.000Z | 2019-07-30T08:49:20.000Z | cfmacro/_resources/examples/lambda.py | gchiesa/cfmacro | 9c546b7930a54a9b44efffdf87401726981e1b2a | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from cfmacro.processors import SgProcessor
from cfmacro.core.engine import ProcessorEngine
from cfmacro.core.template import TemplateProcessor
def lambda_handler(event, context):
"""
Implement a core handler for security groups ingress / egress
:param event:
:param context:
:return:
"""
print(f'event received: {event}')
processor_engine = ProcessorEngine()
processor_engine.register_processor(SgProcessor)
template_processor = TemplateProcessor(processor_engine)
result = template_processor.process(fragment=event['fragment'],
template_params=event['templateParameterValues']).to_dict()
print(f'event processed. Result: \n{result}')
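    # Response shape CloudFormation expects back from a macro: requestId, status and the transformed fragment.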
return {
"requestId": event['requestId'],
"status": "success",
"fragment": result
}
| 28.966667 | 99 | 0.674338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.346375 |
1bfcaa846cbe80234230889e864b2dd049be6c62 | 8,038 | py | Python | tf2qa/predict_long.py | mikelkl/TF2-QA | 3bca786d26565335df45538714532d6d3c070a2b | [
"MIT"
]
| 17 | 2020-01-29T10:31:07.000Z | 2022-01-10T03:36:00.000Z | tf2qa/predict_long.py | mikelkl/TF2-QA | 3bca786d26565335df45538714532d6d3c070a2b | [
"MIT"
]
| null | null | null | tf2qa/predict_long.py | mikelkl/TF2-QA | 3bca786d26565335df45538714532d6d3c070a2b | [
"MIT"
]
| 4 | 2021-01-27T15:42:45.000Z | 2021-12-12T20:41:51.000Z | import torch
import argparse
from roberta_modeling import RobertaJointForLong
from transformers.modeling_roberta import RobertaConfig, RobertaModel
from torch.utils.data import TensorDataset, SequentialSampler, DataLoader
import utils
from tqdm import tqdm
import os
import json
import collections
import pickle
import pandas as pd
from utils_nq import read_candidates_from_one_split, compute_long_pred
from roberta_long_preprocess import InputLongFeatures
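# Inference-only pipeline: load cached test features, run RobertaJointForLong over them, dump the raw start/end
# logits to RawResults_test.pkl and convert them into long-answer predictions (test_predictions.json).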
RawResult = collections.namedtuple("RawResult",
["unique_id",
"long_start_logits",
"long_end_logits"])
def load_cached_data(feature_dir, output_features=False, evaluate=False):
features = torch.load(feature_dir)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
if output_features:
return dataset, features
return dataset
def to_list(tensor):
return tensor.detach().cpu().tolist()
def make_submission(output_prediction_file, output_dir):
print("***** Making submmision *****")
test_answers_df = pd.read_json(output_prediction_file)
def create_short_answer(entry):
"""
:param entry: dict
:return: str
"""
if entry['answer_type'] == 0:
return ""
# if entry["short_answers_score"] < 1.5:
# return ""
if entry["yes_no_answer"] != "NONE":
return entry["yes_no_answer"]
answer = []
for short_answer in entry["short_answers"]:
if short_answer["start_token"] > -1:
answer.append(str(short_answer["start_token"]) + ":" + str(short_answer["end_token"]))
return " ".join(answer)
def create_long_answer(entry):
if entry['answer_type'] == 0:
return ''
# if entry["long_answer_score"] < 1.5:
# return ""
answer = []
if entry["long_answer"]["start_token"] > -1:
answer.append(str(entry["long_answer"]["start_token"]) + ":" + str(entry["long_answer"]["end_token"]))
return " ".join(answer)
for var_name in ['long_answer_score', 'short_answers_score', 'answer_type']:
test_answers_df[var_name] = test_answers_df['predictions'].apply(lambda q: q[var_name])
test_answers_df["long_answer"] = test_answers_df["predictions"].apply(create_long_answer)
test_answers_df["short_answer"] = test_answers_df["predictions"].apply(create_short_answer)
test_answers_df["example_id"] = test_answers_df["predictions"].apply(lambda q: str(q["example_id"]))
long_answers = dict(zip(test_answers_df["example_id"], test_answers_df["long_answer"]))
short_answers = dict(zip(test_answers_df["example_id"], test_answers_df["short_answer"]))
sample_submission = pd.read_csv("data/sample_submission.csv")
long_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_long")].apply(
lambda q: long_answers[q["example_id"].replace("_long", "")], axis=1)
short_prediction_strings = sample_submission[sample_submission["example_id"].str.contains("_short")].apply(
lambda q: short_answers[q["example_id"].replace("_short", "")], axis=1)
sample_submission.loc[
sample_submission["example_id"].str.contains("_long"), "PredictionString"] = long_prediction_strings
sample_submission.loc[
sample_submission["example_id"].str.contains("_short"), "PredictionString"] = short_prediction_strings
sample_submission.to_csv(os.path.join(output_dir, "submission.csv"), index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_ids", default="0,1,2,3,4,5,6,7", type=str)
parser.add_argument("--eval_batch_size", default=128, type=int)
parser.add_argument("--n_best_size", default=20, type=int)
parser.add_argument("--max_answer_length", default=30, type=int)
parser.add_argument("--float16", default=True, type=bool)
parser.add_argument("--bert_config_file", default='roberta_large/config.json', type=str)
parser.add_argument("--init_restore_dir", default='check_points/roberta-large-long-V00/best_checkpoint.pth', type=str)
parser.add_argument("--predict_file", default='data/simplified-nq-test.jsonl', type=str)
parser.add_argument("--output_dir", default='check_points/roberta-large-long-V00',
type=str)
parser.add_argument("--predict_feat", default='dataset/test_data_maxlen512_roberta_tfidf_features.bin',
type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))
bert_config = RobertaConfig.from_json_file(args.bert_config_file)
model = RobertaJointForLong(RobertaModel(bert_config), bert_config)
utils.torch_show_all_params(model)
utils.torch_init_model(model, args.init_restore_dir)
if args.float16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
dataset, features = load_cached_data(feature_dir=args.predict_feat, output_features=True, evaluate=True)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
print("***** Running evaluation *****")
print(" Num examples =", len(dataset))
print(" Batch size =", args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
input_ids, input_mask, segment_ids, example_indices = batch
inputs = {'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids}
start_logits, end_logits = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = str(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
long_start_logits=start_logits[i].cpu().numpy(),
long_end_logits=end_logits[i].cpu().numpy())
all_results.append(result)
pickle.dump(all_results, open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'wb'))
# all_results = pickle.load(open(os.path.join(args.output_dir, 'RawResults_test.pkl'), 'rb'))
print("Going to candidates file")
candidates_dict = read_candidates_from_one_split(args.predict_file)
print("Compute_pred_dict")
nq_pred_dict = compute_long_pred(candidates_dict, features, all_results, args.n_best_size)
output_prediction_file = os.path.join(args.output_dir, 'test_predictions.json')
print("Saving predictions to", output_prediction_file)
with open(output_prediction_file, 'w') as f:
json.dump({'predictions': list(nq_pred_dict.values())}, f)
# make_submission(output_prediction_file, args.output_dir)
| 43.923497 | 122 | 0.686365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,812 | 0.225429 |
1bfcf985c108d567ad3614fe9d2baeec4a87e0f1 | 9,385 | py | Python | city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | c14513a9e54405412085f1047f91ec58b263eac0 | [
"CC0-1.0"
]
| 2 | 2020-11-23T22:08:58.000Z | 2022-03-02T13:13:20.000Z | city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | c14513a9e54405412085f1047f91ec58b263eac0 | [
"CC0-1.0"
]
| 170 | 2019-12-31T13:37:04.000Z | 2022-03-12T14:03:35.000Z | city-infrastructure-platform/settings.py | City-of-Helsinki/city-infrastructure-platform | c14513a9e54405412085f1047f91ec58b263eac0 | [
"CC0-1.0"
]
| 3 | 2020-05-08T05:58:02.000Z | 2022-03-15T16:07:25.000Z | """
Django settings for city-infrastructure-platform project.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from helusers.defaults import SOCIAL_AUTH_PIPELINE # noqa: F401
from sentry_sdk.integrations.django import DjangoIntegration
from .utils import git_version
# Set up .env file
checkout_dir = environ.Path(__file__) - 2
assert os.path.exists(checkout_dir("manage.py"))
parent_dir = checkout_dir.path("..")
if parent_dir() != "/" and os.path.isdir(parent_dir("etc")):
env_file = parent_dir("etc/env")
default_var_root = parent_dir("var")
else:
env_file = checkout_dir(".env")
default_var_root = checkout_dir("var")
BASE_DIR = checkout_dir()
env = environ.Env(
DEBUG=(bool, False),
TIER=(str, "dev"), # one of: prod, qa, stage, test, dev
SECRET_KEY=(str, ""),
VAR_ROOT=(str, default_var_root),
ALLOWED_HOSTS=(list, []),
TRUST_X_FORWARDED_HOST=(bool, False),
DATABASE_URL=(
str,
"postgis:///city-infrastructure-platform",
),
CACHE_URL=(str, "locmemcache://"),
EMAIL_URL=(str, "consolemail://"),
SENTRY_DSN=(str, ""),
AZURE_DEPLOYMENT=(bool, False),
AZURE_ACCOUNT_KEY=(str, False),
AZURE_CONTAINER=(str, False),
AZURE_ACCOUNT_NAME=(str, False),
OIDC_AUTHENTICATION_ENABLED=(bool, True),
SOCIAL_AUTH_TUNNISTAMO_KEY=(str, None),
SOCIAL_AUTH_TUNNISTAMO_SECRET=(str, None),
OIDC_API_TOKEN_AUTH_AUDIENCE=(str, None),
OIDC_API_TOKEN_AUTH_ISSUER=(str, None),
TOKEN_AUTH_MAX_TOKEN_AGE=(int, 600),
OIDC_ENDPOINT=(str, None),
HELUSERS_ADGROUPS_CLAIM=(str, "groups"),
LOGGING_AUTH_DEBUG=(bool, False),
OVERLAY_SOURCE_URL=(str, "https://geoserver.hel.fi/geoserver/city-infra/wms"),
BASEMAP_SOURCE_URL=(str, "https://kartta.hel.fi/ws/geoserver/avoindata/wms"),
STATIC_URL=(str, "/static/"),
MEDIA_URL=(str, "/media/"),
)
if os.path.exists(env_file):
env.read_env(env_file)
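# Illustrative local-development .env (keys match the Env() declaration above; values are placeholders only):
#   DEBUG=True
#   SECRET_KEY=dev-secret
#   DATABASE_URL=postgis://user:pass@localhost/city-infrastructure-platform
#   OIDC_AUTHENTICATION_ENABLED=False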
SOCIAL_AUTH_TUNNISTAMO_KEY = env("SOCIAL_AUTH_TUNNISTAMO_KEY")
SOCIAL_AUTH_TUNNISTAMO_SECRET = env("SOCIAL_AUTH_TUNNISTAMO_SECRET")
HELUSERS_ADGROUPS_CLAIM = env("HELUSERS_ADGROUPS_CLAIM")
SOCIAL_AUTH_ID_TOKEN_IN_END_SESSION = False
if env("OIDC_ENDPOINT"):
SOCIAL_AUTH_TUNNISTAMO_OIDC_ENDPOINT = env("OIDC_ENDPOINT")
OIDC_API_TOKEN_AUTH = {
"AUDIENCE": env("OIDC_API_TOKEN_AUTH_AUDIENCE"),
"ISSUER": env("OIDC_API_TOKEN_AUTH_ISSUER"),
}
# General settings
DEBUG = env("DEBUG")
OIDC_AUTHENTICATION_ENABLED = env("OIDC_AUTHENTICATION_ENABLED")
TIER = env("TIER")
SECRET_KEY = env("SECRET_KEY")
if DEBUG and not SECRET_KEY:
SECRET_KEY = "xxx"
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
if OIDC_AUTHENTICATION_ENABLED and (
not SOCIAL_AUTH_TUNNISTAMO_KEY
or not SOCIAL_AUTH_TUNNISTAMO_SECRET
or not OIDC_API_TOKEN_AUTH["AUDIENCE"]
or not OIDC_API_TOKEN_AUTH["ISSUER"]
):
raise ImproperlyConfigured("Authentication not configured properly")
CACHES = {"default": env.cache()}
vars().update(env.email_url()) # EMAIL_BACKEND etc.
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {"handlers": ["console"], "level": "INFO"},
"helusers": {
"handlers": ["console"],
"level": "DEBUG" if env("LOGGING_AUTH_DEBUG") else "INFO",
"propagate": False,
},
},
}
# Application definition
DJANGO_APPS = [
"helusers",
"social_django",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"drf_yasg",
"django_filters",
"auditlog",
]
LOCAL_APPS = [
"users.apps.UsersConfig",
"traffic_control.apps.TrafficControlConfig",
"map.apps.MapConfig",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
AUTHENTICATION_BACKENDS = (
"helusers.tunnistamo_oidc.TunnistamoOIDCAuth",
"django.contrib.auth.backends.ModelBackend",
)
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "/admin/"
LOGOUT_REDIRECT_URL = "/admin/login/"
SOCIAL_AUTH_TUNNISTAMO_AUTH_EXTRA_ARGUMENTS = {"ui_locales": "fi"}
WAGTAIL_SITE_NAME = _("City Infrastructure Platform")
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
MIDDLEWARE = [
"deployment.middleware.HealthCheckMiddleware",
"azure_client_ip.middleware.AzureClientIPMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"auditlog.middleware.AuditlogMiddleware",
]
ROOT_URLCONF = "city-infrastructure-platform.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [checkout_dir("templates"), checkout_dir("map-view/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "city-infrastructure-platform.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fi"
LANGUAGES = [("fi", _("Finnish")), ("en", _("English"))]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
var_root = env.path("VAR_ROOT")
STATIC_ROOT = var_root("static")
MEDIA_ROOT = var_root("media")
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [checkout_dir("map-view/build/static")]
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# fe. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Django REST Framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"helusers.oidc.ApiTokenAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"PAGE_SIZE": 20,
"OIDC_LEEWAY": env("TOKEN_AUTH_MAX_TOKEN_AGE"),
"GROUP_CLAIM_NAME": "groups",
}
# django-cors
if DEBUG:
CORS_ORIGIN_ALLOW_ALL = True
# Azure CLIENT_IP middleware
AZURE_DEPLOYMENT = env.bool("AZURE_DEPLOYMENT")
if AZURE_DEPLOYMENT:
AZURE_ACCOUNT_KEY = env.str("AZURE_ACCOUNT_KEY")
AZURE_CONTAINER = env.str("AZURE_CONTAINER")
AZURE_ACCOUNT_NAME = env.str("AZURE_ACCOUNT_NAME")
DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage"
# Sentry-SDK
SENTRY_DSN = env.str("SENTRY_DSN")
VERSION = git_version()
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=VERSION)
# Custom settings
SRID = 3879 # the spatial reference id used for geometries
OVERLAY_SOURCE_URL = env.str("OVERLAY_SOURCE_URL")
BASEMAP_SOURCE_URL = env.str("BASEMAP_SOURCE_URL")
LOCALE_PATHS = [
"./templates/locale",
]
| 31.599327 | 90 | 0.716356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,871 | 0.51902 |
1bfd7e8367e5e96a626394bb27f0b9266054e693 | 1,184 | py | Python | test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | [
"Apache-2.0"
]
| 115 | 2021-08-29T04:28:40.000Z | 2022-03-29T22:57:48.000Z | test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | [
"Apache-2.0"
]
| 2 | 2022-01-14T01:52:07.000Z | 2022-03-04T11:40:10.000Z | test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 864fb9acc7751fc51abd3d05d24b5a9a7eab7110 | [
"Apache-2.0"
]
| 18 | 2021-09-23T06:41:10.000Z | 2022-03-22T04:37:05.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, prediction module - multiclass classification
# adapt paths for linux
import platform
import json
import sys
import os
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
path_sys = os.path.join(path_root, "pytorch_nlu", "pytorch_textclassification")
print(path_root)
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tcPredict import TextClassificationPredict
if __name__ == "__main__":
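    # Demo: load the config written during training and score a few sample texts.
    # The config path below assumes the default training output directory.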
path_config = "../output/text_classification/model_ERNIE/tc.config"
tcp = TextClassificationPredict(path_config)
texts = [{"text": "平乐县,古称昭州,隶属于广西壮族自治区桂林市,位于广西东北部,桂林市东南部,东临钟山县,南接昭平,西北毗邻阳朔,北连恭城,总面积1919.34平方公里。"},
{"text": "平乐县主要旅游景点有榕津千年古榕、冷水石景苑、仙家温泉、桂江风景区、漓江风景区等,平乐县为漓江分界点,平乐以北称漓江,以南称桂江,是著名的大桂林旅游区之一。"},
{"text": "印岭玲珑,昭水晶莹,环绕我平中。青年的乐园,多士受陶熔。生活自觉自治,学习自发自动。五育并重,手脑并用。迎接新潮流,建设新平中"},
{"text": "桂林山水甲天下, 阳朔山水甲桂林"},
]
res = tcp.predict(texts, logits_type="sigmoid")
print(res)
while True:
print("请输入:")
question = input()
res = tcp.predict([{"text": question}], logits_type="sigmoid")
print(res)
| 32 | 104 | 0.663007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.635103 |
1bfe4bc4102702ea9b7f00d3aaa75d9c6a870a4d | 5,645 | py | Python | tests/test_create_spreadsheet_values.py | Tunous/StringSheet | 3b0bd00db6ae780c523524b71774f6d3da44435f | [
"MIT"
]
| 14 | 2017-09-17T12:41:39.000Z | 2020-12-15T07:42:55.000Z | tests/test_create_spreadsheet_values.py | Tunous/StringSheet | 3b0bd00db6ae780c523524b71774f6d3da44435f | [
"MIT"
]
| 10 | 2017-09-12T20:06:47.000Z | 2021-03-22T17:16:10.000Z | tests/test_create_spreadsheet_values.py | Tunous/StringSheet | 3b0bd00db6ae780c523524b71774f6d3da44435f | [
"MIT"
]
| 1 | 2017-09-18T04:55:34.000Z | 2017-09-18T04:55:34.000Z | import unittest
from stringsheet.parser import create_spreadsheet_values
from stringsheet.parser import create_language_sheet_values
from stringsheet.parser import parse_resources
class BaseTestCase(unittest.TestCase):
def setUp(self):
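        # Shared fixture: parse the Android string resources checked into test-resources/res.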
self.resources = parse_resources('test-resources/res')
class CreateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateSpreadsheetValuesTestCase, self).setUp()
self.values = create_spreadsheet_values(self.resources)
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de', 'pl', 'zh-rCN', 'zh-rTW'],
['a_string', '', 'A string', '', '', '', ''],
['partly_added', '', 'Partly added', 'Partly added (de)', '', '',
''],
['string', 'String with comment', 'String', 'String (de)',
'String (pl)', 'String (zh-rCN)', 'String (zh-rTW)'],
['string_2', '', 'String 2', '', '', '', ''],
['array[0]', 'Item comment', 'First', '', '', '', ''],
['array[1]', '', 'Second', '', '', '', ''],
['array_comment[0]', 'Array comment', 'Some item', '', '', '', ''],
['array_comment[1]', 'Array comment', 'More items', '', '', '', ''],
['array_comment[2]', 'Comment', 'More', '', '', '', ''],
['plural{zero}', 'Parent comment', 'Other', '', '', '', ''],
['plural{one}', 'Parent comment', 'One', '', '', '', ''],
['plural{two}', 'Parent comment', 'Other', '', '', '', ''],
['plural{few}', 'Parent comment', 'Other', '', '', '', ''],
['plural{many}', 'Parent comment', 'Other', '', '', '', ''],
['plural{other}', 'Comment', 'Other', '', '', '', ''],
['plurals{zero}', 'Item comment', 'Zero', '', '', '', ''],
['plurals{one}', '', 'One', '', '', '', ''],
['plurals{two}', '', 'Two', '', '', '', ''],
['plurals{few}', '', 'Few', '', '', '', ''],
['plurals{many}', '', 'Many', '', '', '', ''],
['plurals{other}', '', 'Other', '', '', '', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateLanguageSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateLanguageSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'de')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', 'Partly added (de)'],
['string', 'String with comment', 'String', 'String (de)'],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateTemplateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateTemplateSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'Template')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'language-id'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', ''],
['string', 'String with comment', 'String', ''],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
if __name__ == '__main__':
unittest.main()
| 46.270492 | 80 | 0.478654 | 5,403 | 0.95713 | 0 | 0 | 0 | 0 | 0 | 0 | 2,423 | 0.429229 |
1bff3ce09a664d524ed5b17fd85a06acad12da24 | 195 | py | Python | libs/imgutils.py | EpicKiwi/projet-datascience | 90b59fc674fc2146634d1c1681f9b65083a7aa91 | [
"MIT"
]
| null | null | null | libs/imgutils.py | EpicKiwi/projet-datascience | 90b59fc674fc2146634d1c1681f9b65083a7aa91 | [
"MIT"
]
| null | null | null | libs/imgutils.py | EpicKiwi/projet-datascience | 90b59fc674fc2146634d1c1681f9b65083a7aa91 | [
"MIT"
]
| 2 | 2020-01-14T07:53:50.000Z | 2020-01-14T12:24:35.000Z | import cv2
def img_path2array(path):
return cv2.cvtColor(cv2.imread(path, 10), cv2.COLOR_BGR2RGB)
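# Writes an RGB array to `path`, converting back to BGR for cv2.imwrite.
# Example round trip (file names are illustrative only):
#   arr = img_path2array("input.png")
#   img_array2file("output.png", arr)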
def img_array2file(path, array):
cv2.imwrite(path, cv2.cvtColor(array, cv2.COLOR_RGB2BGR)) | 27.857143 | 64 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1bff51099f471eb1158044ba33a024f093e0aed7 | 3,079 | py | Python | bin/nsa_fail/nsa_fail.py | changhoonhahn/SEDflow | 4561ecfe3a38cc4c25df263d971a87e8a83f88ce | [
"MIT"
]
| 18 | 2022-03-16T03:11:04.000Z | 2022-03-30T16:01:42.000Z | bin/nsa_fail/nsa_fail.py | changhoonhahn/SEDflow | 4561ecfe3a38cc4c25df263d971a87e8a83f88ce | [
"MIT"
]
| null | null | null | bin/nsa_fail/nsa_fail.py | changhoonhahn/SEDflow | 4561ecfe3a38cc4c25df263d971a87e8a83f88ce | [
"MIT"
]
| null | null | null | import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
def run_mcmc(i_obs):
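    """Run the provabgs zeus MCMC fit for NSA galaxy index `i_obs`.

    Skips the galaxy if its photometry or inverse variances contain NaNs, or if
    the output chain file already exists; otherwise writes the chain to disk.
    """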
# desi MCMC object
nsa_mcmc = Infer.nsaMCMC(model=m_sps, prior=prior_sps)
fmcmc = os.path.join('/scratch/network/chhahn/sedflow/nsa_fail',
'mcmc.nsa.%i.hdf5' % i_obs)
if not os.path.isfile(fmcmc):
print('%s running' % os.path.basename(fmcmc))
if not np.all(np.isfinite(y_flux[i_obs])):
print('NaN photometry', y_flux[i_obs])
return None
if not np.all(np.isfinite(y_ivar[i_obs])):
print('NaN ivar', y_ivar[i_obs])
return None
# run MCMC
zeus_chain = nsa_mcmc.run(
bands='sdss', # u, g, r, i, z
photo_obs=y_flux[i_obs],
photo_ivar_obs=y_ivar[i_obs],
zred=y_zred[i_obs],
vdisp=0.,
sampler='zeus',
nwalkers=30,
burnin=0,
opt_maxiter=2000,
niter=niter,
progress=True,
writeout=fmcmc)
else:
print('%s already exists' % os.path.basename(fmcmc))
return None
for i in range(i0, i1+1):
run_mcmc(igals[i])
| 33.107527 | 92 | 0.528743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 950 | 0.308542 |
400075fe46c49c54066ef8f12574919b2debe75a | 2,709 | py | Python | studio/gs_provider.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | [
"Apache-2.0"
]
| null | null | null | studio/gs_provider.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | [
"Apache-2.0"
]
| null | null | null | studio/gs_provider.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | [
"Apache-2.0"
]
| null | null | null | import json
import time
import re
from .keyvalue_provider import KeyValueProvider
from .gcloud_artifact_store import GCloudArtifactStore
from .util import timeit
class GSProvider(KeyValueProvider):
def __init__(self, config, blocking_auth=True, verbose=10, store=None):
self.config = config
self.bucket = config.get('bucket', 'studioml-meta')
self.meta_store = GCloudArtifactStore(config, verbose)
super(GSProvider, self).__init__(
config,
blocking_auth,
verbose,
store)
@timeit
def _get(self, key, shallow=False):
bucket = self.meta_store._get_bucket_obj()
retval = {}
if shallow:
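            # Shallow listing: use '/' as a delimiter so only the immediate
            # children (blobs plus sub-"directory" prefixes) under key are returned.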
blob_iterator = bucket.list_blobs(
prefix=key, delimiter='/')
bloblist = list(blob_iterator)
blobnames = {b.name for b in bloblist}
prefixes = blob_iterator.prefixes
suffixes = [re.sub('^' + key, '', p) for p in prefixes | blobnames]
retval = set({})
for s in suffixes:
if s.endswith('/'):
retval.add(s[:-1])
else:
retval.add(s)
return retval
else:
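            # Deep listing: walk every blob under the prefix and rebuild the
            # nested dict structure from the '/'-separated key suffixes.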
blob_iterator = bucket.list_blobs(prefix=key)
for blob in blob_iterator:
suffix = re.sub('^' + key, '', blob.name)
if suffix == '':
return json.loads(blob.download_as_string())
path = suffix.split('/')
path = [p for p in path if p != '']
current_dict = retval
for subdir in path[:-1]:
if subdir != '':
if subdir not in current_dict.keys():
current_dict[subdir] = {}
current_dict = current_dict[subdir]
try:
current_dict[path[-1]] = json.loads(
blob.download_as_string())
except BaseException:
pass
if not any(retval):
return None
else:
return retval
def _delete(self, key):
self.meta_store._delete_file(key)
def _set(self, key, value):
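        # Uploads can fail transiently, so retry a fixed number of times with a
        # short sleep; failures on the final attempt are only logged, not raised.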
no_retries = 10
sleep_time = 1
for i in range(no_retries):
try:
self.meta_store._get_bucket_obj().blob(key) \
.upload_from_string(json.dumps(value))
break
except BaseException as e:
self.logger.error('uploading data raised an exception:')
self.logger.exception(e)
time.sleep(sleep_time)
| 31.137931 | 79 | 0.51495 | 2,544 | 0.939092 | 0 | 0 | 1,608 | 0.593577 | 0 | 0 | 85 | 0.031377 |
40007ef606785b22cbc7c72b9274d6584b3f3fb5 | 46,830 | py | Python | gslib/tests/test_ls.py | MikeJeffrey/gsutil | 12f4258540ee83aee255ec1baf50e7e6faee10e2 | [
"Apache-2.0"
]
| null | null | null | gslib/tests/test_ls.py | MikeJeffrey/gsutil | 12f4258540ee83aee255ec1baf50e7e6faee10e2 | [
"Apache-2.0"
]
| null | null | null | gslib/tests/test_ls.py | MikeJeffrey/gsutil | 12f4258540ee83aee255ec1baf50e7e6faee10e2 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ls command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from datetime import datetime
import os
import posixpath
import re
import stat
import subprocess
import sys
import time
import gslib
from gslib.commands import ls
from gslib.cs_api_map import ApiSelector
from gslib.project_id import PopulateProjectId
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.util import CaptureStdout
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import RUN_S3_TESTS
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT1_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT2_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT3_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT4_MD5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_CRC32C
from gslib.tests.util import TEST_ENCRYPTION_CONTENT5_MD5
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY2_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import TEST_ENCRYPTION_KEY3_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY4
from gslib.tests.util import TEST_ENCRYPTION_KEY4_SHA256_B64
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import UTF8
from gslib.utils.ls_helper import PrintFullInfoAboutObject
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
KMS_XML_SKIP_MSG = ('gsutil does not support KMS operations for S3 buckets, '
'or listing KMS keys with the XML API.')
BUCKET_LOCK_SKIP_MSG = ('gsutil does not support bucket lock operations for '
'S3 buckets or listing retention policy with XML API.')
class TestLsUnit(testcase.GsUtilUnitTestCase):
"""Unit tests for ls command."""
def test_one_object_with_L_storage_class_update(self):
"""Tests the JSON storage class update time field."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML API has no concept of storage class update time')
# Case 1: Create an object message where Storage class update time is the
# same as Creation time.
current_time = datetime(2017, 1, 2, 3, 4, 5, 6, tzinfo=None)
obj_metadata = apitools_messages.Object(
name='foo',
bucket='bar',
timeCreated=current_time,
updated=current_time,
timeStorageClassUpdated=current_time,
etag='12345')
# Create mock object to point to obj_metadata.
obj_ref = mock.Mock()
obj_ref.root_object = obj_metadata
obj_ref.url_string = 'foo'
# Print out attributes of object message.
with CaptureStdout() as output:
PrintFullInfoAboutObject(obj_ref)
output = '\n'.join(output)
# Verify that no Storage class update time field displays since it's the
# same as Creation time.
find_stor_update_re = re.compile(
r'^\s*Storage class update time:+(?P<stor_update_time_val>.+)$',
re.MULTILINE)
stor_update_time_match = re.search(find_stor_update_re, output)
self.assertIsNone(stor_update_time_match)
# Case 2: Create an object message where Storage class update time differs
# from Creation time.
new_update_time = datetime(2017, 2, 3, 4, 5, 6, 7, tzinfo=None)
obj_metadata2 = apitools_messages.Object(
name='foo2',
bucket='bar2',
timeCreated=current_time,
updated=current_time,
timeStorageClassUpdated=new_update_time,
etag='12345')
# Create mock object to point to obj_metadata2.
obj_ref2 = mock.Mock()
obj_ref2.root_object = obj_metadata2
obj_ref2.url_string = 'foo2'
# Print out attributes of object message.
with CaptureStdout() as output2:
PrintFullInfoAboutObject(obj_ref2)
output2 = '\n'.join(output2)
# Verify that Creation time and Storage class update time fields display and
# are the same as the times set in the object message.
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
time_created_match = re.search(find_time_created_re, output2)
self.assertIsNotNone(time_created_match)
time_created = time_created_match.group('time_created_val')
self.assertEqual(
time_created,
datetime.strftime(current_time, '%a, %d %b %Y %H:%M:%S GMT'))
find_stor_update_re_2 = re.compile(
r'^\s*Storage class update time:+(?P<stor_update_time_val_2>.+)$',
re.MULTILINE)
stor_update_time_match_2 = re.search(find_stor_update_re_2, output2)
self.assertIsNotNone(stor_update_time_match_2)
stor_update_time = stor_update_time_match_2.group('stor_update_time_val_2')
self.assertEqual(
stor_update_time,
datetime.strftime(new_update_time, '%a, %d %b %Y %H:%M:%S GMT'))
@mock.patch.object(ls.LsCommand, 'WildcardIterator')
def test_satisfies_pzs_is_displayed_if_present(self, mock_wildcard):
bucket_uri = self.CreateBucket(bucket_name='foo')
bucket_metadata = apitools_messages.Bucket(name='foo', satisfiesPZS=True)
bucket_uri.root_object = bucket_metadata
bucket_uri.url_string = 'foo'
bucket_uri.storage_url = mock.Mock()
mock_wildcard.return_value.IterBuckets.return_value = [bucket_uri]
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
stdout = self.RunCommand('ls', ['-Lb', suri(bucket_uri)],
return_stdout=True)
self.assertRegex(stdout, 'Satisfies PZS:\t\t\tTrue')
class TestLs(testcase.GsUtilIntegrationTestCase):
"""Integration tests for ls command."""
def test_blank_ls(self):
if not RUN_S3_TESTS: # Blank `ls` command lists GS buckets.
self.RunGsUtil(['ls'])
def test_empty_bucket(self):
bucket_uri = self.CreateBucket()
self.AssertNObjectsInBucket(bucket_uri, 0)
def test_empty_bucket_with_b(self):
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s/\n' % suri(bucket_uri), stdout)
_Check1()
def test_bucket_with_Lb(self):
"""Tests ls -Lb."""
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
# Check that the bucket URI is displayed.
self.assertIn(suri(bucket_uri), stdout)
# Check that we don't see output corresponding to listing objects rather
# than buckets.
self.assertNotIn('TOTAL:', stdout)
# Toggle versioning on the bucket so that the modification time will be
# greater than the creation time.
self.RunGsUtil(['versioning', 'set', 'on', suri(bucket_uri)])
self.RunGsUtil(['versioning', 'set', 'off', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
find_metageneration_re = re.compile(
r'^\s*Metageneration:\s+(?P<metageneration_val>.+)$', re.MULTILINE)
find_time_created_re = re.compile(
r'^\s*Time created:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Time updated:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
metageneration_match = re.search(find_metageneration_re, stdout)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
if self.test_api == ApiSelector.XML:
# Check that lines for JSON-specific fields are not displayed.
self.assertIsNone(metageneration_match)
self.assertIsNone(time_created_match)
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
# Check that time created/updated lines are displayed.
self.assertIsNotNone(metageneration_match)
self.assertIsNotNone(time_created_match)
self.assertIsNotNone(time_updated_match)
# Check that updated time > created time.
time_created = time_created_match.group('time_created_val')
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
time_updated = time_updated_match.group('time_updated_val')
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
# Check that for bucket policy only fields.
self._AssertBucketPolicyOnly(False, stdout)
def test_bucket_with_Lb_bucket_policy_only(self):
if self.test_api == ApiSelector.JSON:
bucket_uri = self.CreateBucket(bucket_policy_only=True,
prefer_json_api=True)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)],
return_stdout=True)
self._AssertBucketPolicyOnly(True, stdout)
def _AssertBucketPolicyOnly(self, value, stdout):
bucket_policy_only_re = re.compile(
r'^\s*Bucket Policy Only enabled:\s+(?P<bpo_val>.+)$', re.MULTILINE)
bucket_policy_only_match = re.search(bucket_policy_only_re, stdout)
bucket_policy_only_val = bucket_policy_only_match.group('bpo_val')
self.assertEqual(str(value), bucket_policy_only_val)
def test_bucket_with_lb(self):
"""Tests ls -lb."""
bucket_uri = self.CreateBucket()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-lb', suri(bucket_uri)],
return_stdout=True)
self.assertIn(suri(bucket_uri), stdout)
self.assertNotIn('TOTAL:', stdout)
_Check1()
def test_bucket_list_wildcard(self):
"""Tests listing multiple buckets with a wildcard."""
random_prefix = self.MakeRandomTestString()
bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
# This just double checks that the common prefix of the two buckets is what
# we think it should be (based on implementation detail of CreateBucket).
# We want to be careful when setting a wildcard on buckets to make sure we
# don't step outside the test buckets to affect other buckets.
common_prefix = posixpath.commonprefix(
[suri(bucket1_uri), suri(bucket2_uri)])
self.assertTrue(
common_prefix.startswith(
'%s://%sgsutil-test-test-bucket-list-wildcard' %
(self.default_provider, random_prefix)))
wildcard = '%s*' % common_prefix
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-b', wildcard], return_stdout=True)
expected = set([suri(bucket1_uri) + '/', suri(bucket2_uri) + '/'])
actual = set(stdout.split())
self.assertEqual(expected, actual)
_Check1()
def test_nonexistent_bucket_with_ls(self):
"""Tests a bucket that is known not to exist."""
stderr = self.RunGsUtil(
['ls', '-lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-Lb', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
stderr = self.RunGsUtil(
['ls', '-b', 'gs://%s' % self.nonexistent_bucket_name],
return_stderr=True,
expected_status=1)
self.assertIn('404', stderr)
def test_list_missing_object(self):
"""Tests listing a non-existent object."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'missing')],
return_stderr=True,
expected_status=1)
self.assertIn('matched no objects', stderr)
def test_with_one_object(self):
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
_Check1()
def location_redirect_test_helper(self, bucket_region, client_region):
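    """Lists a bucket created in one region using a client pinned to another.

    Exercises handling of S3 location-redirect errors for both GET (bucket
    listing) and HEAD (object listing) requests.
    """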
bucket_host = 's3.%s.amazonaws.com' % bucket_region
client_host = 's3.%s.amazonaws.com' % client_region
with SetBotoConfigForTest([('s3', 'host', bucket_host)]):
bucket_uri = self.CreateBucket(location=bucket_region)
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1(uri):
stdout = self.RunGsUtil(['ls', uri], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
# sends a GET request
_Check1(suri(bucket_uri))
# sends a HEAD request, meaning error body is not included.
_Check1(suri(obj_uri))
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_400_location_redirect(self):
# ap-east-1 used here since regions launched before March 20, 2019 do
# some temporary redirecting for new buckets which suppresses 400 errors.
self.location_redirect_test_helper('ap-east-1', 'us-east-2')
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_301_location_redirect(self):
self.location_redirect_test_helper('eu-west-1', 'us-east-2')
@SkipForXML('Credstore file gets created only for json API')
def test_credfile_lock_permissions(self):
tmpdir = self.CreateTempDir()
filepath = os.path.join(tmpdir, 'credstore2')
option = 'GSUtil:state_dir={}'.format(tmpdir)
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['-o', option, 'ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
if os.name == 'posix':
self.assertTrue(os.path.exists(filepath))
mode = oct(stat.S_IMODE(os.stat(filepath).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
_Check1()
def test_one_object_with_l(self):
"""Tests listing one object with -l."""
obj_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-l', suri(obj_uri)], return_stdout=True)
output_items = stdout.split()
self.assertTrue(output_items[0].isdigit())
# Throws exception if time string is not formatted correctly.
time.strptime(stdout.split()[1], '%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(output_items[2], suri(obj_uri))
def test_one_object_with_L(self):
"""Tests listing one object with -L."""
obj_uri = self.CreateObject(contents=b'foo')
# Ensure that creation and update don't take place in the same second.
time.sleep(2)
# Check that the creation time, rather than the updated time, is displayed.
self.RunGsUtil(['setmeta', '-h', 'x-goog-meta-foo:bar', suri(obj_uri)])
find_time_created_re = re.compile(
r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
find_time_updated_re = re.compile(
r'^\s*Update time:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
time_created_match = re.search(find_time_created_re, stdout)
time_updated_match = re.search(find_time_updated_re, stdout)
time_created = time_created_match.group('time_created_val')
self.assertIsNotNone(time_created)
time_created = time.strptime(time_created, '%a, %d %b %Y %H:%M:%S %Z')
if self.test_api == ApiSelector.XML:
# XML API has no concept of updated time.
self.assertIsNone(time_updated_match)
elif self.test_api == ApiSelector.JSON:
time_updated = time_updated_match.group('time_updated_val')
self.assertIsNotNone(time_updated)
time_updated = time.strptime(time_updated, '%a, %d %b %Y %H:%M:%S %Z')
self.assertGreater(time_updated, time_created)
def test_subdir(self):
"""Tests listing a bucket subdirectory."""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '%s/dir' % suri(bucket_uri)],
return_stdout=True)
self.assertEqual('%s\n' % suri(k2_uri), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_subdir_nocontents(self):
"""Tests listing a bucket subdirectory using -d.
Result will display subdirectory names instead of contents. Uses a wildcard
to show multiple matching subdirectories.
"""
bucket_uri = self.CreateBucket(test_objects=1)
k1_uri = bucket_uri.clone_replace_name('foo')
k1_uri.set_contents_from_string('baz')
k2_uri = bucket_uri.clone_replace_name('dir/foo')
k2_uri.set_contents_from_string('bar')
k3_uri = bucket_uri.clone_replace_name('dir/foo2')
k3_uri.set_contents_from_string('foo')
k4_uri = bucket_uri.clone_replace_name('dir2/foo3')
k4_uri.set_contents_from_string('foo2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['ls', '-d', '%s/dir*' % suri(bucket_uri)], return_stdout=True)
self.assertEqual(
'%s/dir/\n%s/dir2/\n' % (suri(bucket_uri), suri(bucket_uri)), stdout)
stdout = self.RunGsUtil(['ls', suri(k1_uri)], return_stdout=True)
self.assertEqual('%s\n' % suri(k1_uri), stdout)
_Check1()
def test_versioning(self):
"""Tests listing a versioned bucket."""
bucket1_uri = self.CreateBucket(test_objects=1)
bucket2_uri = self.CreateVersionedBucket(test_objects=1)
self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
bucket_list = list(bucket1_uri.list_bucket())
objuri = [
bucket1_uri.clone_replace_key(key).versionless_uri
for key in bucket_list
][0]
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-a', suri(bucket2_uri)],
return_stdout=True)
self.assertNumLines(stdout, 3)
stdout = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True)
self.assertIn('%s#' % bucket2_uri.clone_replace_name(bucket_list[0].name),
stdout)
self.assertIn('metageneration=', stdout)
_Check2()
def test_etag(self):
"""Tests that listing an object with an etag."""
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
etag = obj_uri.get_key().etag.strip('"\'')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertNotIn(etag, stdout)
else:
self.assertNotIn('etag=', stdout)
_Check1()
def _Check2():
stdout = self.RunGsUtil(['ls', '-le', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check2()
def _Check3():
stdout = self.RunGsUtil(['ls', '-ale', suri(bucket_uri)],
return_stdout=True)
if self.test_api == ApiSelector.XML:
self.assertIn(etag, stdout)
else:
self.assertIn('etag=', stdout)
_Check3()
def test_labels(self):
"""Tests listing on a bucket with a label/tagging configuration."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# No labels are present by default.
self.assertRegex(stdout, r'Labels:\s+None')
# Add a label and check that it shows up.
self.RunGsUtil(['label', 'ch', '-l', 'labelkey:labelvalue', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
label_regex = re.compile(r'Labels:\s+\{\s+"labelkey":\s+"labelvalue"\s+\}',
re.MULTILINE)
self.assertRegex(stdout, label_regex)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_location_constraint(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location constraint should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location constraint:', stdout)
# Default location constraint is US
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# Default location may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
self.assertRegex(stdout, r'Location constraint:\s+\S')
# TODO(b/135700569): Stop skipping this once this field is available to all
# projects.
@unittest.skip('b/135700569')
@SkipForXML('Location type not available when using the GCS XML API.')
@SkipForS3('Location type not printed for S3 buckets.')
def test_location_type(self):
"""Tests listing a bucket with location constraint."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No location type should be shown for `-lb`
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Location type:', stdout)
# Default location type may vary between test environments; test that some
# non-whitespace character is present after the whitespace:
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertRegex(stdout, r'Location type:\s+\S')
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_logging(self):
"""Tests listing a bucket with logging config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No logging info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Logging configuration', stdout)
# Logging configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
# Enable and check
self.RunGsUtil(['logging', 'set', 'on', '-b', bucket_suri, bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tPresent', stdout)
# Disable and check
self.RunGsUtil(['logging', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_web(self):
"""Tests listing a bucket with website config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No website configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Website configuration', stdout)
# Website configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['web', 'set', '-m', 'google.com', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tPresent', stdout)
# Clear and check
self.RunGsUtil(['web', 'set', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
@SkipForXML('Requester Pays is not supported for the XML API.')
def test_requesterpays(self):
"""Tests listing a bucket with requester pays (billing) config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No requester pays configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Requester Pays enabled', stdout)
# Requester Pays configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['requesterpays', 'set', 'on', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tTrue', stdout)
# Clear and check
self.RunGsUtil(['requesterpays', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tFalse', stdout)
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents=b'x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-al', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check4():
stdout = self.RunGsUtil(['ls', '-lh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check5():
stdout = self.RunGsUtil(['ls', '-alh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check5()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
def test_list_unicode_filename(self):
"""Tests listing an object with a unicode filename."""
# Note: This test fails on Windows (command.exe). I was able to get ls to
# output Unicode filenames correctly by hacking the UniStream class code
# shown at
# http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271
# into the start of gslib/commands/ls.py, along with no-op flush and
# isastream functions (as an experiment). However, even with that change,
# the current test still fails, since it also needs to run that
# stdout/stderr-replacement code. That UniStream class replacement really
# needs to be added to the site-packages on Windows python.
object_name = u'Аудиоархив'
bucket_uri = self.CreateVersionedBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo',
object_name=object_name)
self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
stdout = self.RunGsUtil(['ls', '-ael', suri(key_uri)], return_stdout=True)
self.assertIn(object_name, stdout)
if self.default_provider == 'gs':
self.assertIn(str(key_uri.generation), stdout)
self.assertIn('metageneration=%s' % key_uri.get_key().metageneration,
stdout)
if self.test_api == ApiSelector.XML:
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
else:
# TODO: When testcase setup can use JSON, match against the exact JSON
# etag.
self.assertIn('etag=', stdout)
elif self.default_provider == 's3':
self.assertIn(key_uri.version_id, stdout)
self.assertIn(key_uri.get_key().etag.strip('"\''), stdout)
def test_list_acl(self):
"""Tests that long listing includes an ACL."""
key_uri = self.CreateObject(contents=b'foo')
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertIn('ACL:', stdout)
self.assertNotIn('ACCESS DENIED', stdout)
def test_list_gzip_content_length(self):
"""Tests listing a gzipped object."""
file_size = 10000
file_contents = b'x' * file_size
fpath = self.CreateTempFile(contents=file_contents, file_name='foo.txt')
key_uri = self.CreateObject()
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath), suri(key_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(key_uri)], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
find_content_length_re = r'Content-Length:\s+(?P<num>\d)'
self.assertRegex(stdout, find_content_length_re)
m = re.search(find_content_length_re, stdout)
content_length = int(m.group('num'))
self.assertGreater(content_length, 0)
self.assertLess(content_length, file_size)
_Check1()
def test_output_chopped(self):
"""Tests that gsutil still succeeds with a truncated stdout."""
bucket_uri = self.CreateBucket(test_objects=2)
# Run Python with the -u flag so output is not buffered.
gsutil_cmd = [
sys.executable, '-u', gslib.GSUTIL_PATH, 'ls',
suri(bucket_uri)
]
# Set bufsize to 0 to make sure output is not buffered.
p = subprocess.Popen(gsutil_cmd, stdout=subprocess.PIPE, bufsize=0)
# Immediately close the stdout pipe so that gsutil gets a broken pipe error.
p.stdout.close()
p.wait()
# Make sure it still exited cleanly.
self.assertEqual(p.returncode, 0)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_slash_only(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='/', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/', stdout)
def test_recursive_list_trailing_slash(self):
"""Tests listing an object with a trailing slash."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/',
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '/foo/', stdout)
@SkipForS3('Boto lib required for S3 does not handle paths '
'starting with slash.')
def test_recursive_list_trailing_two_slash(self):
"""Tests listing an object with two trailing slashes."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, object_name='//', contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(['ls', '-R', suri(bucket_uri)], return_stdout=True)
# Note: The suri function normalizes the URI, so the double slash gets
# removed.
self.assertIn(suri(bucket_uri) + '//', stdout)
def test_wildcard_prefix(self):
"""Tests that an object name with a wildcard does not infinite loop."""
bucket_uri = self.CreateBucket()
wildcard_folder_object = 'wildcard*/'
object_matching_folder = 'wildcard10/foo'
self.CreateObject(bucket_uri=bucket_uri,
object_name=wildcard_folder_object,
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name=object_matching_folder,
contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 2)
stderr = self.RunGsUtil(['ls', suri(bucket_uri, 'wildcard*')],
return_stderr=True,
expected_status=1)
self.assertIn(
'Cloud folder %s%s contains a wildcard' %
(suri(bucket_uri), '/wildcard*/'), stderr)
# Listing with a flat wildcard should still succeed.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri, '**')],
return_stdout=True)
self.assertNumLines(stdout, 3) # 2 object lines, one summary line.
_Check()
@SkipForS3('S3 anonymous access is not supported.')
def test_get_object_without_list_bucket_permission(self):
# Bucket is not publicly readable by default.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='permitted',
contents=b'foo')
# Set this object to be publicly readable.
self.RunGsUtil(['acl', 'set', 'public-read', suri(object_uri)])
# Drop credentials.
with self.SetAnonymousBotoCreds():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(suri(object_uri), stdout)
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_encrypted_object(self):
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_uri = self.CreateObject(object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
# Listing object with key should return unencrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY1)]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectDecrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectDecrypted()
# Listing object without a key should return encrypted hashes.
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectEncrypted():
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)],
return_stdout=True)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
_ListExpectEncrypted()
# Listing object with a non-matching key should return encrypted hashes.
with SetBotoConfigForTest([('GSUtil', 'encryption_key',
TEST_ENCRYPTION_KEY2)]):
_ListExpectEncrypted()
@SkipForS3('S3 customer-supplied encryption keys are not supported.')
def test_list_mixed_encryption(self):
"""Tests listing objects with various encryption interactions."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=TEST_ENCRYPTION_CONTENT1,
encryption_key=TEST_ENCRYPTION_KEY1)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo2',
contents=TEST_ENCRYPTION_CONTENT2,
encryption_key=TEST_ENCRYPTION_KEY2)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo3',
contents=TEST_ENCRYPTION_CONTENT3,
encryption_key=TEST_ENCRYPTION_KEY3)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo4',
contents=TEST_ENCRYPTION_CONTENT4,
encryption_key=TEST_ENCRYPTION_KEY4)
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo5',
contents=TEST_ENCRYPTION_CONTENT5)
# List 5 objects, one encrypted with each of four keys, and one
# unencrypted. Supplying keys [1,3,4] should result in four unencrypted
# listings and one encrypted listing (for key 2).
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY4)
]):
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _ListExpectMixed():
"""Validates object listing."""
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn(TEST_ENCRYPTION_CONTENT1_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT1_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY1_SHA256_B64.decode('ascii'), stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_MD5, stdout)
self.assertNotIn(TEST_ENCRYPTION_CONTENT2_CRC32C, stdout)
self.assertIn('encrypted', stdout)
self.assertIn(TEST_ENCRYPTION_KEY2_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT3_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY3_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT4_CRC32C, stdout)
self.assertIn(TEST_ENCRYPTION_KEY4_SHA256_B64.decode('ascii'), stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_MD5, stdout)
self.assertIn(TEST_ENCRYPTION_CONTENT5_CRC32C, stdout)
_ListExpectMixed()
def test_non_ascii_project_fails(self):
stderr = self.RunGsUtil(['ls', '-p', 'ã', 'gs://fobarbaz'],
expected_status=1,
return_stderr=True)
self.assertIn('Invalid non-ASCII', stderr)
def set_default_kms_key_on_bucket(self, bucket_uri):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(
keyring_fqn, testcase.KmsTestingResources.CONSTANT_KEY_NAME)
# Make sure that the service account for the desired bucket's parent project
# is authorized to encrypt with the key above.
self.RunGsUtil(['kms', 'encryption', '-k', key_fqn, suri(bucket_uri)])
return key_fqn
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_default_kms_key_listed_for_bucket(self):
bucket_uri = self.CreateBucket()
# Default KMS key is not set by default.
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+None')
# Default KMS key's name should be listed after being set on the bucket.
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default KMS key:\s+%s' % key_fqn)
@SkipForXML(KMS_XML_SKIP_MSG)
@SkipForS3(KMS_XML_SKIP_MSG)
def test_kms_key_listed_for_kms_encrypted_object(self):
bucket_uri = self.CreateBucket()
key_fqn = self.set_default_kms_key_on_bucket(bucket_uri)
# Copy an object into our bucket and encrypt using the key from above.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo',
kms_key_name=key_fqn)
stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
self.assertRegex(stdout, r'KMS key:\s+%s' % key_fqn)
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_retention_policy(self):
bucket_uri = self.CreateBucketWithRetentionPolicy(
retention_period_in_seconds=1)
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Retention Policy\:\t*Present')
# Clearing Retention Policy on the bucket.
self.RunGsUtil(['retention', 'clear', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Retention Policy:')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_default_event_based_hold(self):
bucket_uri = self.CreateBucket()
self.RunGsUtil(['retention', 'event-default', 'set', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertRegex(stdout, r'Default Event-Based Hold:\t* *True')
# Clearing the default Event-Based Hold on the bucket.
self.RunGsUtil(['retention', 'event-default', 'release', suri(bucket_uri)])
stdout = self.RunGsUtil(['ls', '-Lb', suri(bucket_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Default Event-Based Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_temporary_hold(self):
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'temp', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Temporary Hold')
# Clearing the Temporary Hold on the object.
self.RunGsUtil(['retention', 'temp', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Temporary Hold')
@SkipForXML(BUCKET_LOCK_SKIP_MSG)
@SkipForS3(BUCKET_LOCK_SKIP_MSG)
def test_list_event_based_hold(self):
object_uri = self.CreateObject(contents=b'content')
self.RunGsUtil(['retention', 'event', 'set', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertRegex(stdout, r'Event-Based Hold')
# Clearing the Event-Based Hold on the object.
self.RunGsUtil(['retention', 'event', 'release', suri(object_uri)])
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
self.assertNotRegex(stdout, r'Event-Based Hold')
| 44.137606 | 103 | 0.688661 | 43,327 | 0.92498 | 0 | 0 | 23,738 | 0.506778 | 0 | 0 | 13,851 | 0.295702 |
4001312cef0d9f28268935ec40cf1f39b54d853e | 131 | py | Python | onadata/libs/utils/audit.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
]
| null | null | null | onadata/libs/utils/audit.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
]
| null | null | null | onadata/libs/utils/audit.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
]
| null | null | null | # coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
HOME_ACCESSED = "home-accessed"
| 32.75 | 82 | 0.824427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.229008 |
4001b461738a1a675ced54e42a87a9e7681bbab2 | 2,217 | py | Python | places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 7112977d6615124412b7e7ffc4abdcaa969b4078 | [
"MIT"
]
| null | null | null | places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 7112977d6615124412b7e7ffc4abdcaa969b4078 | [
"MIT"
]
| null | null | null | places/management/commands/load_places.py | aevtikheev/dvmn-yandex-afisha | 7112977d6615124412b7e7ffc4abdcaa969b4078 | [
"MIT"
]
| null | null | null | import logging
from urllib.parse import unquote, urlparse
from pathlib import PurePosixPath
import requests
from requests.exceptions import ReadTimeout, ConnectionError, HTTPError
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from places.models import Place, Image
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class Command(BaseCommand):
help = 'Uploads data for a place'
def add_arguments(self, parser):
parser.add_argument('data_urls', nargs='+', type=str)
def handle(self, *args, **options):
for url in options['data_urls']:
response = requests.get(url)
response.raise_for_status()
place_data = response.json()
new_place, created = Place.objects.get_or_create(
title=place_data['title'],
defaults={
'short_description': place_data['description_short'],
'long_description': place_data['description_long'],
'longitude': place_data['coordinates']['lng'],
'latitude': place_data['coordinates']['lat']
}
)
if created:
logging.info(f'Place "{new_place.title}" created')
else:
logging.info(f'Place "{new_place.title}" already exists')
for image_position, image_url in enumerate(place_data['imgs']):
try:
response = requests.get(image_url)
response.raise_for_status()
except (ReadTimeout, ConnectionError, HTTPError) as exception:
logging.exception(exception)
continue
new_image, _ = Image.objects.get_or_create(
place=new_place,
position=image_position
)
image_content = ContentFile(response.content)
image_name = PurePosixPath(unquote(urlparse(image_url).path)).parts[-1]
new_image.image.save(image_name, image_content)
logging.info(f'Image {image_name} for place "{new_place.title}" uploaded')
| 39.589286 | 90 | 0.59991 | 1,817 | 0.819576 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.162833 |
4002a9f7b6d3888657a9b000e3fb8c2cb6fac5dd | 18,227 | py | Python | gslib/utils/ls_helper.py | dickmao/gsutil | 3b61bf0e6188f65f78c72c79ea3cb69e9c61da4b | [
"Apache-2.0"
]
| 1 | 2021-09-11T23:58:39.000Z | 2021-09-11T23:58:39.000Z | gslib/utils/ls_helper.py | shinfan/gsutil | 45b5fc020bed44c6342fe70ce8b081aa222d9213 | [
"Apache-2.0"
]
| null | null | null | gslib/utils/ls_helper.py | shinfan/gsutil | 45b5fc020bed44c6342fe70ce8b081aa222d9213 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import sys
import six
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.storage_url import GenerationFromUrlAndString
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
from gslib.utils.constants import UTF8
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AclTranslation
from gslib.utils import text_util
from gslib.wildcard_iterator import StorageUrlFromString
ENCRYPTED_FIELDS = [
'md5Hash',
'crc32c',
]
UNENCRYPTED_FULL_LISTING_FIELDS = [
'acl',
'cacheControl',
'componentCount',
'contentDisposition',
'contentEncoding',
'contentLanguage',
'contentType',
'customTime',
'kmsKeyName',
'customerEncryption',
'etag',
'eventBasedHold',
'generation',
'metadata',
'metageneration',
'retentionExpirationTime',
'size',
'storageClass',
'temporaryHold',
'timeCreated',
'timeDeleted',
'timeStorageClassUpdated',
'updated',
]
def MakeMetadataLine(label, value, indent=1):
"""Returns a string with a vertically aligned label and value.
Labels of the same indentation level will start at the same column. Values
will all start at the same column (unless the combined left-indent and
label length is excessively long). If a value spans multiple lines,
indentation will only be applied to the first line. Example output from
several calls:
Label1: Value (default indent of 1 was used)
Sublabel1: Value (used indent of 2 here)
Label2: Value
Args:
label: The label to print in the first column.
value: The value to print in the second column.
indent: (4 * indent) spaces will be placed before the label.
Returns:
A string with a vertically aligned label and value.
"""
return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
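# Editorial illustration (not part of the original gsutil module, and never
# called): a minimal sketch of how MakeMetadataLine lines up labels and
# values, assuming the 28-column ljust used above.
def _ExampleMakeMetadataLines():
  """Returns a small block of vertically aligned metadata lines."""
  return '\n'.join([
      MakeMetadataLine('Content-Length', 1024),
      MakeMetadataLine('Content-Type', 'text/plain'),
      MakeMetadataLine('Metadata', ''),
      MakeMetadataLine('color', 'blue', indent=2),
  ])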
def PrintBucketHeader(bucket_listing_ref): # pylint: disable=unused-argument
"""Default function for printing headers for buckets.
Header is printed prior to listing the contents of the bucket.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET.
"""
pass
def PrintDir(bucket_listing_ref):
"""Default function for printing buckets or prefixes.
Args:
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
text_util.print_to_fd(bucket_listing_ref.url_string)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
"""Off-by-default function for printing buckets or prefix size summaries.
Args:
num_bytes: Number of bytes contained in the directory.
bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
"""
pass
def PrintDirHeader(bucket_listing_ref):
"""Default function for printing headers for prefixes.
Header is printed prior to listing the contents of the prefix.
Args:
bucket_listing_ref: BucketListingRef of type PREFIX.
"""
text_util.print_to_fd('{}:'.format(bucket_listing_ref.url_string))
def PrintNewLine():
"""Default function for printing new lines between directories."""
text_util.print_to_fd()
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
"""Print full info for given object (like what displays for gsutil ls -L).
Args:
bucket_listing_ref: BucketListingRef being listed.
Must have ref_type OBJECT and a populated root_object
with the desired fields.
incl_acl: True if ACL info should be output.
Returns:
Tuple (number of objects, object_length)
Raises:
Exception: if calling bug encountered.
"""
url_str = bucket_listing_ref.url_string
storage_url = StorageUrlFromString(url_str)
obj = bucket_listing_ref.root_object
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
num_bytes = obj.size
num_objs = 1
text_util.print_to_fd('{}:'.format(url_str))
if obj.timeCreated:
text_util.print_to_fd(
MakeMetadataLine('Creation time',
obj.timeCreated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.updated:
text_util.print_to_fd(
MakeMetadataLine('Update time',
obj.updated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if (obj.timeStorageClassUpdated and
obj.timeStorageClassUpdated != obj.timeCreated):
text_util.print_to_fd(
MakeMetadataLine(
'Storage class update time',
obj.timeStorageClassUpdated.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.storageClass:
text_util.print_to_fd(MakeMetadataLine('Storage class', obj.storageClass))
if obj.temporaryHold:
text_util.print_to_fd(MakeMetadataLine('Temporary Hold', 'Enabled'))
if obj.eventBasedHold:
text_util.print_to_fd(MakeMetadataLine('Event-Based Hold', 'Enabled'))
if obj.retentionExpirationTime:
text_util.print_to_fd(
MakeMetadataLine(
'Retention Expiration',
obj.retentionExpirationTime.strftime('%a, %d %b %Y %H:%M:%S GMT')))
if obj.kmsKeyName:
text_util.print_to_fd(MakeMetadataLine('KMS key', obj.kmsKeyName))
if obj.cacheControl:
text_util.print_to_fd(MakeMetadataLine('Cache-Control', obj.cacheControl))
if obj.contentDisposition:
text_util.print_to_fd(
MakeMetadataLine('Content-Disposition', obj.contentDisposition))
if obj.contentEncoding:
text_util.print_to_fd(
MakeMetadataLine('Content-Encoding', obj.contentEncoding))
if obj.contentLanguage:
text_util.print_to_fd(
MakeMetadataLine('Content-Language', obj.contentLanguage))
text_util.print_to_fd(MakeMetadataLine('Content-Length', obj.size))
text_util.print_to_fd(MakeMetadataLine('Content-Type', obj.contentType))
if obj.componentCount:
text_util.print_to_fd(
MakeMetadataLine('Component-Count', obj.componentCount))
if obj.customTime:
text_util.print_to_fd(MakeMetadataLine('Custom-Time', obj.customTime))
if obj.timeDeleted:
text_util.print_to_fd(
MakeMetadataLine('Noncurrent time',
obj.timeDeleted.strftime('%a, %d %b %Y %H:%M:%S GMT')))
marker_props = {}
if obj.metadata and obj.metadata.additionalProperties:
non_marker_props = []
for add_prop in obj.metadata.additionalProperties:
if add_prop.key not in S3_MARKER_GUIDS:
non_marker_props.append(add_prop)
else:
marker_props[add_prop.key] = add_prop.value
if non_marker_props:
text_util.print_to_fd(MakeMetadataLine('Metadata', ''))
for ap in non_marker_props:
ap_key = '{}'.format(ap.key)
ap_value = '{}'.format(ap.value)
meta_data_line = MakeMetadataLine(ap_key, ap_value, indent=2)
text_util.print_to_fd(meta_data_line)
if obj.customerEncryption:
if not obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', 'encrypted'))
if not obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', 'encrypted'))
text_util.print_to_fd(
MakeMetadataLine('Encryption algorithm',
obj.customerEncryption.encryptionAlgorithm))
text_util.print_to_fd(
MakeMetadataLine('Encryption key SHA256',
obj.customerEncryption.keySha256))
if obj.crc32c:
text_util.print_to_fd(MakeMetadataLine('Hash (crc32c)', obj.crc32c))
if obj.md5Hash:
text_util.print_to_fd(MakeMetadataLine('Hash (md5)', obj.md5Hash))
text_util.print_to_fd(MakeMetadataLine('ETag', obj.etag.strip('"\'')))
if obj.generation:
generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
text_util.print_to_fd(MakeMetadataLine('Generation', generation_str))
if obj.metageneration:
text_util.print_to_fd(MakeMetadataLine('Metageneration',
obj.metageneration))
if incl_acl:
# JSON API won't return acls as part of the response unless we have
# full control scope
if obj.acl:
text_util.print_to_fd(
MakeMetadataLine('ACL', AclTranslation.JsonFromMessage(obj.acl)))
elif S3_ACL_MARKER_GUID in marker_props:
text_util.print_to_fd(
MakeMetadataLine('ACL', marker_props[S3_ACL_MARKER_GUID]))
else:
# Empty ACLs are possible with Bucket Policy Only and no longer imply
# ACCESS DENIED anymore.
text_util.print_to_fd(MakeMetadataLine('ACL', '[]'))
return (num_objs, num_bytes)
def PrintObject(bucket_listing_ref):
"""Default printing function for objects.
Args:
bucket_listing_ref: BucketListingRef of type OBJECT.
Returns:
(num_objects, num_bytes).
"""
try:
text_util.print_to_fd(bucket_listing_ref.url_string)
except IOError as e:
# Windows throws an IOError 0 here for object names containing Unicode
# chars. Ignore it.
if not (IS_WINDOWS and e.errno == 0):
raise
return (1, 0)
class LsHelper(object):
"""Helper class for ls and du."""
def __init__(self,
iterator_func,
logger,
print_object_func=PrintObject,
print_dir_func=PrintDir,
print_dir_header_func=PrintDirHeader,
print_bucket_header_func=PrintBucketHeader,
print_dir_summary_func=PrintDirSummary,
print_newline_func=PrintNewLine,
all_versions=False,
should_recurse=False,
exclude_patterns=None,
fields=('name',),
list_subdir_contents=True):
"""Initializes the helper class to prepare for listing.
Args:
iterator_func: Function for instantiating iterator.
Inputs-
url_string- Url string to iterate on. May include
wildcards.
all_versions=False- If true, iterate over all object
versions.
logger: Logger for outputting warnings / errors.
print_object_func: Function for printing objects.
print_dir_func: Function for printing buckets/prefixes.
print_dir_header_func: Function for printing header line for buckets
or prefixes.
print_bucket_header_func: Function for printing header line for buckets
or prefixes.
print_dir_summary_func: Function for printing size summaries about
buckets/prefixes.
print_newline_func: Function for printing new lines between dirs.
all_versions: If true, list all object versions.
should_recurse: If true, recursively listing buckets/prefixes.
exclude_patterns: Patterns to exclude when listing.
fields: Fields to request from bucket listings; this should
include all fields that need to be populated in
objects so they can be listed. Can be set to None
to retrieve all object fields. Defaults to short
listing fields.
list_subdir_contents: If true, return the directory and any contents,
otherwise return only the directory itself.
"""
self._iterator_func = iterator_func
self.logger = logger
self._print_object_func = print_object_func
self._print_dir_func = print_dir_func
self._print_dir_header_func = print_dir_header_func
self._print_bucket_header_func = print_bucket_header_func
self._print_dir_summary_func = print_dir_summary_func
self._print_newline_func = print_newline_func
self.all_versions = all_versions
self.should_recurse = should_recurse
self.exclude_patterns = exclude_patterns
self.bucket_listing_fields = fields
self.list_subdir_contents = list_subdir_contents
def ExpandUrlAndPrint(self, url):
"""Iterates over the given URL and calls print functions.
Args:
url: StorageUrl to iterate over.
Returns:
      (num_dirs, num_objects, num_bytes) total number of directories, objects, and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
print_newline = False
if url.IsBucket() or self.should_recurse:
# IsBucket() implies a top-level listing.
if url.IsBucket():
self._print_bucket_header_func(url)
return self._RecurseExpandUrlAndPrint(url.url_string,
print_initial_newline=False)
else:
# User provided a prefix or object URL, but it's impossible to tell
# which until we do a listing and see what matches.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields))
plurality = top_level_iterator.HasPlurality()
try:
top_level_iterator.PeekException()
except EncryptionException:
# Detailed listing on a single object can perform a GetObjectMetadata
# call, which raises if a matching encryption key isn't found.
# Re-iterate without requesting encrypted fields.
top_level_iterator = PluralityCheckableIterator(
self._iterator_func(
url.CreatePrefixUrl(wildcard_suffix=None),
all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=UNENCRYPTED_FULL_LISTING_FIELDS))
plurality = top_level_iterator.HasPlurality()
for blr in top_level_iterator:
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
print_newline = True
elif blr.IsPrefix():
if print_newline:
self._print_newline_func()
else:
print_newline = True
if plurality and self.list_subdir_contents:
self._print_dir_header_func(blr)
elif plurality and not self.list_subdir_contents:
print_newline = False
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(
wildcard_suffix='*' if self.list_subdir_contents else None)
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a CsBucketListingRef of type Bucket')
num_objects += no
num_dirs += nd
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _RecurseExpandUrlAndPrint(self, url_str, print_initial_newline=True):
"""Iterates over the given URL string and calls print functions.
Args:
url_str: String describing StorageUrl to iterate over.
Must be of depth one or higher.
print_initial_newline: If true, print a newline before recursively
expanded prefixes.
Returns:
      (num_dirs, num_objects, num_bytes) total number of directories, objects, and bytes iterated.
"""
num_objects = 0
num_dirs = 0
num_bytes = 0
for blr in self._iterator_func(
'%s' % url_str, all_versions=self.all_versions).IterAll(
expand_top_level_buckets=True,
bucket_listing_fields=self.bucket_listing_fields):
if self._MatchesExcludedPattern(blr):
continue
if blr.IsObject():
nd = 0
no, nb = self._print_object_func(blr)
elif blr.IsPrefix():
if self.should_recurse:
if print_initial_newline:
self._print_newline_func()
else:
print_initial_newline = True
self._print_dir_header_func(blr)
expansion_url_str = StorageUrlFromString(
blr.url_string).CreatePrefixUrl(wildcard_suffix='*')
nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
self._print_dir_summary_func(nb, blr)
else:
nd, no, nb = 1, 0, 0
self._print_dir_func(blr)
else:
# We handle all buckets at the top level, so this should never happen.
raise CommandException(
'Sub-level iterator returned a bucketListingRef of type Bucket')
num_dirs += nd
num_objects += no
num_bytes += nb
return num_dirs, num_objects, num_bytes
def _MatchesExcludedPattern(self, blr):
"""Checks bucket listing reference against patterns to exclude.
Args:
blr: BucketListingRef to check.
Returns:
True if reference matches a pattern and should be excluded.
"""
if self.exclude_patterns:
tomatch = six.ensure_str(blr.url_string)
for pattern in self.exclude_patterns:
if fnmatch.fnmatch(tomatch, six.ensure_str(pattern)):
return True
return False
| 36.971602 | 80 | 0.681242 | 8,159 | 0.447633 | 0 | 0 | 0 | 0 | 0 | 0 | 7,383 | 0.405058 |
4003c8b3ef448fd698e5fde8ffd4368a0004acc2 | 2,250 | py | Python | app/config/cnMysql.py | itay-moav/rahl_commander | 79b9bb7d16f4f9511820d0e0ffcbba6ee8e0e42b | [
"MIT"
]
| 1 | 2016-12-19T16:09:02.000Z | 2016-12-19T16:09:02.000Z | app/config/cnMysql.py | itay-moav/rahl_commander | 79b9bb7d16f4f9511820d0e0ffcbba6ee8e0e42b | [
"MIT"
]
| 19 | 2015-01-08T18:34:13.000Z | 2018-02-26T14:51:22.000Z | app/config/cnMysql.py | itay-moav/rahl_commander | 79b9bb7d16f4f9511820d0e0ffcbba6ee8e0e42b | [
"MIT"
]
| null | null | null | '''
Created on Dec 28, 2021
@author: Itay
Abstracting the DB connection piece
'''
import mysql.connector as My
from app import logging as L
from app import exceptions as exceptions
class Connection():
'''
Abstracting the actions on a DB
'''
def __init__(self, connection_config):
self._debug_connection_info = "{}@{}".format(connection_config['username'],connection_config['host'])
L.info("Trying to connect to {}".format(self._debug_connection_info))
self._connection = My.connect(user=connection_config['username'], password=connection_config['password'],host=connection_config['host'],buffered=True)
def get_connection(self):
return self._connection
def change_db(self,new_db):
connection = self.get_connection()
if connection.database != new_db and new_db:
try:
connection.database = new_db
except My.Error as err:
if err.errno == My.errorcode.ER_BAD_DB_ERROR:
return False
else:
msg = "Error occured while changing DB in mysql connection [{}]".format(err)
L.fatal(msg)
raise exceptions.SQLError(msg)
return True
def cursor(self):
return self.get_connection().cursor()
def commit(self):
return self.get_connection().commit()
def debug_connection(self):
return "server: [{}] database: [{}]".format(self.debug_connection_info,self.get_connection().database)
def execute(self,sql,query_params=()):
L.debug("Running sql [{}]".format(sql))
cursor = self.cursor()
cursor.execute(sql,query_params)
return cursor
def execute_fetchall(self,sql):
cursor = self.execute(sql)
return cursor.fetchall()
def insert_rcom_sql_upgrades(self,db,file_values):
sql = "INSERT IGNORE INTO {}.rcom_sql_upgrades VALUES {}".format(db,file_values)
self.execute(sql)
def mark_complete_rcom_sql_upgrades(self,db,file_name):
sql = sql = "UPDATE {}.rcom_sql_upgrades SET execution_status='completed' WHERE file_name = %s LIMIT 1".format(db)
self.execute(sql,(file_name,)) | 34.615385 | 158 | 0.636444 | 2,066 | 0.918222 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.200444 |
4004bec8c10906a7cd716dc8ff33d14546f3a2fe | 1,527 | py | Python | src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | d031a8fe5faa2ef35f2c1dbb8241281ffda22429 | [
"MIT"
]
| null | null | null | src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | d031a8fe5faa2ef35f2c1dbb8241281ffda22429 | [
"MIT"
]
| 8 | 2020-02-19T20:03:44.000Z | 2022-02-03T19:27:24.000Z | src/detector/pre_process_test_data.py | DomGonthier/PecheFantome | d031a8fe5faa2ef35f2c1dbb8241281ffda22429 | [
"MIT"
]
| 3 | 2020-02-19T19:02:19.000Z | 2021-12-14T14:06:25.000Z | import os
from tqdm import tqdm
import cv2
import numpy as np
#pre process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
def pre_process():
print("analyze images")
for Files in tqdm(os.listdir(path)):
if "jpg" in Files:
#print(Files)
img = cv2.imread(path + Files, 1)
height, width, chan = img.shape
#print(width)
#print(height)
list_width.append(width)
list_height.append(height)
max_width = np.max(list_width)
max_height = np.max(list_height)
if max_height == max_width :
print("max height == max width")
print("format images: ")
for image in tqdm(os.listdir(path)):
if "jpg" in image:
#print(image)
img = cv2.imread(path + image, 1)
height, width, chan = img.shape
new_height = (round(max_height/16)+1)*16 # image dimension needs to be a multiple of 16
new_width = new_height # image needs to be squared
delta_width = new_width - width
delta_height = new_height - height
#print("delta height",delta_height)
#print("delta width",delta_width)
pad_img = cv2.copyMakeBorder(img, 0, delta_height, 0, delta_width, cv2.BORDER_CONSTANT,None, value = 0)
#list_image.append(pad_img)
cv2.imwrite("test_data/"+image, pad_img)
pre_process()
for image in list_image:
print(image.shape)
| 31.163265 | 115 | 0.59201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.222659 |
4004f14ddc4bfb878b0872bfe2604774deea7bcf | 4,934 | py | Python | tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
]
| 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
]
| 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
]
| 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
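# Editorial illustration (not part of the original test, and never called):
# a minimal sketch of using create_local_cluster() with the TF 1.x
# graph-mode API assumed throughout this file.
def _example_use_of_create_local_cluster():
  workers, _ = create_local_cluster(num_workers=2, num_ps=1)
  with tf.device("/job:ps/task:0"):
    var = tf.Variable(10.0)
  with tf.Session(workers[0].target) as sess:
    sess.run(var.initializer)
    return sess.run(var)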
class CreateLocalClusterTest(tf.test.TestCase):
def testCreateLocalCluster(self):
workers, _ = create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
var0 = tf.Variable(0.0)
with tf.device("/job:ps/task:1"):
var1 = tf.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with tf.device("/job:ps/task:0"):
var2 = tf.Variable(2.0)
with tf.device("/job:ps/task:1"):
var3 = tf.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print(
"\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [tf.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024*32, 1024*128)
partitioned = []
for partition_size in partition_sizes:
# max_shard_bytes is 4, shape is 1000*partition_size float32s which should
# partition into 1000 shards, each containing partition_size float32s.
print("Building partitioned variable with %d floats per partition"
% partition_size)
with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
partitioned_ix = tf.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=tf.float32,
# Each partition to have exactly N float32s
partitioner=tf.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(tf.convert_to_tensor(partitioned_ix))
tf.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats"
% partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
tf.test.main()
| 37.097744 | 80 | 0.694366 | 3,153 | 0.639035 | 0 | 0 | 0 | 0 | 0 | 0 | 1,534 | 0.310904 |
4007ccb371063c993bd22bb2370d18838e357a3f | 3,218 | py | Python | extractor/util.py | bcskda/vk-archive-deepercopy | 3619b94eb3e0f5f67860022cdfb2074e457c0cd2 | [
"Unlicense"
]
| 1 | 2020-04-24T09:24:31.000Z | 2020-04-24T09:24:31.000Z | extractor/util.py | bcskda/vk-archive-deepercopy | 3619b94eb3e0f5f67860022cdfb2074e457c0cd2 | [
"Unlicense"
]
| null | null | null | extractor/util.py | bcskda/vk-archive-deepercopy | 3619b94eb3e0f5f67860022cdfb2074e457c0cd2 | [
"Unlicense"
]
| null | null | null | import functools
import glob
import itertools
import logging
import os
from progressbar import progressbar
import re
import requests
from typing import List
class ValueSingleDispatch:
def __init__(self):
self._handlers = dict()
def register(self, key):
def decorator(fn: callable):
if key in self._handlers:
raise KeyError(key)
self._handlers[key] = fn
return fn
return decorator
def call(self, key, *args, **kwargs):
if key not in self._handlers:
raise KeyError(key)
return self._handlers[key](*args, **kwargs)
def valid_keys(self):
return self._handlers.keys()
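# Editorial illustration (not part of the original module, and never called):
# a minimal sketch of ValueSingleDispatch -- register one handler per key,
# then dispatch on the value at call time. The 'jpg'/'png' keys are made up.
def _example_value_single_dispatch():
    dispatcher = ValueSingleDispatch()
    @dispatcher.register('jpg')
    def handle_jpg(name):
        return 'jpeg:' + name
    @dispatcher.register('png')
    def handle_png(name):
        return 'png:' + name
    return dispatcher.call('jpg', 'photo')  # -> 'jpeg:photo'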
def alphanumeric_glob(pattern: str):
"""Glob and sort alpahnumerically. Limitations: exactly one `*', no `?', file names with single extention."""
matches = glob.glob(pattern)
asterisk_pos = pattern.find('*')
matches.sort(key=lambda name: int(name[asterisk_pos:name.rfind('.')]))
return matches
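# Editorial illustration (not part of the original module, and never called):
# with hypothetical files frame1.jpg ... frame10.jpg on disk, glob.glob()
# would order frame10.jpg before frame2.jpg, while alphanumeric_glob() sorts
# on the integer matched by the single '*', yielding frame1.jpg .. frame10.jpg.
def _example_alphanumeric_glob():
    return alphanumeric_glob('frame*.jpg')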
def findall_in_files(pattern: re.Pattern, filenames: List[str], encoding: str) -> re.Match:
"""Generator"""
for filename in filenames:
logging.debug('util.findall_in_files: input file %s', filename)
with open(filename, 'rb') as ifile:
for match in pattern.findall(ifile.read().decode(encoding)):
logging.debug('util.findall_in_files(): match: file = %s, text = %s', filename, match)
yield match
def make_pattern(url_regex: str, extentions: List[str]) -> re.Pattern:
if extentions:
ext_regex = '({})'.format('|'.join(extentions))
else:
ext_regex = '()'
return re.compile(url_regex.format(extentions=ext_regex))
def download_by_pattern(url_regex: str, filenames: List[str], output_dir: str, *, extentions=[], encoding='windows-1251', limit=None):
logging.debug('util.download_by_pattern(): pattern = %s, extentions = %s', url_regex, extentions)
pattern = make_pattern(url_regex, extentions)
matches = findall_in_files(pattern, filenames, encoding)
if limit is not None:
matches = itertools.islice(matches, limit)
matches = list(matches)
logging.info('util.download_by_pattern(): %d matches', len(matches))
os.makedirs(output_dir, exist_ok=True)
downloads = 0
# TODO statistics by extention
for idx, (url, ext) in progressbar(enumerate(matches), max_value=len(matches)):
local_name = '{:07d}'.format(idx) + '_' + os.path.basename(url)
try:
download(url, os.path.join(output_dir, local_name))
downloads += 1
except Exception as e:
            logging.warning('util.download_by_pattern(): unhandled exception: url = %s, e = %s', url, e)
logging.info('util.download_by_pattern(): %d successful downloads', downloads)
if downloads < len(matches):
logging.warning('util.download_by_pattern(): %d downloads failed, see log for warnings', len(matches) - downloads)
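# Editorial illustration (not part of the original module, and never called):
# a sketch of a download_by_pattern() call. The URL regex, input file name and
# output directory are assumptions made up for illustration; the real patterns
# live in the callers. The regex must contain an outer group plus the
# '{extentions}' placeholder so that findall() yields (url, extension) pairs
# as unpacked above.
def _example_download_by_pattern():
    url_regex = r'(https://example\.com/files/[\w/.-]+\.{extentions})'
    download_by_pattern(url_regex, ['messages0.html'], 'downloads',
                        extentions=['jpg', 'png'], limit=10)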
def download(url: str, local_path: str) -> bool:
logging.debug('util.download(): url = %s, local = %s', url, local_path)
req = requests.get(url)
with open(local_path, 'wb') as ofile:
ofile.write(req.content)
| 38.771084 | 134 | 0.657551 | 537 | 0.166874 | 462 | 0.143567 | 0 | 0 | 0 | 0 | 627 | 0.194842 |
400afc4da001a8c030925a65e03f44b9ed050772 | 1,637 | py | Python | setup.py | gillins/pyshepseg | bfa8d157d610bf4f581a2500d0afb42d4f92d59b | [
"MIT"
]
| 5 | 2021-02-03T05:02:56.000Z | 2022-01-31T07:55:20.000Z | setup.py | gillins/pyshepseg | bfa8d157d610bf4f581a2500d0afb42d4f92d59b | [
"MIT"
]
| 14 | 2021-02-03T04:18:48.000Z | 2022-01-24T03:50:22.000Z | setup.py | gillins/pyshepseg | bfa8d157d610bf4f581a2500d0afb42d4f92d59b | [
"MIT"
]
| 13 | 2021-02-03T03:41:17.000Z | 2022-01-24T04:21:23.000Z | #Copyright 2021 Neil Flood and Sam Gillingham. All rights reserved.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without restriction,
#including without limitation the rights to use, copy, modify,
#merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
#ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
#CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from numpy.distutils.core import setup
import pyshepseg
setup(name='pyshepseg',
version=pyshepseg.SHEPSEG_VERSION,
description='Python implementation of the image segmentation algorithm described by Shepherd et al',
author='Neil Flood and Sam Gillingham',
scripts=['bin/test_pyshepseg.py', 'bin/test_pyshepseg_tiling.py',
'bin/test_pyshepseg_subset.py'],
packages=['pyshepseg'],
license='LICENSE.txt',
url='https://github.com/ubarsc/pyshepseg'
)
| 46.771429 | 106 | 0.756261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,377 | 0.841173 |
400c696eb52726be2cb58df8b7625711faea5a60 | 3,846 | py | Python | src/utils.py | daochenzha/SimTSC | 6e3200510e8e464049eab95db9540afdaf397f9c | [
"MIT"
]
| 23 | 2022-01-06T05:15:35.000Z | 2022-03-28T08:08:14.000Z | src/utils.py | daochenzha/SimTSC | 6e3200510e8e464049eab95db9540afdaf397f9c | [
"MIT"
]
| 2 | 2022-02-10T02:22:35.000Z | 2022-03-28T16:45:17.000Z | src/utils.py | daochenzha/SimTSC | 6e3200510e8e464049eab95db9540afdaf397f9c | [
"MIT"
]
| 5 | 2022-01-09T08:58:24.000Z | 2022-01-19T09:52:43.000Z | import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
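# Editorial illustration (not part of the original module, and never called):
# the .npy file consumed above is assumed to be a pickled dict written with
# numpy, e.g. as sketched below with placeholder arrays.
def _example_write_npy(path, X, y, train_idx, test_idx):
    np.save(path, {'X': X, 'y': y, 'train_idx': train_idx, 'test_idx': test_idx})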
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
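# Editorial illustration (not part of the original module, and never called):
# loading a UCR dataset with a 5-shot training split. The archive path and
# dataset name are placeholders.
def _example_read_dataset():
    X, y, train_idx, test_idx = read_dataset('datasets/UCRArchive_2018', 'Coffee', shot=5)
    return X.shape, len(train_idx), len(test_idx)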
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
| 29.584615 | 103 | 0.621945 | 171 | 0.044462 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.088924 |
400d71727dfe67b72a8bc6849bc10bc05b88d55b | 17,458 | py | Python | mpinterfaces/mat2d/friction/analysis.py | yw-fang/MPInterfaces | ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e | [
"MIT"
]
| 56 | 2015-06-23T03:03:18.000Z | 2022-02-06T16:41:34.000Z | mpinterfaces/mat2d/friction/analysis.py | yw-fang/MPInterfaces | ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e | [
"MIT"
]
| 21 | 2015-09-03T17:50:18.000Z | 2022-03-01T02:26:34.000Z | mpinterfaces/mat2d/friction/analysis.py | joshgabriel/MPInterfaces | 2799ae161fa94c78842092fb24ef468607afa465 | [
"MIT"
]
| 50 | 2015-09-17T19:09:36.000Z | 2021-11-15T19:13:20.000Z | from __future__ import print_function, division, unicode_literals
import os
import warnings
import numpy as np
from scipy import interpolate
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.core.structure import Structure
from pymatgen import Element
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator as VE
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_corrugation_factor(structure):
"""
Calculate the "corrugation factor" for a 2D material.
The corrugation factor is defined as the sum of the
outer hemispheres of ionic radii of the atoms on the
material's top and bottom surfaces, divided by the
planar area of the whole unit cell's 001 plane. Top
and bottom corrugation factors are returned
separately in the final dictionary. In general,
a larger corrugation factor means a smoother surface.
Args:
structure (Structure): Pymatgen Structure object.
Returns:
corrugation_factors (dict): Dictionary of "top"
and "bottom" corrugation factors, e.g.
{"top": top_corrugation_factor,
"bottom": bottom_corrugation_factor}
"""
sites = structure.sites
valences = VE(structure).valences
formatted_valences = {}
for e in valences:
if "+" in e or "-" in e:
try:
# Some element names have a number followed
# by a plus or minus, e.g. "O2-"
int(e[-2])
element = e[:-2]
except:
# Others are simply a plus or minus, e.g. "Cl-"
element = e[:-1]
else:
element = e
formatted_valences[Element(element)] = valences[e]
all_z_coords = [s.coords[2] for s in sites]
max_z = max(all_z_coords)
min_z = min(all_z_coords)
top_layer = [s for s in sites if abs(s.coords[2] - max_z) < 0.1]
bottom_layer = [s for s in sites if abs(s.coords[2] - min_z) < 0.1]
pi = np.pi
top_sphere_area = 0
bottom_sphere_area = 0
for site in top_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
top_sphere_area += 2*pi*r*r
for site in bottom_layer:
if formatted_valences[site.specie] in site.specie.ionic_radii:
r = site.specie.ionic_radii[formatted_valences[site.specie]]
else:
r = site.specie.atomic_radius
bottom_sphere_area += 2*pi*r*r
lattice = structure.lattice
area = abs(np.cross(lattice._matrix[0], lattice._matrix[1])[2])
corrugation = {"top": top_sphere_area / area,
"bottom": bottom_sphere_area / area}
return corrugation
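# Editorial illustration (not part of the original module, and never called):
# computing corrugation factors for a structure read from disk. The POSCAR
# file name is a placeholder; any pymatgen Structure works.
def _example_corrugation_factor():
    structure = Structure.from_file('POSCAR')
    factors = get_corrugation_factor(structure)
    return factors['top'], factors['bottom']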
def plot_gamma_surface(fmt='pdf'):
"""
Collect the energies from a grid of static energy
calculations to plot the Gamma surface between two layers of the 2D
material.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
lattice = Structure.from_file('POSCAR').lattice
area = np.cross(lattice._matrix[0], lattice._matrix[1])[2]
ax = plt.figure(figsize=(n_divs_x * 1.2, n_divs_y * 1.2)).gca()
ax.set_xlim(0, n_divs_x + 1)
ax.set_ylim(0, n_divs_y + 1)
energies = []
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
not_converged = []
for x in x_values:
energies.append([])
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy / area
energies[x].append(energy)
except:
not_converged.append('{}x{}'.format(x, y))
energies[x].append(0)
os.chdir('../')
energies[x].append(energies[x][0])
energies.append([])
# ENERGY_ARRAY[n_divs_x] = ENERGY_ARRAY[0]
if not_converged:
warnings.warn('{} did not converge.'.format(not_converged))
for coords in not_converged:
energies[int(coords.split('x')[0])][int(coords.split('x')[1])] = energy
minima = []
maxima = []
for x in x_values:
minima.append(min(energies[x]))
maxima.append(max(energies[x]))
abs_minimum = min(minima)
abs_maximum = max(maxima)
for x in range(n_divs_x + 1):
for y in range(n_divs_y + 1):
# Plot all energies relative to the global minimum.
scaled_energy = energies[x][y] - abs_minimum
if '{}x{}'.format(x, y) in not_converged:
color_code = 'w'
else:
color_code = plt.cm.jet(
scaled_energy/(abs_maximum - abs_minimum))
ax.add_patch(plt.Rectangle((x, y), width=1, height=1,
facecolor=color_code, linewidth=0))
# Get rid of annoying ticks.
ax.axes.get_yaxis().set_ticks([])
ax.axes.get_xaxis().set_ticks([])
os.chdir('../../')
plt.savefig('gamma_surface.{}'.format(fmt), transparent=True)
plt.close()
def get_number_of_surface_atoms():
"""
Count the number of atoms at a 2D material's surface. This
enables energy and force calculations to be normalized to
the number of surface atoms.
Returns:
int. Number of surface atoms (top + bottom) for both
layers in the bilayer model.
"""
structure = Structure.from_file('friction/lateral/POSCAR')
heights = np.array([site.z for site in structure.sites])
max_height = max(heights)
min_height = min(heights)
n_atoms_top = len([height for height in heights if max_height - height < 0.1])
n_atoms_bottom = len([height for height in heights if height - min_height < 0.1])
return (n_atoms_top + n_atoms_bottom) * 2
def get_basin_and_peak_locations():
"""
Find which directories inside 'friction/lateral' represent
the minimum (basin) and maximum (peak) energy stacking
configurations.
Returns:
tuple. Of the form (basin, peak).
"""
os.chdir('friction/lateral')
static_dirs = [d.split('x') for d in os.listdir(os.getcwd())
if 'x' in d and os.path.isdir(d)]
n_divs_x = max([int(d[0]) for d in static_dirs])
n_divs_y = max([int(d[1]) for d in static_dirs])
x_values = range(n_divs_x + 1)
y_values = range(n_divs_y + 1)
abs_maximum = -np.Infinity
abs_minimum = np.Infinity
for x in x_values:
for y in y_values:
dir = '{}x{}'.format(x, y)
os.chdir(dir)
try:
energy = Vasprun('vasprun.xml').final_energy
if energy < abs_minimum:
basin = dir
abs_minimum = energy
if energy > abs_maximum:
peak = dir
abs_maximum = energy
except:
pass
os.chdir('../')
os.chdir('../../')
return(basin, peak)
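# Editorial illustration (not part of the original module, and never called):
# how the helpers in this module are typically chained, assuming the
# friction/lateral and friction/normal calculations already exist under the
# current working directory.
def _example_friction_workflow():
    basin, peak = get_basin_and_peak_locations()
    plot_gamma_surface(fmt='pdf')
    plot_normal_force(basin, fmt='pdf')
    return get_mu_vs_F_N(basin)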
def plot_friction_force(fmt='pdf'):
"""
Plot the sinusoidal curve of delta E between basin and saddle
points for each normal spacing dz.
Args:
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
f, (ax1, ax2) = plt.subplots(2, figsize=(16, 16))
spacings = sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)])
spc_range = spacings[-1] - spacings[0] + 0.1
for spacing in spacings:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2 +
(start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
ax1.plot(x, sinx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax1.set_xticklabels(ax1.get_xticks(), family='serif', fontsize=18)
ax1.set_yticklabels(ax1.get_yticks(), family='serif', fontsize=18)
ax1.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax1.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', family='serif', fontsize=24)
ax2.plot(x, cosx, linewidth=8,
color=plt.cm.jet(-(spacing - 4) / spc_range), label=spacing)
ax2.set_xticklabels(ax2.get_xticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax2.set_xlabel(r'$\mathrm{\Delta d\/(\AA)}$', family='serif', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_f\/(eV/\AA)}$', family='serif', fontsize=24)
os.chdir('../')
ax1.legend(loc='upper right')
ax2.legend(loc='upper right')
os.chdir('../../')
plt.savefig('F_f.{}'.format(fmt))
def plot_normal_force(basin_dir, fmt='pdf'):
"""
Plot the LJ-like curve of the energy at the basin point
as a function of normal spacing dz.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
fig = plt.figure(figsize=(16, 10))
ax = fig.gca()
ax2 = ax.twinx()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
ax.set_xlim(spacings[0], spacings[-1])
ax.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0))
ax2.plot([spacings[0], spacings[-1]], [0, 0], '--', color=plt.cm.jet(0.9))
E_z = ax.plot(xnew, ynew, color=plt.cm.jet(0),
linewidth=4, label=r'$\mathrm{E(z)}$')
F_N = ax2.plot(spacings, [-y for y in ynew_slope], color=plt.cm.jet(0.9),
linewidth=4, label=r'$\mathrm{F_N}$')
ax.set_ylim(ax.get_ylim())
ax.set_xticklabels(ax.get_xticks(), family='serif', fontsize=18)
ax.set_yticklabels(ax.get_yticks(), family='serif', fontsize=18)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', fontsize=18)
ax.set_xlabel(r'$\mathrm{z\/(\AA)}$', fontsize=24)
ax.set_ylabel(r'$\mathrm{E(z)\/(eV)}$', fontsize=24)
ax2.set_ylabel(r'$\mathrm{F_N\/(eV/\AA)}$', fontsize=24)
data = E_z + F_N
labs = [l.get_label() for l in data]
ax.legend(data, labs, loc='upper right', fontsize=24)
ax.plot(spacings, E, linewidth=0, marker='o', color=plt.cm.jet(0),
markersize=10, markeredgecolor='none')
os.chdir('../../')
plt.savefig('F_N.{}'.format(fmt))
def plot_mu_vs_F_N(basin_dir, fmt='pdf'):
"""
Plot friction coefficient 'mu' vs. F_Normal.
mu = F_friction / F_Normal.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
n_surface_atoms = get_number_of_surface_atoms()
fig = plt.figure(figsize=(16, 10))
# ax = fig.gca()
# ax2 = ax.twinx()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd()) if
os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
# xnew = np.arange(spacings[0], spacings[-1], 0.001)
# ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
sorted_dirs = sorted([float(spc) for spc in os.listdir(os.getcwd())
if os.path.isdir(spc)])
for spacing in sorted_dirs:
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
- Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
ax = plt.figure().gca()
ax.plot(F_N, mu, linewidth=2, marker='o', markeredgecolor='none',
markersize=3, color=plt.cm.jet(0))
plt.savefig('mu_vs_F_N.{}'.format(fmt))
def get_mu_vs_F_N(basin_dir):
"""
Essentially the same function as plotting, but without the plot.
Args:
basin_dir (str): directory corresponding to the minimum
energy on the gamma surface. Generally obtained by the
get_basin_and_peak_locations() function.
Returns:
dic: Of the form {'F_N': F_N, 'mu': mu, 'F_f': F_f}, where
forces are in nN.
"""
n_surface_atoms = get_number_of_surface_atoms()
os.chdir('friction/normal')
spacings = [float(dir) for dir in os.listdir(os.getcwd())
if os.path.isdir(dir)]
spacings.sort()
abs_E = [
Vasprun('{}/{}/vasprun.xml'.format(spacing, basin_dir)).final_energy / n_surface_atoms
for spacing in spacings
]
E = [energy - abs_E[-1] for energy in abs_E]
spline = interpolate.splrep(spacings, E, s=0)
xnew = np.arange(spacings[0], spacings[-1], 0.001)
ynew = interpolate.splev(xnew, spline, der=0)
ynew_slope = interpolate.splev(spacings, spline, der=1)
# Convert eV.A to nN
F_N = [-y * 1.602 for y in ynew_slope]
F_f = []
for spacing in sorted([float(spc) for spc in os.listdir(os.getcwd()) if
os.path.isdir(spc)]):
os.chdir(str(spacing))
subdirectories = os.listdir(os.getcwd())
try:
amplitude = abs(
Vasprun('{}/vasprun.xml'.format(subdirectories[0])).final_energy
-
Vasprun('{}/vasprun.xml'.format(subdirectories[1])).final_energy
) / (2 * n_surface_atoms)
except:
print('One or more jobs in {}/ have not converged.'.format(spacing))
start_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[0])).sites[-1].coords
end_coords = Structure.from_file(
'{}/POSCAR'.format(subdirectories[1])).sites[-1].coords
dist = np.sqrt(
(start_coords[0] - end_coords[0])**2
+ (start_coords[1] - end_coords[1])**2)
b = (2 * np.pi) / (dist * 2)
x = np.arange(0, 4, 0.01)
# sinx = [amplitude * np.sin(b * val) + amplitude for val in x]
cosx = [b * amplitude * np.cos(b * val)
if np.cos(b * val) > 0 else 0 for val in x]
F_f.append(max(cosx) * 1.602)
os.chdir('../')
os.chdir('../../')
mu = [f / N for f, N in zip(F_f, F_N)]
return {'F_N': F_N, 'mu': mu, 'F_f': F_f}
| 32.815789 | 94 | 0.592966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,543 | 0.260225 |
400f0a8fc2e264478738eb502734b3f76efaa361 | 1,380 | py | Python | aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 0f93a06e751b97959835187a05311deaffaed9d8 | [
"Apache-2.0"
]
| 4 | 2019-05-09T12:39:14.000Z | 2022-01-05T20:36:06.000Z | aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 0f93a06e751b97959835187a05311deaffaed9d8 | [
"Apache-2.0"
]
| null | null | null | aiopylimit/tests/test_aiopylimit.py | zealotous/aiopylimit | 0f93a06e751b97959835187a05311deaffaed9d8 | [
"Apache-2.0"
]
| 1 | 2022-01-05T19:56:49.000Z | 2022-01-05T19:56:49.000Z | from aiopylimit import AIOPyRateLimit
from aiopylimit import AIOPyRateLimitException
import asynctest
import asyncio
class TestPyLimit(asynctest.TestCase):
async def test_exception(self):
limit = AIOPyRateLimit(10, 10)
await self.assertAsyncRaises(AIOPyRateLimitException,
limit.attempt('test_namespace'))
async def test_throttle(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 20):
await asyncio.sleep(.5)
if x < 10:
self.assertTrue(await limit.attempt('test_namespace'))
else:
self.assertFalse(await limit.attempt('test_namespace'))
await asyncio.sleep(6)
self.assertTrue(await limit.attempt('test_namespace'))
async def test_peek(self):
AIOPyRateLimit.init(redis_host="localhost", redis_port=6379,
force_new_connection=True)
limit = AIOPyRateLimit(10, 10)
for x in range(0, 10):
self.assertTrue(await limit.attempt('test_namespace2'))
self.assertTrue(await limit.is_rate_limited('test_namespace2'))
await asyncio.sleep(10)
self.assertFalse(await limit.is_rate_limited('test_namespace2'))
| 39.428571 | 72 | 0.642754 | 1,260 | 0.913043 | 0 | 0 | 0 | 0 | 1,205 | 0.873188 | 137 | 0.099275 |
4010464a9caf650b2a6706b3ea8adb7b2458ae14 | 5,772 | py | Python | bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
]
| 18 | 2019-07-19T22:12:15.000Z | 2020-08-26T17:45:19.000Z | bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
]
| 44 | 2019-07-15T10:17:00.000Z | 2020-07-26T11:22:53.000Z | bookworm/platform_services/_win32/tesseract_download.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
]
| 9 | 2019-09-03T13:13:31.000Z | 2020-08-25T13:55:27.000Z | # coding: utf-8
import sys
import shutil
import requests
import wx
from pathlib import Path
from urllib.parse import urljoin, urlsplit
from tempfile import TemporaryFile
from zipfile import ZipFile
from bookworm import typehints as t
from bookworm import app
from bookworm.http_tools import RemoteJsonResource, HttpResource
from bookworm.ocr_engines.tesseract_ocr_engine import (
TesseractOcrEngine,
get_tesseract_path,
)
from bookworm.logger import logger
log = logger.getChild(__name__)
BRANCH = "develop"
TESSERACT_VERSION_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/version"
if app.arch == "x86":
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x86.zip"
else:
TESSERACT_ENGINE_DOWNLOAD_URL = f"https://raw.githubusercontent.com/blindpandas/bookworm/{BRANCH}/packages/tesseract/tesseract_x64.zip"
FAST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_fast/main/{lang_code}.traineddata"
BEST_TRAINEDDATA_DOWNLOAD_URL = "https://raw.githubusercontent.com/tesseract-ocr/tessdata_best/main/{lang_code}.traineddata"
def get_downloadable_languages():
return (
"afr",
"sqi",
"amh",
"ara",
"hye",
"asm",
"aze_cyrl",
"aze",
"ben",
"eus",
"bel",
"bos",
"bre",
"bul",
"mya",
"cat",
"ceb",
"chr",
"chi_sim",
"hrv",
"ces",
"dan",
"nld",
"dzo",
"eng",
"epo",
"est",
"fao",
"fil",
"fin",
"fra",
"glg",
"kat_old",
"kat",
"deu",
"ell",
"guj",
"heb",
"hin",
"hun",
"isl",
"ind",
"gle",
"ita_old",
"ita",
"jpn_vert",
"jpn",
"jav",
"kan",
"kaz",
"khm",
"kor_vert",
"kor",
"kmr",
"kir",
"lao",
"lav",
"lit",
"ltz",
"mkd",
"msa",
"mal",
"mlt",
"mri",
"mar",
"mon",
"nep",
"nor",
"ori",
"pus",
"fas",
"pol",
"por",
"pan",
"que",
"ron",
"rus",
"gla",
"srp_latn",
"srp",
"snd",
"sin",
"slk",
"slv",
"spa_old",
"spa",
"sun",
"swa",
"swe",
"tgk",
"tam",
"tat",
"tel",
"tha",
"bod",
"tir",
"ton",
"tur",
"ukr",
"urd",
"uig",
"uzb_cyrl",
"uzb",
"vie",
"cym",
"fry",
"yid",
"yor",
)
def is_tesseract_available():
return sys.platform == "win32" and TesseractOcrEngine.check()
def get_tessdata():
return get_tesseract_path() / "tessdata"
def get_language_path(language):
return Path(get_tessdata(), f"{language}.traineddata")
def is_new_tesseract_version_available():
remote_version = requests.get(TESSERACT_VERSION_URL).text
return TesseractOcrEngine.get_tesseract_version() != remote_version
def download_tesseract_engine(progress_dlg):
tesseract_directory = get_tesseract_path()
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
try:
dl_request = HttpResource(TESSERACT_ENGINE_DOWNLOAD_URL).download()
progress_dlg.set_abort_callback(dl_request.cancel)
with TemporaryFile() as dlfile:
dl_request.download_to_file(dlfile, callback)
if dl_request.is_cancelled():
return
with progress_dlg.PulseContinuously(_("Extracting file...")):
with ZipFile(dlfile, "r") as zfile:
tesseract_directory.mkdir(parents=True, exist_ok=True)
zfile.extractall(path=tesseract_directory)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Success"),
# Translators: content of a messagebox
_("Tesseract engine downloaded successfully"),
)
return True
except ConnectionError:
log.debug("Failed to download tesseract OCR engine.", exc_info=True)
wx.GetApp().mainFrame.notify_user(
# Translators: title of a messagebox
_("Connection Error"),
_(
"Could not download Tesseract OCR Engine.\nPlease check your internet and try again."
),
icon=wx.ICON_ERROR,
)
except:
log.exception(
"An error occurred while installing the Tesseract OCr Engine", exc_info=True
)
wx.GetApp().mainFrame.notify_user(
_("Error"),
_("Could not install the Tesseract OCR engine.\nPlease try again."),
icon=wx.ICON_WARNING,
)
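# Download a single .traineddata file for the given language code, using the 'best' or 'fast' variant URL.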
def download_language(lang_code, variant, target_file, progress_dlg):
url_prefix = (
BEST_TRAINEDDATA_DOWNLOAD_URL
if variant == "best"
else FAST_TRAINEDDATA_DOWNLOAD_URL
)
download_url = url_prefix.format(lang_code=lang_code)
callback = lambda prog: progress_dlg.Update(prog.percentage, prog.user_message)
dl_request = HttpResource(download_url).download()
progress_dlg.set_abort_callback(dl_request.cancel)
dl_request.download_to_filesystem(target_file, callback)
return not dl_request.is_cancelled()
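# Delete the locally installed Tesseract engine directory.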
def remove_tesseract():
tesseract_path = get_tesseract_path()
shutil.rmtree(tesseract_path, ignore_errors=False)
| 26 | 139 | 0.573458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,602 | 0.277547 |
4010dc640b95065e204f3d03308d81598d5d3d22 | 2,448 | py | Python | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | ["MIT"] | null | null | null | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | ["MIT"] | null | null | null | python/plugins/processing/algs/grass7/ext/v_proj.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | ["MIT"] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_proj.py
---------
Date : November 2017
Copyright : (C) 2017 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'November 2017'
__copyright__ = '(C) 2017, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.core import QgsProcessingParameterString
def processInputs(alg, parameters, context, feedback):
# Grab the projection from the input vector layer
layer = alg.parameterAsLayer(parameters, 'input', context)
alg.setSessionProjectionFromLayer(layer)
layerCrs = layer.crs().toProj4()
# Creates a new location with this Crs
newLocation = 'newProj{}'.format(alg.uniqueSuffix)
alg.commands.append('g.proj proj4="{}" location={}'.format(
layerCrs, newLocation))
# Go to the newly created location
alg.commands.append('g.mapset mapset=PERMANENT location={}'.format(
newLocation))
# Import the layer
alg.loadVectorLayerFromParameter(
'input', parameters, context, feedback, False)
# Go back to default location
alg.commands.append('g.mapset mapset=PERMANENT location=temp_location')
# Grab the projected Crs
crs = alg.parameterAsCrs(parameters, 'crs', context)
alg.commands.append('g.proj -c proj4="{}"'.format(
crs.toProj4(), newLocation))
# Remove crs parameter
alg.removeParameter('crs')
# Add the location parameter with proper value
location = QgsProcessingParameterString(
'location',
'new location',
'newProj{}'.format(alg.uniqueSuffix)
)
alg.addParameter(location)
| 36 | 75 | 0.561683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,534 | 0.625102 |
401141d52ec8be8928fc937b5ae582051fa62e45 | 1,919 | py | Python | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | ["Apache-2.0"] | null | null | null | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | ["Apache-2.0"] | null | null | null | examples/diode/gmsh_diode2d.py | QuantumOfMoose/devsim | 22f888119059a86bfc87ba9e7d9ac2cc90dadfb6 | ["Apache-2.0"] | null | null | null
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from devsim.python_packages.simple_physics import *
import diode_common
device="diode2d"
region="Bulk"
diode_common.Create2DGmshMesh(device, region)
# this is is the devsim format
write_devices (file="gmsh_diode2d_out.msh")
diode_common.SetParameters(device=device, region=region)
####
#### NetDoping
####
node_model(device=device, region=region, name="Acceptors", equation="1.0e18*step(0.5e-5-y);")
node_model(device=device, region=region, name="Donors" , equation="1.0e18*step(y-0.5e-5);")
node_model(device=device, region=region, name="NetDoping", equation="Donors-Acceptors;")
diode_common.InitialSolution(device, region)
####
#### Initial DC solution
####
solve(type="dc", absolute_error=1.0, relative_error=1e-12, maximum_iterations=30)
###
### Drift diffusion simulation at equilibrium
###
diode_common.DriftDiffusionInitialSolution(device, region)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=50)
v = 0.0
while v < 0.51:
set_parameter(device=device, name=GetContactBiasName("top"), value=v)
solve(type="dc", absolute_error=1e10, relative_error=1e-10, maximum_iterations=30)
PrintCurrents(device, "top")
PrintCurrents(device, "bot")
v += 0.1
write_devices(file="gmsh_diode2d.dat", type="tecplot")
write_devices(file="gmsh_diode2d_dd.msh", type="devsim")
| 30.951613 | 93 | 0.755602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 913 | 0.475769 |
40114e46f1a2c773c276da8bbeeb5529999aac68 | 470 | py | Python | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | ["Apache-2.0"] | null | null | null | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | ["Apache-2.0"] | null | null | null | python/astro_imaging/config.py | taranu/astro_imaging | a5a712576bd12762dc69f826703e077a859d8ec0 | ["Apache-2.0"] | null | null | null
from dataclasses import dataclass
import os
path_base_default = os.getenv('ASTRO_IMAGING_DATA_PATH', default='./')
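# Paths resolves its catalogs/ and images/ directories relative to the base data path whenever they are not given explicitly.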
@dataclass
class Paths:
base: str = path_base_default
catalogs: str = None
images: str = None
def __post_init__(self):
if self.catalogs is None:
self.catalogs = os.path.join(self.base, 'catalogs')
if self.images is None:
self.images = os.path.join(self.base, 'images')
paths_default = Paths()
| 22.380952 | 70 | 0.66383 | 314 | 0.668085 | 0 | 0 | 325 | 0.691489 | 0 | 0 | 47 | 0.1 |
4011b94aee384459cb359f2d52855f8d32eb9b50 | 8,018 | py | Python | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | ["MIT"] | 8 | 2021-06-07T11:02:38.000Z | 2022-03-17T11:30:28.000Z | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | ["MIT"] | null | null | null | AT.py | MTandHJ/roboc | 43e5b2f9ea520b76221a7334d34ef4aaf9b3334b | ["MIT"] | null | null | null
#!/usr/bin/env python
from typing import Tuple
import argparse
from src.loadopts import *
METHOD = "RobOC-AT"
SAVE_FREQ = 5
PRINT_FREQ = 20
FMT = "{description}={scale}-{leverage}" \
"={learning_policy}-{optimizer}-{lr}" \
"={attack}-{epsilon:.4f}-{stepsize}-{steps}" \
"={batch_size}={transform}"
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str)
parser.add_argument("dataset", type=str)
# for orthogonal classifier
parser.add_argument("--scale", type=float, default=10.,
help="the length of weights")
parser.add_argument("--leverage", type=float, default=0.15,
help="the hyper-parameter governs the relative weight between clean and adversarial samples")
# adversarial training settings
parser.add_argument("--attack", type=str, default="pgd-squared")
parser.add_argument("--epsilon", type=float, default=8/255)
parser.add_argument("--stepsize", type=float, default=0.25,
help="pgd:rel_stepsize, cwl2:step_size, deepfool:overshoot, bb:lr")
parser.add_argument("--steps", type=int, default=10)
# basic settings
parser.add_argument("--loss", type=str, default="square")
parser.add_argument("--optimizer", type=str, choices=("sgd", "adam"), default="sgd")
parser.add_argument("-mom", "--momentum", type=float, default=0.9,
help="the momentum used for SGD")
parser.add_argument("-beta1", "--beta1", type=float, default=0.9,
help="the first beta argument for Adam")
parser.add_argument("-beta2", "--beta2", type=float, default=0.999,
help="the second beta argument for Adam")
parser.add_argument("-wd", "--weight_decay", type=float, default=5e-4,
help="weight decay")
parser.add_argument("-lr", "--lr", "--LR", "--learning_rate", type=float, default=0.1)
parser.add_argument("-lp", "--learning_policy", type=str, default="default",
help="learning rate schedule defined in config.py")
parser.add_argument("--epochs", type=int, default=180)
parser.add_argument("-b", "--batch_size", type=int, default=128)
parser.add_argument("--transform", type=str, default='default',
help="the data augmentation which will be applied during training.")
parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--progress", action="store_true", default=False,
help="show the progress if true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("-m", "--description", type=str, default="RobOC-AT")
opts = parser.parse_args()
opts.description = FMT.format(**opts.__dict__)
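# Build the full training configuration: model, data loaders, normalizer, optimizer, learning-rate policy, attacker and validator.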
def load_cfg() -> Tuple[Config, str]:
from src.dict2obj import Config
from src.base import Coach, AdversaryForTrain
from src.utils import gpu, set_seed, load_checkpoint
cfg = Config()
set_seed(opts.seed)
# the model and other settings for training
model = load_model(opts.model)(
num_classes=get_num_classes(opts.dataset),
scale=opts.scale
)
device = gpu(model)
# load the dataset
trainset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=True
)
cfg['trainloader'] = load_dataloader(
dataset=trainset,
batch_size=opts.batch_size,
train=True,
show_progress=opts.progress
)
testset = load_dataset(
dataset_type=opts.dataset,
transform=opts.transform,
train=False
)
cfg['testloader'] = load_dataloader(
dataset=testset,
batch_size=opts.batch_size,
train=False,
show_progress=opts.progress
)
normalizer = load_normalizer(dataset_type=opts.dataset)
# load the optimizer and learning_policy
optimizer = load_optimizer(
model=model, optim_type=opts.optimizer, lr=opts.lr,
momentum=opts.momentum, betas=(opts.beta1, opts.beta2),
weight_decay=opts.weight_decay
)
learning_policy = load_learning_policy(
optimizer=optimizer,
learning_policy_type=opts.learning_policy,
T_max=opts.epochs
)
# generate the path for logging information and saving parameters
cfg['info_path'], cfg['log_path'] = generate_path(
method=METHOD, dataset_type=opts.dataset,
model=opts.model, description=opts.description
)
if opts.resume:
cfg['start_epoch'] = load_checkpoint(
path=cfg.info_path, model=model,
optimizer=optimizer, lr_scheduler=learning_policy
)
else:
cfg['start_epoch'] = 0
cfg['coach'] = Coach(
model=model, device=device,
loss_func=load_loss_func(opts.loss)(model=model),
normalizer=normalizer, optimizer=optimizer,
learning_policy=learning_policy
)
# set the attack
attack, bounds, preprocessing = load_attacks(
attack_type=opts.attack, dataset_type=opts.dataset,
stepsize=opts.stepsize, steps=opts.steps
)
cfg['attacker'] = AdversaryForTrain(
model=model, attacker=attack, device=device,
bounds=bounds, preprocessing=preprocessing, epsilon=opts.epsilon
)
cfg['valider'] = load_valider(
model=model, device=device, dataset_type=opts.dataset
)
return cfg
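# Evaluate clean accuracy and attack success rate on the train and test loaders, then log both to TensorBoard and the track meters.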
def evaluate(
valider, trainloader, testloader,
acc_logger, rob_logger, writter,
epoch = 8888
):
train_accuracy, train_success = valider.evaluate(trainloader)
valid_accuracy, valid_success = valider.evaluate(testloader)
print(f"Train >>> [TA: {train_accuracy:.5f}] [RA: {1 - train_success:.5f}]")
print(f"Test. >>> [TA: {valid_accuracy:.5f}] [RA: {1 - valid_success:.5f}]")
writter.add_scalars("Accuracy", {"train":train_accuracy, "valid":valid_accuracy}, epoch)
writter.add_scalars("Success", {"train":train_success, "valid":valid_success}, epoch)
acc_logger.train(data=train_accuracy, T=epoch)
acc_logger.valid(data=valid_accuracy, T=epoch)
rob_logger.train(data=1 - train_success, T=epoch)
rob_logger.valid(data=1 - valid_success, T=epoch)
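# Training driver: checkpoint every SAVE_FREQ epochs, evaluate every PRINT_FREQ epochs, and run one epoch of adversarial training per iteration.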
def main(
coach, attacker, valider,
trainloader, testloader, start_epoch,
info_path, log_path
):
from src.utils import save_checkpoint, TrackMeter, ImageMeter
from src.dict2obj import Config
acc_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
acc_logger.plotter = ImageMeter(*acc_logger.values(), title="Accuracy")
rob_logger = Config(
train=TrackMeter("Train"),
valid=TrackMeter("Valid")
)
rob_logger.plotter = ImageMeter(*rob_logger.values(), title="Robustness")
for epoch in range(start_epoch, opts.epochs):
if epoch % SAVE_FREQ == 0:
save_checkpoint(info_path, coach.model, coach.optimizer, coach.learning_policy, epoch)
if epoch % PRINT_FREQ == 0:
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=epoch
)
running_loss = coach.adv_train(trainloader, attacker, leverage=opts.leverage, epoch=epoch)
writter.add_scalar("Loss", running_loss, epoch)
evaluate(
valider=valider, trainloader=trainloader, testloader=testloader,
acc_logger=acc_logger, rob_logger=rob_logger, writter=writter,
epoch=opts.epochs
)
acc_logger.plotter.plot()
rob_logger.plotter.plot()
acc_logger.plotter.save(writter)
rob_logger.plotter.save(writter)
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
from src.utils import mkdirs, readme
cfg = load_cfg()
mkdirs(cfg.info_path, cfg.log_path)
readme(cfg.info_path, opts)
readme(cfg.log_path, opts, mode="a")
writter = SummaryWriter(log_dir=cfg.log_path, filename_suffix=METHOD)
main(**cfg)
cfg['coach'].save(cfg.info_path)
writter.close()
| 33.974576 | 109 | 0.669494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,619 | 0.201921 |
4012033dc557a9acee5693b0291d1d05afe295c0 | 680 | py | Python | notesapp/api_v1/models.py | kampkelly/drf_template | 44cda3fd4ebf0dc073a46205b392d5e783d9ceea | ["MIT"] | null | null | null | notesapp/api_v1/models.py | kampkelly/drf_template | 44cda3fd4ebf0dc073a46205b392d5e783d9ceea | ["MIT"] | null | null | null | notesapp/api_v1/models.py | kampkelly/drf_template | 44cda3fd4ebf0dc073a46205b392d5e783d9ceea | ["MIT"] | null | null | null
from django.db import models
# Create your models here.
class CommonFieldsMixin(models.Model):
"""Add created_at and updated_at fields."""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
"""Define metadata options."""
abstract = True
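# Category and Notes inherit the created_at/updated_at timestamps from CommonFieldsMixin.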
class Category(CommonFieldsMixin):
name = models.CharField(max_length=250,null=False,unique=True)
class Notes(CommonFieldsMixin):
title = models.CharField(max_length=250,null=False,unique=False)
body = models.TextField(null=False)
category = models.ForeignKey(Category,on_delete=models.CASCADE,default=None)
| 23.448276 | 80 | 0.733824 | 611 | 0.898529 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.145588 |
401276c3187f1d2baed3d5f8ab8ae0afba6d8f18 | 8,534 | py | Python | src/main_TS_tsconv_jma.py | inoue0406/radarJMA | f8996c3fe201f97d414fc96c4abfc6f930738d47 | ["MIT"] | 6 | 2018-12-20T00:32:17.000Z | 2021-05-24T08:29:08.000Z | src/main_TS_tsconv_jma.py | inoue0406/radarJMA | f8996c3fe201f97d414fc96c4abfc6f930738d47 | ["MIT"] | null | null | null | src/main_TS_tsconv_jma.py | inoue0406/radarJMA | f8996c3fe201f97d414fc96c4abfc6f930738d47 | ["MIT"] | 4 | 2018-09-20T07:08:03.000Z | 2020-06-07T21:43:31.000Z
# seq2seq LSTM (no-convolutional model) for time series prediction
import numpy as np
import torch
import torchvision
import torch.utils.data as data
import torchvision.transforms as transforms
import pandas as pd
import h5py
import os
import sys
import json
import time
import pdb
from jma_timeseries_dataset import *
from scaler import *
from train_valid_epoch_tsconv import *
from utils import Logger
from opts_ts import parse_opts
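# Log each parameter's trainability and the total/trainable parameter counts to the model-info file.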
def count_parameters(model,f):
for name,p in model.named_parameters():
f.write("name,"+name+", Trainable, "+str(p.requires_grad)+",#params, "+str(p.numel())+"\n")
Nparam = sum(p.numel() for p in model.parameters())
Ntrain = sum(p.numel() for p in model.parameters() if p.requires_grad)
f.write("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
print("Number of params:"+str(Nparam)+", Trainable parameters:"+str(Ntrain)+"\n")
if __name__ == '__main__':
# parse command-line options
opt = parse_opts()
print(opt)
# create result dir
if not os.path.exists(opt.result_path):
os.mkdir(opt.result_path)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
# generic log file
logfile = open(os.path.join(opt.result_path, 'log_run.txt'),'w')
logfile.write('Start time:'+time.ctime()+'\n')
tstart = time.time()
# model information
modelinfo = open(os.path.join(opt.result_path, 'model_info.txt'),'w')
# prepare scaler for data
if opt.data_scaling == 'linear':
scl = LinearScaler()
if opt.data_scaling == 'root':
scl = RootScaler()
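    # Training phase: build the datasets and loaders, the conv + seq2seq model, optimizer, scheduler and loggers, then train with periodic checkpoints.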
if not opt.no_train:
# loading datasets
train_dataset = JMATSConvDataset(csv_data=opt.train_data_path,
csv_anno=opt.train_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
valid_dataset = JMATSConvDataset(csv_data=opt.valid_data_path,
csv_anno=opt.valid_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
resize=opt.data_resize,
transform=None)
#tstdata = next(iter(train_dataset))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=opt.batch_size,
num_workers=7,
drop_last=True,
shuffle=False)
if opt.model_name == 'seq2seq':
# lstm seq2seq model
CONV_HID_DIM = 32
INPUT_DIM = 1 + CONV_HID_DIM
OUTPUT_DIM = 1
HID_DIM = 512
N_LAYERS = 3
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
from models.seq2seq_convlstm_ts import *
enc = Encoder(INPUT_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2SeqConv(enc, dec, CONV_HID_DIM, device='cuda').cuda()
if opt.transfer_path != 'None':
# Use pretrained weights for transfer learning
print('loading pretrained model:',opt.transfer_path)
model = torch.load(opt.transfer_path)
modelinfo.write('Model Structure \n')
modelinfo.write(str(model))
count_parameters(model,modelinfo)
# modelinfo.close()
if opt.loss_function == 'MSE':
loss_fn = torch.nn.MSELoss()
# Type of optimizers adam/rmsprop
if opt.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
elif opt.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.learning_rate)
# learning rate scheduler
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=opt.lr_decay)
# Prep logger
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'lr'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'loss', 'lr'])
valid_logger = Logger(
os.path.join(opt.result_path, 'valid.log'),
['epoch', 'loss'])
# training
for epoch in range(1,opt.n_epochs+1):
if epoch < 10:
# freeze conv_encoder for first 10 epochs
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = False
else:
# unfreeze conv_encoder for the rest
submodel = next(iter(model.children()))
for param in submodel.parameters():
param.requires_grad = True
count_parameters(model,modelinfo)
#import pdb;pdb.set_trace()
# step scheduler
scheduler.step()
# training & validation
train_epoch(epoch,opt.n_epochs,train_loader,model,loss_fn,optimizer,
train_logger,train_batch_logger,opt,scl)
valid_epoch(epoch,opt.n_epochs,valid_loader,model,loss_fn,
valid_logger,opt,scl)
if epoch % opt.checkpoint == 0:
# save the trained model for every checkpoint
# (1) as binary
torch.save(model,os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.model' % epoch))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path,
'trained_seq2seq_epoch%03d.dict' % epoch))
# save the trained model
# (1) as binary
torch.save(model,os.path.join(opt.result_path, 'trained_seq2seq.model'))
# (2) as state dictionary
torch.save(model.state_dict(),
os.path.join(opt.result_path, 'trained_seq2seq.dict'))
# test datasets if specified
if opt.test:
if opt.no_train:
#load pretrained model from results directory
model_fname = os.path.join(opt.result_path, opt.test_model_fname)
print('loading pretrained model:',model_fname)
model = torch.load(model_fname)
loss_fn = torch.nn.MSELoss()
# prepare loader
test_dataset = JMATSConvDataset(csv_data=opt.test_data_path,
csv_anno=opt.test_anno_path,
use_var=opt.use_var,
root_dir=None,
tdim_use=opt.tdim_use,
transform=None)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=opt.batch_size,
batch_size=3, # small batch size used
num_workers=7,
drop_last=True,
shuffle=False)
# testing for the trained model
test_epoch(test_loader,model,loss_fn,opt,scl)
# output elapsed time
logfile.write('End time: '+time.ctime()+'\n')
tend = time.time()
tdiff = float(tend-tstart)/3600.0
logfile.write('Elapsed time[hours]: %f \n' % tdiff)
| 40.832536 | 99 | 0.522498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,525 | 0.178697 |
40131096d61db66fe053946df5d75b8d65c51a7a | 556 | py | Python | bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | e223fc8abceb2bf26f9a7752d72afe598ac4e1fd | ["MIT"] | 134 | 2015-01-04T04:54:19.000Z | 2021-10-16T07:39:02.000Z | bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | e223fc8abceb2bf26f9a7752d72afe598ac4e1fd | ["MIT"] | 10 | 2016-07-30T21:45:11.000Z | 2021-03-03T14:12:50.000Z | bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | e223fc8abceb2bf26f9a7752d72afe598ac4e1fd | ["MIT"] | 34 | 2015-01-14T16:39:27.000Z | 2021-10-31T11:29:50.000Z
import numpy as np
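# Prior on absolute shading: the cost is the weighted distance of shading values from the configured gray point, optionally measured in log space.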
class ProbAbsoluteShading(object):
def __init__(self, params):
self.params = params
def cost(self, s_nz):
if self.params.abs_shading_weight:
if self.params.abs_shading_log:
return self.params.abs_shading_weight * \
np.abs(np.log(s_nz) - np.log(self.params.abs_shading_gray_point))
else:
return self.params.abs_shading_weight * \
np.abs(s_nz - self.params.abs_shading_gray_point)
else:
return 0
| 30.888889 | 85 | 0.591727 | 534 | 0.960432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |