Dataset schema (column — dtype — observed min / max, or number of classes):

hexsha                                      stringlengths    40 / 40
size                                        int64            5 / 2.06M
ext                                         stringclasses    10 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3 / 248
max_stars_repo_name                         stringlengths    5 / 125
max_stars_repo_head_hexsha                  stringlengths    40 / 78
max_stars_repo_licenses                     sequencelengths  1 / 10
max_stars_count                             int64            1 / 191k
max_stars_repo_stars_event_min_datetime     stringlengths    24 / 24
max_stars_repo_stars_event_max_datetime     stringlengths    24 / 24
max_issues_repo_path                        stringlengths    3 / 248
max_issues_repo_name                        stringlengths    5 / 125
max_issues_repo_head_hexsha                 stringlengths    40 / 78
max_issues_repo_licenses                    sequencelengths  1 / 10
max_issues_count                            int64            1 / 67k
max_issues_repo_issues_event_min_datetime   stringlengths    24 / 24
max_issues_repo_issues_event_max_datetime   stringlengths    24 / 24
max_forks_repo_path                         stringlengths    3 / 248
max_forks_repo_name                         stringlengths    5 / 125
max_forks_repo_head_hexsha                  stringlengths    40 / 78
max_forks_repo_licenses                     sequencelengths  1 / 10
max_forks_count                             int64            1 / 105k
max_forks_repo_forks_event_min_datetime     stringlengths    24 / 24
max_forks_repo_forks_event_max_datetime     stringlengths    24 / 24
content                                     stringlengths    5 / 2.06M
avg_line_length                             float64          1 / 1.02M
max_line_length                             int64            3 / 1.03M
alphanum_fraction                           float64          0 / 1
count_classes                               int64            0 / 1.6M
score_classes                               float64          0 / 1
count_generators                            int64            0 / 651k
score_generators                            float64          0 / 1
count_decorators                            int64            0 / 990k
score_decorators                            float64          0 / 1
count_async_functions                       int64            0 / 235k
score_async_functions                       float64          0 / 1
count_documentation                         int64            0 / 1.04M
score_documentation                         float64          0 / 1
86ab2a7a0d57050e80f3f20e1f2f61131ca45a9a
487
py
Python
new-influx-client.py
benlamonica/energy-monitor
86714a365c91cc05c265de81bce191ff4ab585f8
[ "MIT" ]
null
null
null
new-influx-client.py
benlamonica/energy-monitor
86714a365c91cc05c265de81bce191ff4ab585f8
[ "MIT" ]
null
null
null
new-influx-client.py
benlamonica/energy-monitor
86714a365c91cc05c265de81bce191ff4ab585f8
[ "MIT" ]
null
null
null
import influxdb_client
from influxdb_client import InfluxDBClient

bucket = "python-client-sandbox"
org = "Energy Monitor"
token = "miQdAvNXHiNDVVzPzV5FpkCaR_8qdQ-L1FlPCOXQPI325Kbrh1fgfhkcDUZ4FepaebDdpZ-A1gmtnnjU0_hViA=="
url = "http://localhost:9999"

client = InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api()
write_api.write("my-bucket", "my-org", [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"}, "fields": {"water_level": 1}, "time": 1}])
40.583333
148
0.755647
0
0
0
0
0
0
0
0
251
0.5154
86ab8849571d80e31e545baaa8fc3a7e45faa001
6,176
py
Python
tests/test_agent/test_manhole.py
guidow/pyfarm-agent
bb5d464f9f6549a3db3529a93e3d9f388b365586
[ "Apache-2.0" ]
null
null
null
tests/test_agent/test_manhole.py
guidow/pyfarm-agent
bb5d464f9f6549a3db3529a93e3d9f388b365586
[ "Apache-2.0" ]
null
null
null
tests/test_agent/test_manhole.py
guidow/pyfarm-agent
bb5d464f9f6549a3db3529a93e3d9f388b365586
[ "Apache-2.0" ]
null
null
null
# No shebang line, this module is meant to be imported # # Copyright 2014 Oliver Palmer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import namedtuple from pprint import pprint from random import randint from StringIO import StringIO from textwrap import dedent try: from unittest.mock import patch except ImportError: # pragma: no cover from mock import patch from twisted.internet.protocol import ServerFactory from twisted.cred.portal import Portal from twisted.conch.telnet import ( ITelnetProtocol, TelnetBootstrapProtocol, TelnetTransport) from pyfarm.agent.testutil import TestCase from pyfarm.agent.manhole import ( LoggingManhole, TransportProtocolFactory, TelnetRealm, manhole_factory, show) Peer = namedtuple("Peer", ("host", "port")) class FakeLoggingManhole(LoggingManhole): QUIT = False GET_PEER_CALLS = 0 class terminal(object): RIGHT_ARROW, LEFT_ARROW = None, None class transport(object): @classmethod def getPeer(cls): FakeLoggingManhole.GET_PEER_CALLS += 1 return Peer(os.urandom(12).encode("hex"), randint(1024, 65535)) def handle_QUIT(self): self.QUIT = True class TestManholeBase(TestCase): def setUp(self): TelnetRealm.NAMESPACE = None FakeLoggingManhole.GET_PEER_CALLS = 0 FakeLoggingManhole.QUIT = False class TestManholeFactory(TestManholeBase): def test_assertions(self): with self.assertRaises(AssertionError): manhole_factory(None, "", "") with self.assertRaises(AssertionError): manhole_factory({}, None, "") with self.assertRaises(AssertionError): manhole_factory({}, "", None) def test_instance_one(self): namespace = {"bob": None} username = os.urandom(32).encode("hex") password = os.urandom(32).encode("hex") manhole_factory(namespace, username, password) with self.assertRaises(AssertionError): manhole_factory(namespace, username, password) def test_instance(self): namespace = {"bob": None} username = os.urandom(32).encode("hex") password = os.urandom(32).encode("hex") manhole = manhole_factory(namespace, username, password) self.assertEqual(namespace, {"bob": None}) self.assertEqual( TelnetRealm.NAMESPACE, {"bob": None, "pp": pprint, "show": show}) self.assertIsInstance(manhole, ServerFactory) self.assertIsInstance(manhole.protocol, TransportProtocolFactory) self.assertIsInstance(manhole.protocol.portal, Portal) # There could be multiple password checkers, check for the one # we know we should have added. 
for _, instance in manhole.protocol.portal.checkers.items(): found = False for user, passwd in instance.users.items(): if user == username and passwd == password: found = True if found: break else: self.fail("Failed to find correct username and password.") def test_request_avatar(self): realm = TelnetRealm() avatar = realm.requestAvatar(None, ITelnetProtocol) self.assertEqual(len(avatar), 3) self.assertIs(avatar[0], ITelnetProtocol) self.assertIsInstance(avatar[1], TelnetBootstrapProtocol) self.assertTrue(callable(avatar[2])) def test_request_avatar_error(self): realm = TelnetRealm() with self.assertRaises(NotImplementedError): realm.requestAvatar(None, None) def test_protocol_factory(self): factory = TransportProtocolFactory(None) transport = factory() self.assertIsInstance(transport, TelnetTransport) class TestManholeShow(TestManholeBase): def test_uses_namespace(self): namespace = {"bob": None} username = os.urandom(32).encode("hex") password = os.urandom(32).encode("hex") manhole_factory(namespace, username, password) output = StringIO() with patch("sys.stdout", output): show() output.seek(0) output = output.getvalue().strip() self.assertEqual(output, "objects: ['bob', 'pp', 'show']") def test_custom_object(self): class Foobar(object): a, b, c, d, e = True, 1, "yes", {}, 0.0 output = StringIO() with patch("sys.stdout", output): show(Foobar) output.seek(0) output = output.getvalue().strip() self.assertEqual( output, dedent(""" data attributes of <class 'tests.test_agent.test_manhole.Foobar'> a : True b : 1 c : yes d : {} (0 elements) e : 0.0 """).strip()) def test_wrap_long_line(self): class Foobar(object): a = " " * 90 output = StringIO() with patch("sys.stdout", output): show(Foobar) output.seek(0) output = output.getvalue().strip() self.assertEqual( output, dedent(""" data attributes of <class 'tests.test_agent.test_manhole.Foobar'> a : ' """ + """ '... """).strip()) class TestLoggingManhole(TestManholeBase): def test_line_received(self): f = FakeLoggingManhole() f.lineReceived("exit") self.assertTrue(f.QUIT)
32.505263
79
0.615771
4,864
0.787565
0
0
177
0.028659
0
0
1,433
0.232027
86abdce88613d6ee71e638ae7487297146c3e7a8
338
py
Python
func-button/klSigmode.py
xcgoo/uiKLine
80683401d7dc66262ae645db4c2780d6e71be551
[ "MIT" ]
232
2017-10-11T09:19:03.000Z
2022-03-09T01:34:49.000Z
func-button/klSigmode.py
DON-2020-LEE/uiKLine-2
fd1d0dca5fd6b1542af4b10c110e39361b29d378
[ "MIT" ]
8
2017-12-09T09:10:15.000Z
2021-04-22T03:35:26.000Z
func-button/klSigmode.py
DON-2020-LEE/uiKLine-2
fd1d0dca5fd6b1542af4b10c110e39361b29d378
[ "MIT" ]
132
2017-10-11T09:16:29.000Z
2022-02-09T10:37:57.000Z
# coding: utf-8
"""
插入所有需要的库,和函数
"""
#----------------------------------------------------------------------
def klSigmode(self):
    """查找模式"""
    if self.mode == 'deal':
        self.canvas.updateSig(self.signalsOpen)
        self.mode = 'dealOpen'
    else:
        self.canvas.updateSig(self.signals)
        self.mode = 'deal'
21.125
71
0.446746
0
0
0
0
0
0
0
0
170
0.459459
86acd0c8a74d48d7a1cf116cc0a40300ec411cd2
16,459
py
Python
utils/thin.py
BnF-jadis/projet
212b1e7b179a564650fb959d9c2565648178f6b6
[ "CC-BY-3.0" ]
5
2021-06-17T12:48:45.000Z
2022-01-22T22:23:44.000Z
utils/thin.py
BnF-jadis/projet
212b1e7b179a564650fb959d9c2565648178f6b6
[ "CC-BY-3.0" ]
7
2020-11-13T18:42:14.000Z
2022-02-10T01:31:07.000Z
utils/thin.py
BnF-jadis/projet
212b1e7b179a564650fb959d9c2565648178f6b6
[ "CC-BY-3.0" ]
1
2021-10-17T10:49:45.000Z
2021-10-17T10:49:45.000Z
# 2020, BackThen Maps # Coded by Remi Petitpierre https://github.com/RPetitpierre # For Bibliothèque nationale de France (BnF) import cv2, thinning, os import numpy as np import pandas as pd import shapefile as shp from skimage.measure import approximate_polygon from PIL import Image, ImageDraw from utils.utils import * from utils.match import toLatLon Image.MAX_IMAGE_PIXELS = 500000000 def skeletonize(road_network: np.ndarray, path: str = "workshop/vectorized.png", largest_component: bool = False): ''' Thinning/skeletonization of the road network image to a wired model. Input(s): road_network: black and white image of the road network (streets in white) path: path where the skeletonized image should be saved largest_component: if True, only the largest road network component will be kept Output(s): vectorized: skeletonized image ''' assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image' img = cv2.resize(road_network, (road_network.shape[1]//2, road_network.shape[0]//2)) vectorized = thinning.guo_hall_thinning(img) vectorized[vectorized > 100] = 255 vectorized[vectorized <= 100] = 0 if largest_component: try: _, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8, stats=cv2.CC_STAT_AREA) stats = stats[1:] main_component = (np.argmax(stats[:,4])+1).astype('int32') vectorized = (labels == main_component).astype('uint8')*255 except: 'Warning: Skeletonization failed to apply largest_component = True param. Skipping.' cv2.imwrite(path, vectorized) return vectorized def findNodes(image: np.ndarray): ''' Find the nodes in the road network skeleton image. Input(s): image: skeletonized image Output(s): nodes: array of nodes coordinates (x, y) degree: degrees of the nodes (2=endpoint, 4=crossroads of 3 streets, 5=crossroads of 4 streets, etc.) 
addresses: directions of the crossing roads, with regard to the node ''' img = image.copy() # Find row and column locations that are non-zero (rows, cols) = np.nonzero(img) nodes, degree, addresses = [], [], [] for (r,c) in zip(rows, cols): if r > 0 and c > 0 and r < image.shape[0]-1 and c < image.shape[1]-1: # Extract an 8-connected neighbourhood (col_neigh, row_neigh) = np.meshgrid(np.array([c-1, c, c+1]), np.array([r-1, r, r+1])) # Cast to int to index into image col_neigh = col_neigh.astype('int') row_neigh = row_neigh.astype('int') # Convert into a single 1D array and check for non-zero locations pix_neighbourhood = img[row_neigh, col_neigh].ravel() != 0 # If the number of non-zero locations equals 2, add this to our list of coordinates n_neighbours = np.sum(pix_neighbourhood) if (n_neighbours == 2) or (n_neighbours >= 4): nodes.append((r, c)) degree.append(n_neighbours) direction_set = np.where(pix_neighbourhood == True)[0] direction_set = direction_set[direction_set != 4] addresses.append(direction_set) nodes = np.asarray(nodes) return nodes, degree, addresses def cleanNodesEdges(df_nodes: pd.DataFrame): df = df_nodes.copy() new_addresses, new_degree = [], [] for ind, address in df['address'].iteritems(): new_address = avoidDiagonalEdges(address) new_addresses.append(new_address) new_degree.append(len(new_address) + 1) df['address'] = new_addresses df['degree'] = new_degree return df def avoidDiagonalEdges(address: list, direction: int = None): right, diagonal = [1, 3, 5, 7], {0: [1, 3], 2: [1, 5], 6: [3, 7], 8: [5, 7]} new_address = [] for r in right: if r in address: new_address.append(r) for d in diagonal.keys(): if d in address: if not(diagonal[d][0] in address) and not(diagonal[d][1] in address): if direction != None: if not((8-direction) in diagonal[d]): new_address.append(d) else: new_address.append(d) return new_address def explorePath(start_x: int, start_y: int, start_dir: int, image: np.ndarray, nodes_grid: np.ndarray): ''' Follow the path from one given start node and direction until the next node, and stores the pixels on the way. Input(s): start_x: start node x-coordinate start_y: start node y-coordinate start_dir: starting direction ({0, 1, 2, 3, -, 5, 6, 7, 8}) image: skeletonized image of the road network nodes_grid: grid of the nodes of the skeletonized image Output(s): way: list of pixel coordinates on the way direction: last direction to reach the 2nd node nodes_grid[x, y]: degree of the arrival node ''' def absoluteWay(x: int, y: int, way: int): if way == 0: x_, y_ = x-1, y-1 elif way == 1: x_, y_ = x-1, y elif way == 2: x_, y_ = x-1, y+1 elif way == 3: x_, y_ = x, y-1 elif way == 5: x_, y_ = x, y+1 elif way == 6: x_, y_ = x+1, y-1 elif way == 7: x_, y_ = x+1, y elif way == 8: x_, y_ = x+1, y+1 else: raise AttributeError('Parameters invalid: (' + str(x) + ',' + str(y) + ',' + str(way) + '), way \ should be comprised between 0 and 8, and != 4. 
x, y and way should be of type int.') return x_, y_ def noTurnBack(direction: int): wrong_paths = [] if direction == 0: wrong_paths = [5, 7] elif direction == 1: wrong_paths = [6, 8] elif direction == 2: wrong_paths = [3, 7] elif direction == 3: wrong_paths = [2, 8] elif direction == 5: wrong_paths = [0, 6] elif direction == 6: wrong_paths = [1, 5] elif direction == 7: wrong_paths = [0, 2] elif direction == 8: wrong_paths = [1, 3] return wrong_paths direction = start_dir x, y = start_x, start_y assert image[x, y] != 0, 'ERROR: start point is not white' end = False way = [(x, y)] # First iteration new_x, new_y = absoluteWay(x, y, direction) assert image[new_x, new_y] != 0, 'ERROR: 2nd point is not white' way.append((new_x, new_y)) x, y = new_x, new_y wrong_paths = noTurnBack(direction) wrong_paths_active = True if nodes_grid[x, y]: end = True direction = 8-start_dir while not(end): if x > 0 and y > 0 and x < image.shape[0]-1 and y < image.shape[1]-1: # Extract an 8-connected neighbourhood (row_neigh, col_neigh) = np.meshgrid(np.array([x-1, x, x+1]), np.array([y-1, y, y+1])) # Cast to int to index into image col_neigh, row_neigh = col_neigh.astype('int'), row_neigh.astype('int') # Convert into a single 1D array and check for non-zero locations try: pix_neighbourhood = image[row_neigh, col_neigh].transpose().ravel() != 0 except: print(x, y, image.shape, ) raise AssertionError() # If the number of non-zero locations equals 2, add this to our list of coordinates n_neighbours = np.sum(pix_neighbourhood) direction_set = np.where(pix_neighbourhood == True)[0] last_ds = [wrong_paths] last_ds.append(direction_set) direction_set = direction_set[direction_set != 4] last_ds.append(direction_set) direction_set = direction_set[direction_set != (8-direction)] last_ds.append(direction_set) direction_set = np.asarray(avoidDiagonalEdges(direction_set, direction)) last_ds.append(direction_set) if wrong_paths_active: for wrong_path in wrong_paths: direction_set = direction_set[direction_set != wrong_path] wrong_paths_active = False if len(direction_set) != 1: end = True break direction = direction_set[0] new_x, new_y = absoluteWay(x, y, direction) way.append((new_x, new_y)) x, y = new_x, new_y if nodes_grid[x, y]: end = True else: end = True return way, direction, nodes_grid[x, y] def findSegments(df_nodes: pd.DataFrame, image: np.ndarray, min_length: int = 30, return_simple_ways: bool = True): ''' Find all the road segments in the network. Keep the ones that are longer than a given length or non-terminal. Optionally, compute the Douglas-Peucker simple itinerary of each segment and return it. 
Input(s): df_nodes: list of nodes image: skeletonized image of the road network min_length: min segment length if the segment is terminal return_simple_ways: if True, compute the Douglas-Peucker simple itinerary of each segment and return it Output(s): (Optional)(simple_ways: the Douglas-Peucker simple itinerary of each segmenty) ways: list of segments, containing all the pixels on the way between each couple of nodes nodes_grid: image containing all the nodes found in the image and their degree ''' img = image.copy() done, ways = [], [] df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True) nodes_grid = np.zeros(image.shape) for ind, row in df_nodes[['x', 'y', 'degree']].iterrows(): nodes_grid[row['x'], row['y']] = row['degree'] nodes_grid = nodes_grid.astype('int') for ind, node in df_nodes.iterrows(): for direct in node['address']: code = str(node['x']) + '_' + str(node['y']) + '_' + str(direct) if not(code in done): way, last_direct, degree = explorePath(start_x=node['x'], start_y=node['y'], start_dir=direct, image=img, nodes_grid=nodes_grid) if not((len(way) <= min_length) and ((node['degree'] == 2) or (degree == 2))): done.append(str(way[-1][0]) + '_' + str(way[-1][1]) + '_' + str(8-last_direct)) ways.append(way) if return_simple_ways: simple_ways = [] for way in ways: inv_way = np.asarray([np.asarray(way)[:,1], image.shape[0]-np.asarray(way)[:,0]]).transpose() simple_ways.append(approximate_polygon(np.asarray(inv_way), tolerance=1.6).tolist()) return simple_ways, ways, nodes_grid else: return ways, nodes_grid def thinImage(image: np.ndarray, image_name: str, export_file_path: str, exportPNG: bool = False, exportJSON: bool = False, exportSVG: bool = False, exportSHP: bool = False, geoloc: bool = False): assert (exportPNG or exportJSON or exportSVG or exportSHP) # Convert to B&W road_network = image.copy() road_network[road_network < 254] = 0 road_network[road_network < 255/2] = 0 road_network[road_network >= 255/2] = 255 vectorized = skeletonize(road_network, largest_component = True) nodes, degree, addresses = findNodes(vectorized) if len(degree) < 0: return [], [], np.zeros((image.shape[1], image.shape[0])) df_nodes = pd.DataFrame({'x': nodes[:,0], 'y': nodes[:,1], 'degree': degree, 'address': addresses }) df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True) df_nodes = cleanNodesEdges(df_nodes) df_nodes = df_nodes[df_nodes['degree'] != 3] if (exportJSON or exportSHP): simple_segments, full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15, return_simple_ways = True) else: full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15, return_simple_ways = False) simple_segments = [] if exportPNG: toPNG(full_segments, vectorized, export_file_path) elif exportSVG: toPNG(full_segments, vectorized, os.path.join('workshop', 'thin.png')) if geoloc: if exportJSON: project_name = getProjectName() try: with open(os.path.join('save', project_name, 'match' , 'primary', image_name + '.json')) as data: data = json.load(data) M = np.asarray(data['M']) simple_segments_JSON = [] for segment in simple_segments: s = np.asarray([2*np.asarray(segment)[:,0], image.shape[0]-(2*np.asarray(segment)[:,1])]).T simple_segments_JSON.append(toLatLon((s@M[:, :2]) + M[:, 2:3].transpose()).tolist()) except: print("La géolocalisation de l'image {} n'a pas encore été calculée. 
Par conséquent, \ il n'est pas possible de calculer la géolocalisation de son réseau filaire".format(image_name)) simple_segments_JSON = simple_segments else: print('La géolocalisation du réseau filaire ne fonctionne que pour le format JSON actuellement.') else: simple_segments_JSON = simple_segments if exportJSON: with open(export_file_path.replace('png', 'json'), 'w') as outfile: json.dump(simple_segments_JSON, outfile) if exportSHP: os.makedirs(export_file_path.replace('.png', ''), exist_ok=True) toShapefile(simple_segments, os.path.join(export_file_path.replace('.png', ''), image_name)) if exportSVG: print("\nAvertissement: Si vous n'avez jamais utilisé cette commande, \ installez d'abord Homebrew, ImageMagick et Potrace via le terminal.\n") print('Pour installer Homebrew:\n', ' /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"') print('Pour installer ImageMagick:\n', ' brew install imagemagick') print('Pour installer Potrace: \n', ' brew install potrace\n') if exportPNG: png_path = export_file_path else: png_path = os.path.join('workshop', 'thin.png') pnm_path = os.path.join('workshop', 'thin.pnm') svg_path = export_file_path.replace('png', 'svg') os.system('convert ' + png_path + pnm_path) os.system('potrace ' + pnm_path + ' -s -o ' + svg_path) return simple_segments, full_segments, nodes_grid def toPNG(segments: list, vectorized: np.ndarray, out_path: str): ''' Save a given set of segments as a bitmap image from the road network. Input(s): segments: list of segments, containing all the pixels on the way between each couple of nodes vectorized: skeletonized image of the road network out_path: the path, where the output bitmap image should be save ''' canvas = (np.ones(vectorized.shape)*255).astype('uint8') cv2.imwrite('workshop/canvas.png', canvas); bitmap = Image.open('workshop/canvas.png') draw = ImageDraw.Draw(bitmap) for segment in segments: coords = [] for point in segment: coords.append((point[1], point[0])) draw.line(coords, fill = 'black', width=0) bitmap.save(out_path) def toShapefile(simple_ways, out_path): w = shp.Writer(out_path) w.field('DeletionFlag', 'C', 1, 0) w.field('gid', 'N', 11, 0) w.field('streetname', 'C', 41, 0) w.field('note', 'C', 32, 0) for i in range(len(simple_ways)): w.line([simple_ways[i]]) w.record('01', i, '', '') w.close()
37.663616
125
0.584118
0
0
0
0
0
0
0
0
4,610
0.279903
86acd82b514b30458fa54cefc7db6d72f32e8646
875
py
Python
easy2fa/tests/test_checkinput.py
lutostag/otp
0792548fa51c489cdc5fcb01a3c6dad1cd453154
[ "MIT" ]
3
2018-01-22T13:45:12.000Z
2022-01-27T04:17:52.000Z
easy2fa/tests/test_checkinput.py
lutostag/otp
0792548fa51c489cdc5fcb01a3c6dad1cd453154
[ "MIT" ]
1
2017-01-24T23:57:51.000Z
2017-12-11T14:33:32.000Z
easy2fa/tests/test_checkinput.py
lutostag/otp
0792548fa51c489cdc5fcb01a3c6dad1cd453154
[ "MIT" ]
null
null
null
from unittest import TestCase
from unittest.mock import patch

from easy2fa import cli


class TestCheckInput(TestCase):
    @patch('builtins.input')
    def test_default(self, mock_input):
        mock_input.return_value = ''
        self.assertEquals(cli.check_input('prompt', default='one'), 'one')
        mock_input.return_value = 'two'
        self.assertEquals(cli.check_input('prompt', default='one'), 'two')

    @patch('builtins.input')
    @patch('builtins.print')
    def test_assertions(self, mock_print, mock_input):
        def assertion(value):
            if value not in ['yes', 'no']:
                return 'use yes or no'
        mock_input.side_effect = ['input', '', 'no']
        self.assertEquals(cli.check_input('prompt', assertion=assertion), 'no')
        mock_print.assert_called_with('\tInvalid input: use yes or no')
33.653846
74
0.634286
785
0.897143
0
0
743
0.849143
0
0
172
0.196571
86ad342de7b5dfdb142a5dff63b155f6c655c5c6
2,845
py
Python
bert_finetuning/data_loader.py
nps1ngh/adversarial-bert-german-attacks-defense
3cca292ec4c3c07945f4198ae81e1f671462ed90
[ "Apache-2.0" ]
null
null
null
bert_finetuning/data_loader.py
nps1ngh/adversarial-bert-german-attacks-defense
3cca292ec4c3c07945f4198ae81e1f671462ed90
[ "Apache-2.0" ]
null
null
null
bert_finetuning/data_loader.py
nps1ngh/adversarial-bert-german-attacks-defense
3cca292ec4c3c07945f4198ae81e1f671462ed90
[ "Apache-2.0" ]
null
null
null
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from bert_finetuning.data import GermanData class GermanDataLoader: def __init__( self, data_paths, model_name, do_cleansing, max_sequence_length, batch_size=8, dataset_cls=GermanData, ): self.german_data = dataset_cls( data_paths, model_name, max_sequence_length=max_sequence_length, do_cleansing=do_cleansing, ) self.batch_size = batch_size self.create_loaders() def create_loaders(self): """ Create Torch dataloaders for data splits """ self.german_data.text_to_tensors() print("creating dataloaders") train_data = TensorDataset( self.german_data.train_inputs, self.german_data.train_masks, self.german_data.train_labels, ) train_sampler = RandomSampler(train_data) self.train_dataloader = DataLoader( train_data, sampler=train_sampler, batch_size=self.batch_size ) validation_data = TensorDataset( self.german_data.validation_inputs, self.german_data.validation_masks, self.german_data.validation_labels, ) validation_sampler = SequentialSampler(validation_data) self.validation_dataloader = DataLoader( validation_data, sampler=validation_sampler, batch_size=self.batch_size ) test_data = TensorDataset( self.german_data.test_inputs, self.german_data.test_masks, self.german_data.test_labels, ) test_sampler = SequentialSampler(test_data) self.test_dataloader = DataLoader( test_data, sampler=test_sampler, batch_size=self.batch_size ) print("finished creating dataloaders") """ ** FOR DEBUGGING ** if __name__ == "__main__": ## define data paths germeval_data_paths = { "train": "./datasets/hasoc_dataset/hasoc_german_train.csv", "dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv", "test": "./datasets/hasoc_dataset/hasoc_german_test.csv", } hasoc_german_data_paths = { "train": "./datasets/hasoc_dataset/hasoc_german_train.csv", "dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv", "test": "./datasets/hasoc_dataset/hasoc_german_test.csv", } ## create dataloaders print("creating germeval dataloaders...") germ_eval_dataloader = GermanDataLoader(germeval_data_paths) print("creating hasoc dataloaders...") hasoc_german_dataloader = GermanDataLoader(hasoc_german_data_paths) """
31.966292
89
0.634798
1,852
0.650967
0
0
0
0
0
0
963
0.338489
86ae167dd0746f0077e0b0c327435fcca99f837b
1,973
py
Python
data/dirty_mnist.py
Karthik-Ragunath/DDU
b9daae9304bdeb222857884ef8cb3b6b3d004d33
[ "MIT" ]
43
2021-05-20T14:07:53.000Z
2022-03-23T12:58:26.000Z
data/dirty_mnist.py
Karthik-Ragunath/DDU
b9daae9304bdeb222857884ef8cb3b6b3d004d33
[ "MIT" ]
3
2021-09-19T20:49:21.000Z
2022-03-07T10:25:47.000Z
data/dirty_mnist.py
Karthik-Ragunath/DDU
b9daae9304bdeb222857884ef8cb3b6b3d004d33
[ "MIT" ]
8
2021-06-26T15:28:45.000Z
2022-02-19T02:07:05.000Z
import torch
import numpy as np
import torch.utils.data as data
from torch.utils.data import Subset

from data.fast_mnist import create_MNIST_dataset
from data.ambiguous_mnist.ambiguous_mnist_dataset import AmbiguousMNIST


def get_train_valid_loader(root, batch_size, val_seed=1, val_size=0.1, **kwargs):
    error_msg = "[!] val_size should be in the range [0, 1]."
    assert (val_size >= 0) and (val_size <= 1), error_msg

    # load the dataset
    mnist_train_dataset, _ = create_MNIST_dataset()

    # AmbiguousMNIST does whiten the data itself
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    train_dataset = data.ConcatDataset(
        [mnist_train_dataset, AmbiguousMNIST(root=root, train=True, device=device),]
    )
    valid_dataset = data.ConcatDataset(
        [mnist_train_dataset, AmbiguousMNIST(root=root, train=True, device=device),]
    )

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split = int(np.floor(val_size * num_train))

    np.random.seed(val_seed)
    np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_subset = Subset(train_dataset, train_idx)
    valid_subset = Subset(valid_dataset, valid_idx)

    train_loader = torch.utils.data.DataLoader(train_subset, batch_size=batch_size, num_workers=0, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_subset, batch_size=batch_size, num_workers=0, shuffle=False)

    return train_loader, valid_loader


def get_test_loader(root, batch_size, **kwargs):
    # load the dataset
    _, mnist_test_dataset = create_MNIST_dataset()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    test_dataset = data.ConcatDataset(
        [mnist_test_dataset, AmbiguousMNIST(root=root, train=False, device=device),]
    )

    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0)

    return test_loader
34.017241
113
0.737456
0
0
0
0
0
0
0
0
147
0.074506
86ae868b0b9598e5f2e99607cce26d99b3a34dc3
4,147
py
Python
vantage6/server/resource/recover.py
jaspersnel/vantage6-server
88ad40d23cc36eaba57c170929f7ccdd0011720a
[ "Apache-2.0" ]
2
2020-10-19T08:59:08.000Z
2022-03-07T10:30:21.000Z
vantage6/server/resource/recover.py
jaspersnel/vantage6-server
88ad40d23cc36eaba57c170929f7ccdd0011720a
[ "Apache-2.0" ]
67
2020-04-15T09:43:31.000Z
2022-03-18T08:29:17.000Z
vantage6/server/resource/recover.py
jaspersnel/vantage6-server
88ad40d23cc36eaba57c170929f7ccdd0011720a
[ "Apache-2.0" ]
2
2021-01-21T15:09:26.000Z
2021-04-19T14:58:10.000Z
# -*- coding: utf-8 -*- import logging import datetime from flask import request, render_template from flask_jwt_extended import ( create_access_token, decode_token ) from jwt.exceptions import DecodeError from flasgger import swag_from from http import HTTPStatus from pathlib import Path from sqlalchemy.orm.exc import NoResultFound from vantage6.common import logger_name from vantage6.server import db from vantage6.server.resource import ( ServicesResources ) module_name = logger_name(__name__) log = logging.getLogger(module_name) def setup(api, api_base, services): path = "/".join([api_base, module_name]) log.info(f'Setting up "{path}" and subdirectories') api.add_resource( ResetPassword, path+'/reset', endpoint="reset_password", methods=('POST',), resource_class_kwargs=services ) api.add_resource( RecoverPassword, path+'/lost', endpoint='recover_password', methods=('POST',), resource_class_kwargs=services ) # ------------------------------------------------------------------------------ # Resources / API's # ------------------------------------------------------------------------------ class ResetPassword(ServicesResources): """user can use recover token to reset their password.""" @swag_from(str(Path(r"swagger/post_reset_password.yaml")), endpoint='reset_password') def post(self): """"submit email-adress receive token.""" # retrieve user based on email or username body = request.get_json() reset_token = body.get("reset_token") password = body.get("password") if not reset_token or not password: return {"msg": "reset token and/or password is missing!"}, \ HTTPStatus.BAD_REQUEST # obtain user try: user_id = decode_token(reset_token)['identity'].get('id') except DecodeError: return {"msg": "Invalid recovery token!"}, HTTPStatus.BAD_REQUEST log.debug(user_id) user = db.User.get(user_id) # set password user.set_password(password) user.save() log.info(f"Successfull password reset for '{user.username}'") return {"msg": "password successfully been reset!"}, \ HTTPStatus.OK class RecoverPassword(ServicesResources): """send a mail containing a recover token""" @swag_from(str(Path(r"swagger/post_recover_password.yaml")), endpoint='recover_password') def post(self): """username or email generates a token which is mailed.""" # default return string ret = {"msg": "If the username or email is our database you " "will soon receive an email"} # obtain username/email from request' body = request.get_json() username = body.get("username") email = body.get("email") if not (email or username): return {"msg": "No username or email provided!"}, \ HTTPStatus.BAD_REQUEST # find user in the database, if not here we stop! try: if username: user = db.User.get_by_username(username) else: user = db.User.get_by_email(email) except NoResultFound: # we do not tell them.... But we won't continue either return ret log.info(f"Password reset requested for '{user.username}'") # generate a token that can reset their password expires = datetime.timedelta(hours=1) reset_token = create_access_token( {"id": str(user.id)}, expires_delta=expires ) self.mail.send_email( "password reset", sender="[email protected]", recipients=[user.email], text_body=render_template("mail/reset_password_token.txt", token=reset_token), html_body=render_template("mail/reset_password_token.html", token=reset_token) ) return ret
30.718519
80
0.590306
2,911
0.701953
0
0
2,708
0.653002
0
0
1,384
0.333735
86b032b82ee76fccb3eab7e57dd8b06b6868e592
2,633
py
Python
examples/basic_examples/aws_sns_sqs_middleware_service.py
tranvietanh1991/tomodachi
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
[ "MIT" ]
1
2021-11-01T02:18:55.000Z
2021-11-01T02:18:55.000Z
examples/basic_examples/aws_sns_sqs_middleware_service.py
tranvietanh1991/tomodachi
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
[ "MIT" ]
1
2020-12-28T16:16:53.000Z
2020-12-28T16:16:53.000Z
examples/basic_examples/aws_sns_sqs_middleware_service.py
tranvietanh1991/tomodachi
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
[ "MIT" ]
null
null
null
import os
from typing import Any, Callable, Dict

import tomodachi
from tomodachi import aws_sns_sqs, aws_sns_sqs_publish
from tomodachi.discovery import AWSSNSRegistration
from tomodachi.envelope import JsonBase


async def middleware_function(
    func: Callable, service: Any, message: Any, topic: str, context: Dict, *args: Any, **kwargs: Any
) -> Any:
    # Functionality before function is called
    service.log("middleware before")

    return_value = await func(*args, **kwargs)

    # There's also the possibility to pass in extra arguments or keywords arguments, for example:
    # return_value = await func(*args, id='overridden', **kwargs)

    # Functionality after function is called
    service.log("middleware after")

    return return_value


class ExampleAWSSNSSQSService(tomodachi.Service):
    name = "example-aws-sns-sqs-service"
    log_level = "INFO"
    uuid = str(os.environ.get("SERVICE_UUID") or "")

    # Build own "discovery" functions, to be run on start and stop
    # See tomodachi/discovery/aws_sns_registration.py for example
    discovery = [AWSSNSRegistration]

    # The message envelope class defines how a message should be processed when sent and received
    # See tomodachi/envelope/json_base.py for a basic example using JSON and transferring some metadata
    message_envelope = JsonBase

    # Adds a middleware function that is run on every incoming message.
    # Several middlewares can be chained.
    message_middleware = [middleware_function]

    # Some options can be specified to define credentials, used ports, hostnames, access log, etc.
    options = {
        "aws_sns_sqs": {
            "region_name": None,  # specify AWS region (example: 'eu-west-1')
            "aws_access_key_id": None,  # specify AWS access key (example: 'AKIAXNTIENCJIY2STOCI')
            "aws_secret_access_key": None,  # specify AWS secret key (example: 'f7sha92hNotarealsecretkeyn29ShnSYQi3nzgA')
        },
        "aws_endpoint_urls": {
            "sns": None,  # For example 'http://localhost:4575' if localstack is used for testing
            "sqs": None,  # For example 'http://localhost:4576' if localstack is used for testing
        },
    }

    @aws_sns_sqs("example-route1")
    async def route1a(self, data: Any) -> None:
        self.log('Received data (function: route1a) - "{}"'.format(data))

    async def _started_service(self) -> None:
        async def publish(data: Any, topic: str) -> None:
            self.log('Publish data "{}"'.format(data))
            await aws_sns_sqs_publish(self, data, topic=topic, wait=False)

        await publish("友達", "example-route1")
39.298507
122
0.692366
1,874
0.710656
0
0
152
0.057641
941
0.356845
1,355
0.513841
86b0a422c8bc9f85b86cb962da85b578f24f06e1
425
py
Python
ex9.py
ThitsarAung/python-exercises
bca97875e25f9621fc5f58ab1d360426a21efc7f
[ "MIT" ]
null
null
null
ex9.py
ThitsarAung/python-exercises
bca97875e25f9621fc5f58ab1d360426a21efc7f
[ "MIT" ]
null
null
null
ex9.py
ThitsarAung/python-exercises
bca97875e25f9621fc5f58ab1d360426a21efc7f
[ "MIT" ]
null
null
null
types_of_people = 10
x = f"There are {types_of_people} types of people."

binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."

print(x)
print(y)

print(f"I said: {x}")
print(f"I also said: '{y}'")

hilarious = False
joke_evaluation = "Isn't that joke so funny?! {}"

print(joke_evaluation.format(hilarious))

w = "This is the left side of..."
e = "a string with a right side."

print(w + e)
18.478261
54
0.672941
0
0
0
0
0
0
0
0
236
0.555294
86b2f2b4446116811cbd5f27739dd93c92634c93
7,182
py
Python
mmdnn/conversion/caffe/writer.py
2yz/MMdnn
13d909e4b591a5043b74b611e412c3c0a5eba0cc
[ "MIT" ]
3,442
2017-11-20T08:39:51.000Z
2019-05-06T10:51:19.000Z
mmdnn/conversion/caffe/writer.py
2yz/MMdnn
13d909e4b591a5043b74b611e412c3c0a5eba0cc
[ "MIT" ]
430
2017-11-29T04:21:48.000Z
2019-05-06T05:37:37.000Z
mmdnn/conversion/caffe/writer.py
2yz/MMdnn
13d909e4b591a5043b74b611e412c3c0a5eba0cc
[ "MIT" ]
683
2017-11-20T08:50:34.000Z
2019-05-04T04:25:14.000Z
import base64 from google.protobuf import json_format from importlib import import_module import json import numpy as np import os import sys from mmdnn.conversion.caffe.errors import ConversionError from mmdnn.conversion.caffe.common_graph import fetch_attr_value from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name class JsonFormatter(object): '''Dumpt a DL graph into a Json file.''' def __init__(self, graph): self.graph_def = graph.as_graph_def() def dump(self, json_path): json_txt = json_format.MessageToJson(self.graph_def) parsed = json.loads(json_txt) formatted = json.dumps(parsed, indent=4, sort_keys=True) with open(json_path, 'w') as f: f.write(formatted) class PyWriter(object): '''Dumpt a DL graph into a Python script.''' def __init__(self, graph, data, target): self.graph = graph self.data = data self.tab = ' ' * 4 self.prefix = '' target = target.lower() if target == 'tensorflow': self.target = target self.net = 'TensorFlowNetwork' elif target == 'keras': self.target = target self.net = 'KerasNetwork' elif target == 'caffe': self.target = target self.net = 'CaffeNetwork' else: raise ConversionError('Target %s is not supported yet.' % target) def indent(self): self.prefix += self.tab def outdent(self): self.prefix = self.prefix[:-len(self.tab)] def statement(self, s): return self.prefix + s + '\n' def emit_imports(self): return self.statement('from dlconv.%s import %s\n' % (self.target, self.net)) def emit_class_def(self, name): return self.statement('class %s(%s):' % (name, self.net)) def emit_setup_def(self): return self.statement('def setup(self):') def emit_node(self, node): '''Emits the Python source for this node.''' def pair(key, value): return '%s=%s' % (key, value) args = [] for input in node.input: input = input.strip().split(':') name = ''.join(input[:-1]) idx = int(input[-1]) assert name in self.graph.node_dict parent = self.graph.get_node(name) args.append(parent.output[idx]) #FIXME: output = [node.output[0]] # output = node.output for k, v in node.attr: if k == 'cell_type': args.append(pair(k, "'" + fetch_attr_value(v) + "'")) else: args.append(pair(k, fetch_attr_value(v))) args.append(pair('name', "'" + node.name + "'")) # Set the node name args = ', '.join(args) return self.statement('%s = self.%s(%s)' % (', '.join(output), node.op, args)) def dump(self, code_output_dir): if not os.path.exists(code_output_dir): os.makedirs(code_output_dir) file_name = get_lower_case(self.graph.name) code_output_path = os.path.join(code_output_dir, file_name + '.py') data_output_path = os.path.join(code_output_dir, file_name + '.npy') with open(code_output_path, 'w') as f: f.write(self.emit()) with open(data_output_path, 'wb') as f: np.save(f, self.data) return code_output_path, data_output_path def emit(self): # Decompose DAG into chains chains = [] for node in self.graph.topologically_sorted(): attach_to_chain = None if len(node.input) == 1: parent = get_real_name(node.input[0]) for chain in chains: if chain[-1].name == parent: # Node is part of an existing chain. attach_to_chain = chain break if attach_to_chain is None: # Start a new chain for this node. 
attach_to_chain = [] chains.append(attach_to_chain) attach_to_chain.append(node) # Generate Python code line by line source = self.emit_imports() source += self.emit_class_def(self.graph.name) self.indent() source += self.emit_setup_def() self.indent() blocks = [] for chain in chains: b = '' for node in chain: b += self.emit_node(node) blocks.append(b[:-1]) source += '\n\n'.join(blocks) return source class ModelSaver(object): def __init__(self, code_output_path, data_output_path): self.code_output_path = code_output_path self.data_output_path = data_output_path def dump(self, model_output_dir): '''Return the file path containing graph in generated model files.''' if not os.path.exists(model_output_dir): os.makedirs(model_output_dir) sys.path.append(os.path.dirname(self.code_output_path)) file_name = os.path.splitext(os.path.basename(self.code_output_path))[0] module = import_module(file_name) class_name = get_upper_case(file_name) net = getattr(module, class_name) return net.dump(self.data_output_path, model_output_dir) class GraphDrawer(object): def __init__(self, toolkit, meta_path): self.toolkit = toolkit.lower() self.meta_path = meta_path def dump(self, graph_path): if self.toolkit == 'tensorflow': from dlconv.tensorflow.visualizer import TensorFlowVisualizer if self._is_web_page(graph_path): TensorFlowVisualizer(self.meta_path).dump_html(graph_path) else: raise NotImplementedError('Image format or %s is unsupported!' % graph_path) elif self.toolkit == 'keras': from dlconv.keras.visualizer import KerasVisualizer png_path, html_path = (None, None) if graph_path.endswith('.png'): png_path = graph_path elif self._is_web_page(graph_path): png_path = graph_path + ".png" html_path = graph_path else: raise NotImplementedError('Image format or %s is unsupported!' % graph_path) KerasVisualizer(self.meta_path).dump_png(png_path) if html_path: self._png_to_html(png_path, html_path) os.remove(png_path) else: raise NotImplementedError('Visualization of %s is unsupported!' % self.toolkit) def _is_web_page(self, path): return path.split('.')[-1] in ('html', 'htm') def _png_to_html(self, png_path, html_path): with open(png_path, "rb") as f: encoded = base64.b64encode(f.read()).decode('utf-8') source = """<!DOCTYPE> <html> <head> <meta charset="utf-8"> <title>Keras</title> </head> <body> <img alt="Model Graph" src="data:image/png;base64,{base64_str}" /> </body> </html>""".format(base64_str=encoded) with open(html_path, 'w', encoding='utf-8') as f: f.write(source)
35.731343
92
0.589112
6,814
0.948761
0
0
0
0
0
0
1,036
0.14425
86b35d8336f90b1f441624f230053b48e0260a33
1,258
py
Python
week1/85-maximal-rectangle.py
LionTao/algo_weekend
d25756761d47491b8c78ecf8a857080497910c76
[ "Unlicense" ]
null
null
null
week1/85-maximal-rectangle.py
LionTao/algo_weekend
d25756761d47491b8c78ecf8a857080497910c76
[ "Unlicense" ]
null
null
null
week1/85-maximal-rectangle.py
LionTao/algo_weekend
d25756761d47491b8c78ecf8a857080497910c76
[ "Unlicense" ]
null
null
null
""" leetcode-85 给定一个仅包含 0 和 1 , 大小为 rows x cols 的二维二进制矩阵, 找出只包含 1 的最大矩形, 并返回其面积。 """ from typing import List class Solution: def maximalRectangle(self, matrix: List[List[str]]) -> int: """ 统计直方图然后单调递增栈 """ rows = len(matrix) if rows == 0: return 0 columns = len(matrix[0]) res = 0 heights = [0]*columns for r in range(rows): for c in range(columns): if matrix[r][c]=="1": heights[c]+=1 else: heights[c]=0 res = max(res,self.largestRectangleArea(heights)) def largestRectangleArea(self, heights: List[int]) -> int: #单调递增栈 heights = [-1] + heights + [-1] res = 0 ascend_stack = [] for i in range(len(heights)): while ascend_stack and heights[ascend_stack[-1]] > heights[i]: window_L_height_min_height = heights[ascend_stack.pop(-1)] window_L = ascend_stack[-1] + 1 window_R = i - 1 cur_area = window_L_height_min_height * (window_R - window_L + 1) res = max(res, cur_area) ascend_stack.append(i) return res
32.25641
81
0.509539
1,183
0.867302
0
0
0
0
0
0
235
0.172287
86b35f885b38c215bfc2684f695ba3ae9b742e9a
9,347
py
Python
pandapower/test/opf/test_costs_pwl.py
mathildebadoual/pandapower
9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc
[ "BSD-3-Clause" ]
1
2020-10-19T06:39:15.000Z
2020-10-19T06:39:15.000Z
pandapower/test/opf/test_costs_pwl.py
miek770/pandapower
de004efc1b7432a633792af4f551f7635a02db47
[ "BSD-3-Clause" ]
null
null
null
pandapower/test/opf/test_costs_pwl.py
miek770/pandapower
de004efc1b7432a633792af4f551f7635a02db47
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics # and Energy System Technology (IEE), Kassel. All rights reserved. import numpy as np import pytest from pandapower.optimal_powerflow import OPFNotConverged import pandapower as pp try: import pplog as logging except ImportError: import logging logger = logging.getLogger(__name__) logger.setLevel("DEBUG") def test_cost_piecewise_linear_gen(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -100], [-75, -50], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - net.res_gen.p_kw.values / 1.5 < 1e-3 def test_cost_piecewise_linear_eg(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10) pp.create_ext_grid(net, 0, max_p_kw=0, min_p_kw=-50) pp.create_gen(net, 1, p_kw=-10, max_p_kw=0, min_p_kw=-50, controllable=True) # pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "ext_grid", np.array([[-50, -500], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - - net.res_ext_grid.p_kw.values * 10 < 1e-3 # check and assert result def test_get_costs(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -300], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost == 2 * net.res_gen.p_kw.values # check and assert result def test_cost_piecewise_linear_sgen(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) 
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -100], [-75, -50], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3 def test_cost_piecewise_linear_load(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50, max_q_kvar=0, min_q_kvar=0) pp.create_ext_grid(net, 0) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 50], [150, 100]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3 def test_cost_piecewise_linear_sgen_uneven_slopes(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -200], [-75, -50], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3 def test_cost_piecewise_linear_load_uneven_slopes(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) 
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50, max_q_kvar=0, min_q_kvar=0) pp.create_ext_grid(net, 0) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 51], [150, 101]])) # run OPF with pytest.raises(OPFNotConverged): pp.runopp(net, verbose=False) assert net["OPF_converged"] assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3 def test_cost_piecewise_linear_sgen_very_unsteady_slopes(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.5 vm_min = 0.5 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_sgen(net, 1, p_kw=-1000, controllable=True, max_p_kw=0, min_p_kw=-1500, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-1500, 2],[-750,1 ], [0,2]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] # assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3 if __name__ == "__main__": # test_cost_piecewise_linear_sgen_very_unsteady_slopes() pytest.main(["test_costs_pwl.py", "-s"])
37.09127
99
0.644378
0
0
0
0
0
0
0
0
1,645
0.175992
86b3d8112beb6b385c29392912e1d48581db14c2
680
py
Python
cookie_refresh.py
guoxianru/cookie_pool_lite
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
[ "Apache-2.0" ]
null
null
null
cookie_refresh.py
guoxianru/cookie_pool_lite
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
[ "Apache-2.0" ]
null
null
null
cookie_refresh.py
guoxianru/cookie_pool_lite
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# @Author: GXR
# @CreateTime: 2022-01-20
# @UpdateTime: 2022-01-20
import redis

import config
import cookie_login
from cookie_api import app

red = redis.Redis(
    host=config.REDIS_HOST,
    port=config.REDIS_PORT,
    db=config.REDIS_DB,
    decode_responses=True,
)


# 刷新cookie数量
def cookie_refresh():
    while 1:
        cookie_list = red.smembers(config.REDIS_KEY_COOKIE)
        if len(cookie_list) >= config.COOKIE_COUNT:
            break
        cookie_login.run_cookie_login(1)
    app.logger.info("[cookie数量正常]-[%s]" % len(cookie_list))


def run_cookie_refresh():
    cookie_refresh()


if __name__ == "__main__":
    run_cookie_refresh()
18.888889
59
0.679412
0
0
0
0
0
0
0
0
144
0.206897
86b46d24f10eba79c88afa632d31ceb83f18b3b1
292
py
Python
feemodel/app/__init__.py
bitcoinfees/feemodel
5d582d87eca6e54eb20b81f4e21c81273a59b468
[ "MIT" ]
12
2015-08-12T03:00:59.000Z
2017-06-08T11:01:09.000Z
feemodel/app/__init__.py
bitcoinfees/feemodel
5d582d87eca6e54eb20b81f4e21c81273a59b468
[ "MIT" ]
6
2015-11-10T04:02:25.000Z
2016-03-16T02:57:14.000Z
feemodel/app/__init__.py
bitcoinfees/feemodel
5d582d87eca6e54eb20b81f4e21c81273a59b468
[ "MIT" ]
3
2016-03-10T17:08:41.000Z
2022-02-24T18:51:21.000Z
from feemodel.app.transient import TransientOnline
from feemodel.app.pools import PoolsOnlineEstimator
from feemodel.app.predict import Prediction
from feemodel.app.simonline import SimOnline

__all__ = [
    'TransientOnline',
    'PoolsOnlineEstimator',
    'Prediction',
    'SimOnline'
]
24.333333
51
0.784247
0
0
0
0
0
0
0
0
62
0.212329
86b4af0033c71e00f4e30f0ac3bfd045c1932aa8
760
py
Python
examples/server/models/image_file_upload.py
ParikhKadam/django-angular
1fdd2ab3211ed1655acc2d172d826ed7f3ad0574
[ "MIT" ]
941
2015-01-01T18:17:43.000Z
2022-02-26T07:45:40.000Z
examples/server/models/image_file_upload.py
ParikhKadam/django-angular
1fdd2ab3211ed1655acc2d172d826ed7f3ad0574
[ "MIT" ]
228
2015-01-11T16:36:34.000Z
2022-03-11T23:17:15.000Z
examples/server/models/image_file_upload.py
ParikhKadam/django-angular
1fdd2ab3211ed1655acc2d172d826ed7f3ad0574
[ "MIT" ]
294
2015-01-04T09:01:33.000Z
2022-02-26T07:45:41.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# start tutorial
from django.db import models
from djng.forms import NgModelFormMixin, NgFormValidationMixin
from djng.styling.bootstrap3.forms import Bootstrap3ModelForm


class SubscribeUser(models.Model):
    full_name = models.CharField(
        "Full name",
        max_length=99)

    avatar = models.ImageField("Avatar", blank=False, null=True)

    permit = models.FileField("Permit", blank=True, null=True)


class SubscribeForm(NgModelFormMixin, NgFormValidationMixin, Bootstrap3ModelForm):
    use_required_attribute = False
    scope_prefix = 'subscribe_data'
    form_name = 'my_form'

    class Meta:
        model = SubscribeUser
        fields = ['full_name', 'avatar', 'permit']
28.148148
82
0.728947
519
0.682895
0
0
0
0
0
0
118
0.155263
86b6adb997cbd21ec9e8e9a5843dcd2235408ae3
2,997
py
Python
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
yangulei/tvm
d2cbdf381b68134951bfd7525c6a3a67838e5bdf
[ "Apache-2.0" ]
4,640
2017-08-17T19:22:15.000Z
2019-11-04T15:29:46.000Z
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
dmlc/tvm
1e0e9548a6875241267481a4223b4dbf29fa1641
[ "Apache-2.0" ]
2,863
2017-08-17T19:55:50.000Z
2019-11-04T17:18:41.000Z
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
yelite/tvm
7ae919292d42f5858d4db04533bca67b4b5bb44f
[ "Apache-2.0" ]
1,352
2017-08-17T19:30:38.000Z
2019-11-04T16:09:29.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name """Compute and schedule for add, multiply, subtract slice op Please note the following assumptions made by the implementation: 1) The inputs will be multiple of crouton layout except for the axis that needs broadcasting.""" from tvm import te from tvm import tir from tvm import topi from ..utils import get_layout_transform_fn def add_broadcast_compute(input_a, input_b): """Call the add op from topi""" return topi.add(input_a, input_b) def subtract_broadcast_compute(input_a, input_b): """Call the subtract op from topi""" return topi.subtract(input_a, input_b) def multiply_broadcast_compute(input_a, input_b): """Call the multiply op from topi""" return topi.multiply(input_a, input_b) def tir_broadcast_schedule( out_m, input_a, input_b, output_layout: str, input_a_layout: str, input_b_layout: str, op_name: str, ): """Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast""" func = te.create_prim_func([input_a, input_b, out_m]) s = tir.Schedule(func) block_dict = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"} block = s.get_block(block_dict[op_name]) if input_a_layout == "nhwc-8h2w32c2w-2d": input_a_transformed_layout = get_layout_transform_fn(input_a_layout) s.transform_layout(block, buffer=("read", 0), index_map=input_a_transformed_layout) if input_b_layout == "nhwc-8h2w32c2w-2d": input_b_transformed_layout = get_layout_transform_fn(input_b_layout) s.transform_layout(block, buffer=("read", 1), index_map=input_b_transformed_layout) output_transformed_layout = get_layout_transform_fn(output_layout) s.transform_layout(block, buffer=("write", 0), index_map=output_transformed_layout) n, h, w, c = s.get_loops(block) h_o, h_i = s.split(h, [None, 8]) w_o, w_i = s.split(w, [None, 4]) c_o, c_i = s.split(c, [None, 32]) wio, wii = s.split(w_i, [None, 2]) s.reorder(n, h_o, w_o, c_o, h_i, wio, c_i, wii) fused = s.fuse(c_i, wii) s.vectorize(fused) return s
34.056818
97
0.703704
0
0
0
0
0
0
0
0
1,343
0.448115
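A rough usage sketch for the compute/schedule pair in the record above, assuming fp16 inputs whose shapes are multiples of the crouton layout and the nhwc-8h2w32c2w-2d layout string that get_layout_transform_fn accepts; shapes and dtype are illustrative only:

from tvm import te

# Illustrative crouton-friendly shapes (h % 8 == 0, w % 4 == 0, c % 32 == 0).
A = te.placeholder((1, 8, 8, 32), dtype="float16", name="A")
B = te.placeholder((1, 8, 8, 32), dtype="float16", name="B")

out = add_broadcast_compute(A, B)
sch = tir_broadcast_schedule(
    out, A, B,
    output_layout="nhwc-8h2w32c2w-2d",
    input_a_layout="nhwc-8h2w32c2w-2d",
    input_b_layout="nhwc-8h2w32c2w-2d",
    op_name="add",
)
print(sch.mod.script())  # inspect the transformed TIR before building for Hexagon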
86b7ef11958dc926cec50bcec5a016a3d479c413
6,634
py
Python
python_modules/automation/automation/docker/dagster_docker.py
jrouly/dagster
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
[ "Apache-2.0" ]
null
null
null
python_modules/automation/automation/docker/dagster_docker.py
jrouly/dagster
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
[ "Apache-2.0" ]
1
2021-06-21T18:30:02.000Z
2021-06-25T21:18:39.000Z
python_modules/automation/automation/docker/dagster_docker.py
jrouly/dagster
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
[ "Apache-2.0" ]
null
null
null
import contextlib import os from collections import namedtuple import yaml from dagster import __version__ as current_dagster_version from dagster import check from .ecr import ecr_image, get_aws_account_id, get_aws_region from .utils import ( execute_docker_build, execute_docker_push, execute_docker_tag, python_version_image_tag, ) # Default repository prefix used for local images DEFAULT_LOCAL_PREFIX = "dagster" # Location of the template assets used here IMAGES_PATH = os.path.join(os.path.dirname(__file__), "images") @contextlib.contextmanager def do_nothing(_cwd): yield class DagsterDockerImage(namedtuple("_DagsterDockerImage", "image build_cm path")): """Represents a Dagster image. Properties: image (str): Name of the image build_cm (function): function that is a context manager for build (e.g. for populating a build cache) path (Optional(str)): The path to the image's path. Defaults to docker/images/<IMAGE NAME> """ def __new__(cls, image, build_cm=do_nothing, path=None): return super(DagsterDockerImage, cls).__new__( cls, check.str_param(image, "image"), check.callable_param(build_cm, "build_cm"), check.opt_str_param( path, "path", default=os.path.join(os.path.dirname(__file__), "images", image) ), ) @property def python_versions(self): """List of Python versions supported for this image.""" with open(os.path.join(self.path, "versions.yaml"), "r") as f: versions = yaml.safe_load(f.read()) return list(versions.keys()) def _get_last_updated_for_python_version(self, python_version): """Retrieve the last_updated timestamp for a particular python_version of this image.""" check.str_param(python_version, "python_version") with open(os.path.join(self.path, "last_updated.yaml"), "r") as f: last_updated = yaml.safe_load(f.read()) return last_updated[python_version] def _set_last_updated_for_python_version(self, timestamp, python_version): """Update the last_updated timestamp for a particular python_version of this image.""" check.str_param(timestamp, "timestamp") check.str_param(python_version, "python_version") last_updated = {} last_updated_path = os.path.join(self.path, "last_updated.yaml") if os.path.exists(last_updated_path): with open(last_updated_path, "r") as f: last_updated = yaml.safe_load(f.read()) last_updated[python_version] = timestamp with open(os.path.join(self.path, "last_updated.yaml"), "w") as f: yaml.dump(last_updated, f, default_flow_style=False) def local_image(self, python_version): """Generates the local image name, like: "dagster/foo:some-tag" """ check.str_param(python_version, "python_version") last_updated = self._get_last_updated_for_python_version(python_version) tag = python_version_image_tag(python_version, last_updated) return "{}/{}:{}".format(DEFAULT_LOCAL_PREFIX, self.image, tag) def aws_image(self, python_version=None, custom_tag=None): """Generates the AWS ECR image name, like: "1234567890.dkr.ecr.us-west-1.amazonaws.com/foo:some-tag" """ check.invariant(not (python_version and custom_tag)) check.opt_str_param(python_version, "python_version") check.opt_str_param(custom_tag, "custom_tag") if python_version: last_updated = self._get_last_updated_for_python_version(python_version) tag = python_version_image_tag(python_version, last_updated) else: tag = custom_tag return ecr_image( self.image, tag, aws_account_id=get_aws_account_id(), aws_region=get_aws_region(), ) def _get_docker_args(self, python_version): """Retrieve Docker arguments from this image's versions.yaml, and update with latest Dagster version. 
Also, we allow references in the image versions.yaml to another Dagster image to use as a base image. If defined, set the BASE_IMAGE Docker arg from the full name of the parent image. """ with open(os.path.join(self.path, "versions.yaml"), "r") as f: versions = yaml.safe_load(f.read()) image_info = versions.get(python_version, {}) docker_args = image_info.get("docker_args", {}) if "base_image" in image_info: check.invariant( "BASE_IMAGE" not in docker_args, "Cannot override an existing BASE_IMAGE" ) base_image = DagsterDockerImage(image_info["base_image"]["name"]) source = image_info["base_image"]["source"] if source == "aws": docker_args["BASE_IMAGE"] = base_image.aws_image(python_version) elif source == "local": docker_args["BASE_IMAGE"] = base_image.local_image(python_version) else: raise Exception("Unrecognized source {}".format(source)) # Set Dagster version docker_args["DAGSTER_VERSION"] = current_dagster_version return docker_args def build(self, timestamp, dagster_version, python_version): check.str_param(timestamp, "timestamp") check.str_param(python_version, "python_version") check.invariant( dagster_version == current_dagster_version, desc="Current dagster version ({}) does not match provided arg ({})".format( current_dagster_version, dagster_version ), ) with self.build_cm(self.path): self._set_last_updated_for_python_version(timestamp, python_version) execute_docker_build( self.local_image(python_version), docker_args=self._get_docker_args(python_version), cwd=self.path, ) def push(self, python_version, custom_tag=None): """Push this image to ECR.""" if custom_tag: execute_docker_tag( self.local_image(python_version), self.aws_image(python_version=None, custom_tag=custom_tag), ) execute_docker_push(self.aws_image(python_version=None, custom_tag=custom_tag)) else: execute_docker_tag(self.local_image(python_version), self.aws_image(python_version)) execute_docker_push(self.aws_image(python_version))
38.569767
100
0.655412
6,024
0.908049
31
0.004673
318
0.047935
0
0
1,774
0.26741
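A short sketch of how DagsterDockerImage is driven, assuming an images/<name> directory with versions.yaml and last_updated.yaml alongside this module; the image name below is hypothetical:

# Hypothetical image directory: images/example-image/ with versions.yaml + last_updated.yaml.
image = DagsterDockerImage("example-image")

for python_version in image.python_versions:
    print(python_version, image.local_image(python_version))

# image.build(timestamp, current_dagster_version, python_version) builds locally, and
# image.push(python_version) tags and pushes to ECR (requires AWS credentials).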
86b8aba13af33d7534f429cc7d5eda4e95f58299
13,716
py
Python
chrome/test/telemetry/chromeos/login_unittest.py
Fusion-Rom/android_external_chromium_org
d8b126911c6ea9753e9f526bee5654419e1d0ebd
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
231
2015-01-08T09:04:44.000Z
2021-12-30T03:03:10.000Z
chrome/test/telemetry/chromeos/login_unittest.py
Fusion-Rom/android_external_chromium_org
d8b126911c6ea9753e9f526bee5654419e1d0ebd
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2018-02-10T21:00:08.000Z
2018-03-20T05:09:50.000Z
chrome/test/telemetry/chromeos/login_unittest.py
Fusion-Rom/android_external_chromium_org
d8b126911c6ea9753e9f526bee5654419e1d0ebd
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
268
2015-01-21T05:53:28.000Z
2022-03-25T22:09:01.000Z
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import logging import os import unittest from telemetry.core import browser_finder from telemetry.core import exceptions from telemetry.core import extension_to_load from telemetry.core import util from telemetry.core.backends.chrome import cros_interface from telemetry.unittest import options_for_unittests class CrOSAutoTest(unittest.TestCase): def setUp(self): options = options_for_unittests.GetCopy() self._cri = cros_interface.CrOSInterface(options.cros_remote, options.cros_ssh_identity) self._is_guest = options.browser_type == 'cros-chrome-guest' self._username = '' if self._is_guest else options.browser_options.username self._password = options.browser_options.password def _IsCryptohomeMounted(self): """Returns True if cryptohome is mounted""" cryptohomeJSON, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome', '--action=status']) cryptohomeStatus = json.loads(cryptohomeJSON) return (cryptohomeStatus['mounts'] and cryptohomeStatus['mounts'][0]['mounted']) def _CreateBrowser(self, autotest_ext=False, auto_login=True): """Finds and creates a browser for tests. if autotest_ext is True, also loads the autotest extension""" options = options_for_unittests.GetCopy() if autotest_ext: extension_path = os.path.join(os.path.dirname(__file__), 'autotest_ext') self._load_extension = extension_to_load.ExtensionToLoad( path=extension_path, browser_type=options.browser_type, is_component=True) options.extensions_to_load = [self._load_extension] browser_to_create = browser_finder.FindBrowser(options) self.assertTrue(browser_to_create) options.browser_options.create_browser_with_oobe = True options.browser_options.auto_login = auto_login b = browser_to_create.Create() b.Start() return b def _GetAutotestExtension(self, browser): """Returns the autotest extension instance""" extension = browser.extensions[self._load_extension] self.assertTrue(extension) return extension def _GetLoginStatus(self, browser): extension = self._GetAutotestExtension(browser) self.assertTrue(extension.EvaluateJavaScript( "typeof('chrome.autotestPrivate') != 'undefined'")) extension.ExecuteJavaScript(''' window.__login_status = null; chrome.autotestPrivate.loginStatus(function(s) { window.__login_status = s; }); ''') return util.WaitFor( lambda: extension.EvaluateJavaScript('window.__login_status'), 10) def testCryptohomeMounted(self): """Verifies cryptohome mount status for regular and guest user and when logged out""" with self._CreateBrowser() as b: self.assertEquals(1, len(b.tabs)) self.assertTrue(b.tabs[0].url) self.assertTrue(self._IsCryptohomeMounted()) chronos_fs = self._cri.FilesystemMountedAt('/home/chronos/user') self.assertTrue(chronos_fs) if self._is_guest: self.assertEquals(chronos_fs, 'guestfs') else: home, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome-path', 'user', self._username]) self.assertEquals(self._cri.FilesystemMountedAt(home.rstrip()), chronos_fs) self.assertFalse(self._IsCryptohomeMounted()) self.assertEquals(self._cri.FilesystemMountedAt('/home/chronos/user'), '/dev/mapper/encstateful') def testLoginStatus(self): """Tests autotestPrivate.loginStatus""" with self._CreateBrowser(autotest_ext=True) as b: login_status = self._GetLoginStatus(b) self.assertEquals(type(login_status), dict) self.assertEquals(not self._is_guest, login_status['isRegularUser']) self.assertEquals(self._is_guest, login_status['isGuest']) 
self.assertEquals(login_status['email'], self._username) self.assertFalse(login_status['isScreenLocked']) def _IsScreenLocked(self, browser): return self._GetLoginStatus(browser)['isScreenLocked'] def _LockScreen(self, browser): self.assertFalse(self._IsScreenLocked(browser)) extension = self._GetAutotestExtension(browser) self.assertTrue(extension.EvaluateJavaScript( "typeof chrome.autotestPrivate.lockScreen == 'function'")) logging.info('Locking screen') extension.ExecuteJavaScript('chrome.autotestPrivate.lockScreen();') logging.info('Waiting for the lock screen') def ScreenLocked(): return (browser.oobe and browser.oobe.EvaluateJavaScript("typeof Oobe == 'function'") and browser.oobe.EvaluateJavaScript( "typeof Oobe.authenticateForTesting == 'function'")) util.WaitFor(ScreenLocked, 10) self.assertTrue(self._IsScreenLocked(browser)) def _AttemptUnlockBadPassword(self, browser): logging.info('Trying a bad password') def ErrorBubbleVisible(): return not browser.oobe.EvaluateJavaScript(''' document.getElementById('bubble').hidden ''') self.assertFalse(ErrorBubbleVisible()) browser.oobe.ExecuteJavaScript(''' Oobe.authenticateForTesting('%s', 'bad'); ''' % self._username) util.WaitFor(ErrorBubbleVisible, 10) self.assertTrue(self._IsScreenLocked(browser)) def _UnlockScreen(self, browser): logging.info('Unlocking') browser.oobe.ExecuteJavaScript(''' Oobe.authenticateForTesting('%s', '%s'); ''' % (self._username, self._password)) util.WaitFor(lambda: not browser.oobe, 10) self.assertFalse(self._IsScreenLocked(browser)) def testScreenLock(self): """Tests autotestPrivate.screenLock""" with self._CreateBrowser(autotest_ext=True) as browser: self._LockScreen(browser) self._AttemptUnlockBadPassword(browser) self._UnlockScreen(browser) def testLogout(self): """Tests autotestPrivate.logout""" with self._CreateBrowser(autotest_ext=True) as b: extension = self._GetAutotestExtension(b) try: extension.ExecuteJavaScript('chrome.autotestPrivate.logout();') except (exceptions.BrowserConnectionGoneException, exceptions.BrowserGoneException): pass util.WaitFor(lambda: not self._IsCryptohomeMounted(), 20) def _SwitchRegion(self, region): self._cri.RunCmdOnDevice(['stop', 'ui']) # Change VPD (requires RW-enabled firmware). # To save time, region and initial_timezone are not set. vpd = {'initial_locale': region.language_code, 'keyboard_layout': region.keyboard} for (key, value) in vpd.items(): self._cri.RunCmdOnDevice(['vpd', '-s', '"%s"="%s"' % (key, value)]) # Remove cached files to clear initial locale info and force regeneration. self._cri.RunCmdOnDevice(['rm', '/home/chronos/Local\ State']) self._cri.RunCmdOnDevice(['rm', '/home/chronos/.oobe_completed']) self._cri.RunCmdOnDevice(['dump_vpd_log', '--force']) self._cri.RunCmdOnDevice(['start', 'ui']) def _OobeHasOption(self, browser, selectId, value): hasOptionJs = ''' // Check that the option is present, and selected if it is the default. (function hasOption(selectId, value, isDefault) { var options = document.getElementById(selectId).options; for (var i = 0; i < options.length; i++) { if (options[i].value == value) { // The option is present. Make sure it's selected if necessary. return !isDefault || options.selectedIndex == i; } } return false; })("%s", "%s", %s); ''' return browser.oobe.EvaluateJavaScript( hasOptionJs % (selectId, value, 'true')) def _ResolveLanguage(self, locale): # If the locale matches a language but not the country, fall back to # an existing locale. See ui/base/l10n/l10n_util.cc. 
lang, _, region = map(str.lower, locale.partition('-')) if not region: return "" # Map from other countries to a localized country if lang == 'es' and region == 'es': return 'es-419' if lang == 'zh': if region in ('hk', 'mo'): return 'zh-TW' return 'zh-CN' if lang == 'en': if region in ('au', 'ca', 'nz', 'za'): return 'en-GB' return 'en-US' # No mapping found return "" def testOobeLocalization(self): """Tests different region configurations at OOBE""" # Save the original device localization settings. # To save time, only read initial_locale and keyboard_layout. initial_region = self.Region('', '', '', '', '') initial_region.language_code, _ = self._cri.RunCmdOnDevice( ['vpd', '-g', 'initial_locale']) initial_region.keyboard, _ = self._cri.RunCmdOnDevice( ['vpd', '-g', 'keyboard_layout']) for region in self.REGIONS_LIST: self._SwitchRegion(region) with self._CreateBrowser(auto_login=False) as browser: # Ensure the dropdown lists have been created. util.WaitFor(lambda: browser.oobe.EvaluateJavaScript( 'document.getElementById("language-select") != null'), 10) # Find the language, or an acceptable fallback value. languageFound = self._OobeHasOption(browser, 'language-select', region.language_code) if not languageFound: fallback = self._ResolveLanguage(region.language_code) self.assertTrue(fallback and self._OobeHasOption(browser, 'language-select', fallback)) # Find the keyboard layout. self.assertTrue(self._OobeHasOption( browser, 'keyboard-select', region.keyboard)) # Test is finished. Restore original region settings. self._SwitchRegion(initial_region) # The Region class and region list will be available in regions.py. class Region(object): def __init__(self, region_code, keyboard, time_zone, language_code, keyboard_mechanical_layout, description=None, notes=None): self.region_code = region_code self.keyboard = keyboard self.time_zone = time_zone self.language_code = language_code self.keyboard_mechanical_layout = keyboard_mechanical_layout self.description = description or region_code self.notes = notes class Enum(frozenset): def __getattr__(self, name): if name in self: return name raise AttributeError KeyboardMechanicalLayout = Enum(['ANSI', 'ISO', 'JIS', 'ABNT2']) _KML = KeyboardMechanicalLayout REGIONS_LIST = [ Region('au', 'xkb:us::eng', 'Australia/Sydney', 'en-AU', _KML.ANSI, 'Australia'), Region('ca.ansi', 'xkb:us::eng', 'America/Toronto', 'en-CA', _KML.ANSI, 'Canada (US keyboard)', 'Canada with US (ANSI) keyboard; see http://goto/cros-canada'), Region('ca.fr', 'xkb:ca::fra', 'America/Toronto', 'fr-CA', _KML.ISO, 'Canada (French keyboard)', ('Canadian French (ISO) keyboard. The most common configuration for ' 'Canadian French SKUs. See http://goto/cros-canada')), Region('ca.hybrid', 'xkb:ca:eng:eng', 'America/Toronto', 'en-CA', _KML.ISO, 'Canada (hybrid)', ('Canada with hybrid xkb:ca:eng:eng + xkb:ca::fra keyboard (ISO), ' 'defaulting to English language and keyboard. Used only if there ' 'needs to be a single SKU for all of Canada. See ' 'http://goto/cros-canada')), Region('ca.multix', 'xkb:ca:multix:fra', 'America/Toronto', 'fr-CA', _KML.ISO, 'Canada (multilingual)', ("Canadian Multilingual keyboard; you probably don't want this. 
See " "http://goto/cros-canada")), Region('de', 'xkb:de::ger', 'Europe/Berlin', 'de', _KML.ISO, 'Germany'), Region('fi', 'xkb:fi::fin', 'Europe/Helsinki', 'fi', _KML.ISO, 'Finland'), Region('fr', 'xkb:fr::fra', 'Europe/Paris', 'fr', _KML.ISO, 'France'), Region('gb', 'xkb:gb:extd:eng', 'Europe/London', 'en-GB', _KML.ISO, 'UK'), Region('ie', 'xkb:gb:extd:eng', 'Europe/Dublin', 'en-GB', _KML.ISO, 'Ireland'), Region('in', 'xkb:us::eng', 'Asia/Calcutta', 'en-US', _KML.ANSI, 'India'), Region('my', 'xkb:us::eng', 'Asia/Kuala_Lumpur', 'ms', _KML.ANSI, 'Malaysia'), Region('nl', 'xkb:us:intl:eng', 'Europe/Amsterdam', 'nl', _KML.ANSI, 'Netherlands'), Region('nordic', 'xkb:se::swe', 'Europe/Stockholm', 'en-US', _KML.ISO, 'Nordics', ('Unified SKU for Sweden, Norway, and Denmark. This defaults ' 'to Swedish keyboard layout, but starts with US English language ' 'for neutrality. Use if there is a single combined SKU for Nordic ' 'countries.')), Region('se', 'xkb:se::swe', 'Europe/Stockholm', 'sv', _KML.ISO, 'Sweden', ("Use this if there separate SKUs for Nordic countries (Sweden, " "Norway, and Denmark), or the device is only shipping to Sweden. " "If there is a single unified SKU, use 'nordic' instead.")), Region('sg', 'xkb:us::eng', 'Asia/Singapore', 'en-GB', _KML.ANSI, 'Singapore'), Region('us', 'xkb:us::eng', 'America/Los_Angeles', 'en-US', _KML.ANSI, 'United States'), ]
42.203077
80
0.646544
13,226
0.964275
0
0
0
0
0
0
5,069
0.369568
86b8d88ae37a5647339fb11a5a98693e6a0c570d
790
py
Python
generator/database.py
Neotrinost/Neotrinost.ir
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
[ "MIT" ]
4
2021-05-02T17:35:30.000Z
2021-11-08T12:55:14.000Z
generator/database.py
Neotrinost/Flask_Neotrinost
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
[ "MIT" ]
4
2021-07-12T19:08:01.000Z
2021-08-13T19:37:50.000Z
generator/database.py
Neotrinost/Neotrinost.ir
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
[ "MIT" ]
2
2021-08-08T15:10:07.000Z
2021-11-15T08:59:22.000Z
import sqlite3


class Database:

    def get_connection(self):
        return sqlite3.connect("./db.sqlite")

    def add_card(self, card_title, card_text, card_link_text, card_link_url):
        con = self.get_connection()
        cur = con.cursor()

        create_table_query = "CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR," + \
            " 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )"
        # Parameterized insert: the previous string interpolation produced unquoted
        # values (a runtime SQL error) and was open to SQL injection.
        insert_data_query = "INSERT INTO cards VALUES (?, ?, ?, ?)"

        try:
            cur.execute(create_table_query)
            cur.execute(insert_data_query,
                        (card_title, card_text, card_link_text, card_link_url))
            con.commit()
        except sqlite3.Error as error:
            print("an error has occurred:", error)
        finally:
            con.close()
35.909091
106
0.596203
772
0.977215
0
0
0
0
0
0
264
0.334177
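A minimal usage sketch for the Database helper above; the card values are made up, and db.sqlite is created in the current working directory:

db = Database()
db.add_card("Hello", "Welcome to the site", "Read more", "https://example.com")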
86babfbac8b5c2af0dd5e02e52be427fd0ffce35
3,688
py
Python
crits/backdoors/forms.py
frbapolkosnik/crits
1278c034f2238e2fe34e65e32ce241128a014df2
[ "MIT" ]
22
2015-01-14T19:49:32.000Z
2022-01-26T12:18:52.000Z
crits/backdoors/forms.py
frbapolkosnik/crits
1278c034f2238e2fe34e65e32ce241128a014df2
[ "MIT" ]
null
null
null
crits/backdoors/forms.py
frbapolkosnik/crits
1278c034f2238e2fe34e65e32ce241128a014df2
[ "MIT" ]
6
2015-01-22T21:25:52.000Z
2021-04-12T23:24:14.000Z
from django import forms from django.forms.utils import ErrorList from crits.campaigns.campaign import Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools import get_user_organization from crits.core import form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): """ Django form for adding a Backdoor to CRITs. """ error_css_class = 'error' required_css_class = 'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('', '')] + [ (c.name, c.name) for c in get_item_names(Campaign, True)] self.fields['confidence'].choices = [ ('', ''), ('low', 'low'), ('medium', 'medium'), ('high', 'high')] self.fields['source'].choices = [ (c.name, c.name) for c in get_source_names(True, True, username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data = super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign') if campaign: confidence = cleaned_data.get('confidence') if not confidence or confidence == '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is required if campaign is specified.') return cleaned_data
44.97561
117
0.629067
3,203
0.868492
0
0
0
0
0
0
352
0.095445
86bb18dffc0306993885a2bc13f98c2bb5b4a5b0
7,471
py
Python
src/aprl/agents/monte_carlo.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
211
2019-02-22T08:07:25.000Z
2022-03-14T10:44:20.000Z
src/aprl/agents/monte_carlo.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
51
2019-02-08T01:39:49.000Z
2022-02-15T21:21:46.000Z
src/aprl/agents/monte_carlo.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
41
2019-04-23T05:01:49.000Z
2022-03-16T06:51:19.000Z
"""Monte Carlo receding horizon control.""" from abc import ABC, abstractmethod from multiprocessing import Pipe, Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): """Converts a MujocoEnv into a ResettableEnv. Note all MuJoCo environments are resettable.""" def __init__(self, env): """Wraps a MujocoEnv, adding get_state and set_state methods. :param env: a MujocoEnv. NOTE: it must not be wrapped in a TimeLimit.""" if hasattr(env, "_max_episode_steps"): raise TypeError( "Environment must not have a time limit " "(try passing in env.unwrapped instead)." ) gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim def get_state(self): """Serializes the qpos and qvel state of the MuJoCo emulator.""" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): """Restores qpos and qvel, calling forward() to derive other values.""" state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put mjData in consistent state def reset(self): """See base class.""" return self.env.reset() def step(self, a): """See base class.""" return self.env.step(a) class MonteCarlo(ABC): """Selects an action for a ResettableEnv by random search. Randomly samples fixed-length sequences of actions. Evaluates each trajectory in the environment, resetting the state to the original after each trajectory.""" @abstractmethod def __init__(self, horizon, trajectories): """Constructs a MonteCarlo instance for env. :param horizon: the length of the trajectories to search over. :param trajectories: the number of trajectories to evaluate.""" self.horizon = horizon self.trajectories = trajectories @abstractmethod def seed(self, seed): """Sets a seed for the PRNG for the action sequences. :param seed (int): a seed.""" pass @abstractmethod def best_action(self, state): """Returns the best action out of a random search of action sequences. Generates self.trajectories action sequences, each of length self.horizon. The cumulative reward of each action sequence is computed, starting from state. The function returns the first action and the cumulative reward of the action sequences with the largest cumulative reward. :param state: a value returned by env.get_state(). :return (action, reward): the best action found and associated reward.""" pass class MonteCarloSingle(MonteCarlo): """Selects an action for a ResettableEnv by random search. See base class for details. This implementation is not parallelized.""" def __init__(self, env, horizon, trajectories): """See base class.""" super().__init__(horizon, trajectories) self.env = env def seed(self, seed): """Sets a seed for the PRNG for the action sequences. :param seed (int): a seed.""" self.env.action_space.np_random.seed(seed) def best_action(self, state): """Returns the best action out of a random search of action sequences. See base class for details. 
Search takes place in a single environment, which is reset to state before evaluating each action sequence.""" res = [] for _ in range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample() for _ in range(self.horizon)] total_rew = 0 for u in us: _ob, rew, done, _info = self.env.step(u) total_rew += rew if done: break res.append((us[0], total_rew)) self.env.set_state(state) best = max(res, key=lambda x: x[1]) return best def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd, x = remote.recv() if cmd == "seed": mc.seed(x) elif cmd == "search": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd == "close": remote.close() break else: raise NotImplementedError except KeyboardInterrupt: print("MonteCarloParallel worker: got KeyboardInterrupt") finally: dynamics.close() class MonteCarloParallel(MonteCarlo): """Like MonteCarlo, but performs the random search in parallel.""" # This implementation is inspired by Baselines SubprocVecEnv. def __init__(self, env_fns, horizon, trajectories, seed=0): """Launch subprocess workers and store configuration parameters. :param env_fns (list<()->ResettableEnv>): list of thunks. :param horizon (int): length of trajectories to search over. :param trajectories (int): minimum number of trajectories to evaluate. It will be rounded up to the nearest multiple of len(make_env).""" super().__init__(horizon, trajectories) nremotes = len(env_fns) # Integer ceiling of self.trajectories / nworkers traj_per_worker = (self.trajectories - 1) // nremotes + 1 pipes = [Pipe() for _ in range(nremotes)] self.remotes, self.work_remotes = zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes, env_fns) self.ps = [] for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs): args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process = Process(target=_worker, args=args) process.daemon = True # If the main process crashes, we should not cause things to hang process.start() self.ps.append(process) for remote in self.work_remotes: remote.close() def seed(self, seed): """See base class.""" for i, remote in enumerate(self.remotes): remote.send(("seed", seed + i)) def best_action(self, state): """Returns the best action out of a random search of action sequences.""" for remote in self.remotes: remote.send(("search", state)) results = [remote.recv() for remote in self.remotes] best = max(results, key=lambda x: x[1]) return best def close(self): """Shuts down parallel workers.""" for remote in self.remotes: remote.send(("close", None)) for p in self.ps: p.join() def receding_horizon(monte_carlo, env): """Receding horizon control :param monte_carlo(MonteCarlo): a Monte Carlo controller for env or a clone of env. :param env(ResettableEnv): a resettable environment.""" while True: state = env.get_state() a, _seq_rew = monte_carlo.best_action(state) ob, rew, done, info = env.step(a) yield a, ob, rew, done, info if done: break
37.355
99
0.63191
6,006
0.803908
434
0.058091
1,086
0.145362
0
0
3,105
0.415607
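A hedged end-to-end sketch for the record above: wrap an unwrapped MuJoCo gym environment (so it carries no TimeLimit), search with MonteCarloSingle, and drive it with receding_horizon. The environment id is only an example and needs gym's MuJoCo support installed:

import gym

# "HalfCheetah-v2" is illustrative; any MujocoEnv works as long as it is unwrapped.
env = MujocoResettableWrapper(gym.make("HalfCheetah-v2").unwrapped)
mc = MonteCarloSingle(env, horizon=10, trajectories=32)
mc.seed(0)

env.reset()
total = 0.0
for i, (_action, _ob, rew, done, _info) in enumerate(receding_horizon(mc, env)):
    total += rew
    if done or i >= 20:  # cap the rollout for this illustration
        break
print("reward over the capped rollout:", total)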
86bb2ac534bb948d97b846d6681e205945c4c9dd
2,063
py
Python
machineLearnInAction/bayes.py
xuwening/tensorflowDemo
65687a61e16f947b7ec8a85d12213f954a71542b
[ "MIT" ]
null
null
null
machineLearnInAction/bayes.py
xuwening/tensorflowDemo
65687a61e16f947b7ec8a85d12213f954a71542b
[ "MIT" ]
null
null
null
machineLearnInAction/bayes.py
xuwening/tensorflowDemo
65687a61e16f947b7ec8a85d12213f954a71542b
[ "MIT" ]
null
null
null
import numpy as np def loadDataSet(): postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......] ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec = [0, 1, 0, 1, 0, 1] # 1 is abusive, 0 not return postingList, classVec def createVocabList(dataSet): vocabSet = set([]) for document in dataSet: vocabSet = vocabSet | set(document) return list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec = [0] * len(vocabList) for word in inputSet: if word in vocabList: returnVec[vocabList.index(word)] = 1 else: print('the word: %s is not in my vocabulary' % word) return returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory) / float(numTrainDocs) p0Num = np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom = 0.0 p1Denom = 0.0 for i in range(numTrainDocs): if trainCategory[i] == 1: p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect = p1Num / p1Denom p0Vect = p0Num / p0Denom return p0Vect, p1Vect, pAbusive if __name__ == '__main__': postinList, classVec = loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb = trainNB0(trainMat, classVec) print(p0V, p1V, pAb)
31.738462
97
0.573921
0
0
0
0
0
0
0
0
405
0.196316
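The probabilities returned by trainNB0 above can be turned into a classifier. A sketch that follows the file's own conventions (no Laplace smoothing and no logs, so a zero word probability can zero out a product; the book's complete version adds both):

def classifyNB(vec2Classify, p0Vec, p1Vec, pAbusive):
    # Multiply the per-word conditional probabilities of the words that occur,
    # weight by the class prior, and pick the larger (unnormalized) posterior.
    vec2Classify = np.asarray(vec2Classify)
    p1 = np.prod(np.where(vec2Classify == 1, p1Vec, 1.0)) * pAbusive
    p0 = np.prod(np.where(vec2Classify == 1, p0Vec, 1.0)) * (1.0 - pAbusive)
    return 1 if p1 > p0 else 0

# Example, reusing myVocabList, p0V, p1V and pAb from the __main__ block above.
testEntry = ['stupid', 'garbage']
testVec = setOfWords2Vec(myVocabList, testEntry)
print(testEntry, 'classified as:', classifyNB(testVec, p0V, p1V, pAb))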
86bbd227d8b7715b6a7438754f63aeb34b54d300
169
py
Python
py/debug/__init__.py
segrids/arduino_due
f375020b81459eae9b325aa3646ff84efc2853e8
[ "MIT" ]
3
2021-08-20T16:03:37.000Z
2022-03-23T20:23:30.000Z
py/debug/__init__.py
segrids/testbench
f375020b81459eae9b325aa3646ff84efc2853e8
[ "MIT" ]
null
null
null
py/debug/__init__.py
segrids/testbench
f375020b81459eae9b325aa3646ff84efc2853e8
[ "MIT" ]
null
null
null
from .swd import SWD from .ahb import AHB from .debugger import Debugger, HaltError, NotHaltedError try: from .dwarf import ELFDebugger except ImportError: pass
21.125
57
0.775148
0
0
0
0
0
0
0
0
0
0
86bc2f5f9e49100c67489c79936cc4b670708f66
72
py
Python
HAP-NodeJS/Switch3_1.py
cbdunc2/pi-kit
bf7e9e118af7853d509e0a10c95ba5d8564bb157
[ "MIT" ]
null
null
null
HAP-NodeJS/Switch3_1.py
cbdunc2/pi-kit
bf7e9e118af7853d509e0a10c95ba5d8564bb157
[ "MIT" ]
null
null
null
HAP-NodeJS/Switch3_1.py
cbdunc2/pi-kit
bf7e9e118af7853d509e0a10c95ba5d8564bb157
[ "MIT" ]
null
null
null
import subprocess subprocess.Popen(['sh', '../Switches/Switch3_On.sh'])
24
53
0.736111
0
0
0
0
0
0
0
0
31
0.430556
86bcd2890d4f11513d628469a8efe8d1af2d7195
65
py
Python
src/cicd_sim/artifact/__init__.py
Software-Natives-OSS/cicd_sim
19452a5b06a6c6d99322c9b6777c501025e954f1
[ "MIT" ]
null
null
null
src/cicd_sim/artifact/__init__.py
Software-Natives-OSS/cicd_sim
19452a5b06a6c6d99322c9b6777c501025e954f1
[ "MIT" ]
8
2020-03-12T05:51:56.000Z
2020-03-15T17:31:12.000Z
src/cicd_sim/artifact/__init__.py
Software-Natives-OSS/cicd_sim
19452a5b06a6c6d99322c9b6777c501025e954f1
[ "MIT" ]
null
null
null
from . artifactory import Artifactory __all__ = ['Artifactory']
16.25
37
0.769231
0
0
0
0
0
0
0
0
13
0.2
86bd70e7874de4b570ed32325f28d65eaa058486
4,544
py
Python
mandoline/line_segment3d.py
Spiritdude/mandoline-py
702cd1f9264c7d5d814600ff919406387fd86185
[ "BSD-2-Clause" ]
5
2021-09-16T10:41:44.000Z
2021-11-04T14:45:24.000Z
mandoline/line_segment3d.py
Spiritdude/mandoline-py
702cd1f9264c7d5d814600ff919406387fd86185
[ "BSD-2-Clause" ]
null
null
null
mandoline/line_segment3d.py
Spiritdude/mandoline-py
702cd1f9264c7d5d814600ff919406387fd86185
[ "BSD-2-Clause" ]
null
null
null
class LineSegment3D(object): """A class to represent a 3D line segment.""" def __init__(self, p1, p2): """Initialize with two endpoints.""" if p1 > p2: p1, p2 = (p2, p1) self.p1 = p1 self.p2 = p2 self.count = 1 def __len__(self): """Line segment always has two endpoints.""" return 2 def __iter__(self): """Iterator generator for endpoints.""" yield self.p1 yield self.p2 def __getitem__(self, idx): """Given a vertex number, returns a vertex coordinate vector.""" if idx == 0: return self.p1 if idx == 1: return self.p2 raise LookupError() def __hash__(self): """Returns hash value for endpoints""" return hash((self.p1, self.p2)) def __lt__(self, p): return self < p def __cmp__(self, p): """Compare points for sort ordering in an arbitrary heirarchy.""" val = self[0].__cmp__(p[0]) if val != 0: return val return self[1].__cmp__(p[1]) def __format__(self, fmt): """Provides .format() support.""" pfx = "" sep = " - " sfx = "" if "a" in fmt: pfx = "[" sep = ", " sfx = "]" elif "s" in fmt: pfx = "" sep = " " sfx = "" p1 = self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return pfx + p1 + sep + p2 + sfx def __repr__(self): """Standard string representation.""" return "<LineSegment3D: {0}>".format(self) def __str__(self): """Returns a human readable coordinate string.""" return "{0:a}".format(self) def translate(self,offset): """Translate the endpoint's vertices""" self.p1 = (self.p1[a] + offset[a] for a in range(3)) self.p2 = (self.p2[a] + offset[a] for a in range(3)) def scale(self,scale): """Translate the endpoint's vertices""" self.p1 = (self.p1[a] * scale[a] for a in range(3)) self.p2 = (self.p2[a] * scale[a] for a in range(3)) def length(self): """Returns the length of the line.""" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): """Cache class for 3D Line Segments.""" def __init__(self): """Initialize as an empty cache.""" self.endhash = {} self.seghash = {} def _add_endpoint(self, p, seg): """Remember that this segment has a given endpoint""" if p not in self.endhash: self.endhash[p] = [] self.endhash[p].append(seg) def rehash(self): """Reset the hashes for changed edge vertices""" oldseghash = self.seghash self.seghash = { (v[0], v[1]): v for v in oldseghash.values() } oldendhash = self.endhash self.endhash = { k: v for v in oldendhash.values() for k in v } def translate(self,offset): """Translate vertices of all edges.""" for v in self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale): """Scale vertices of all edges.""" for v in self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self, p): """get list of edges that end at point p""" if p not in self.endhash: return [] return self.endhash[p] def get(self, p1, p2): """Given 2 endpoints, return the cached LineSegment3D inst, if any.""" key = (p1, p2) if p1 < p2 else (p2, p1) if key not in self.seghash: return None return self.seghash[key] def add(self, p1, p2): """Given 2 endpoints, return the (new or cached) LineSegment3D inst.""" key = (p1, p2) if p1 < p2 else (p2, p1) if key in self.seghash: seg = self.seghash[key] seg.count += 1 return seg seg = LineSegment3D(p1, p2) self.seghash[key] = seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return seg def __iter__(self): """Creates an iterator for the line segments in the cache.""" for pt in self.seghash.values(): yield pt def __len__(self): """Length of sequence.""" return len(self.seghash) # vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
28.223602
79
0.53015
4,476
0.985035
262
0.057658
0
0
0
0
1,203
0.264745
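A small illustration of the cache's de-duplication in the record above. The project presumably passes its own 3D point class (the one providing distFromPoint); plain tuples are used here only because add/get just need ordered, hashable endpoints:

cache = LineSegment3DCache()

e1 = cache.add((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))
e2 = cache.add((1.0, 0.0, 0.0), (0.0, 0.0, 0.0))  # same edge, endpoints reversed

assert e1 is e2 and e1.count == 2  # the cached instance is reused and counted
print(len(cache), cache.endpoint_segments((0.0, 0.0, 0.0)))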
86bd7ed417f64120a297b91ba487086bf72ccb3f
2,328
py
Python
cacheable/adapter/PeeweeAdapter.py
d1hotpep/cacheable
9ea97d6504965179f8fe495b67e466c068719445
[ "MIT" ]
null
null
null
cacheable/adapter/PeeweeAdapter.py
d1hotpep/cacheable
9ea97d6504965179f8fe495b67e466c068719445
[ "MIT" ]
null
null
null
cacheable/adapter/PeeweeAdapter.py
d1hotpep/cacheable
9ea97d6504965179f8fe495b67e466c068719445
[ "MIT" ]
null
null
null
import peewee import playhouse.kv from time import time from . import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class Meta: database = peewee.Proxy() def __init__(self, db_connection, table_name=None): if table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection) def multiget(self, keys): cls = self.__class__ res = self.select(cls.key, cls.value) \ .where(cls.key << keys & self.__ttl_filter()) \ .tuples() return { x[0] : x[1] for x in res } @classmethod def multiset(cls, data, ttl=None): ts = int(time()) ttl = ttl or 0 kvs = [] for key, value in data.items(): kvs.append({ cls.key : key, cls.value : value, cls.mtime : ts, cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if list == type(key_or_keys): keys = key_or_keys else: keys = [ key_or_keys ] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self, prefix=None, limit=None): cls = self.__class__ q = self.select(cls.key, cls.value) if prefix: if self.__db_type() == peewee.SqliteDatabase: wildcard = '*' else: wildcard = '%' q = q.where(cls.key % ('%s%s' % (prefix, wildcard))) q = q.where(self.__ttl_filter()) if limit: q = q.limit(limit) res = { x[0] : x[1] for x in q.tuples() } if prefix: res = { k[len(prefix):] : v for k, v in res.items() } return res def __ttl_filter(self): """ Add the TTL where clause to a query, to filter out stale results """ ts = int(time()) cls = self.__class__ return cls.ttl == 0 | (cls.mtime + cls.ttl > ts) def __db_type(self): return type(self._meta.database.obj)
24.25
72
0.537371
2,237
0.960911
0
0
374
0.160653
0
0
100
0.042955
86bf8dc5885e11ca632362fcec2e79f7e5e74050
6,006
py
Python
mmgen/models/architectures/arcface/helpers.py
plutoyuxie/mmgeneration
0a7f5d16c970de1766ebf049d7a0264fe506504b
[ "Apache-2.0" ]
null
null
null
mmgen/models/architectures/arcface/helpers.py
plutoyuxie/mmgeneration
0a7f5d16c970de1766ebf049d7a0264fe506504b
[ "Apache-2.0" ]
null
null
null
mmgen/models/architectures/arcface/helpers.py
plutoyuxie/mmgeneration
0a7f5d16c970de1766ebf049d7a0264fe506504b
[ "Apache-2.0" ]
null
null
null
from collections import namedtuple import torch from torch.nn import (AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d, Module, PReLU, ReLU, Sequential, Sigmoid) # yapf: disable """ ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) # isort:skip # noqa """ # yapf: enable class Flatten(Module): """Flatten Module.""" def forward(self, input): return input.view(input.size(0), -1) def l2_norm(input, axis=1): """l2 normalization. Args: input (torch.Tensor): The input tensor. axis (int, optional): Specifies which axis of input to calculate the norm across. Defaults to 1. Returns: Tensor: Tensor after L2 normalization per-instance. """ norm = torch.norm(input, 2, axis, True) output = torch.div(input, norm) return output class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): """A named tuple describing a ResNet block.""" def get_block(in_channel, depth, num_units, stride=2): """Get a single block config. Args: in_channel (int): Input channels. depth (int): Output channels. num_units (int): Number of unit modules. stride (int, optional): Conv2d stride. Defaults to 2. Returns: list: A list of unit modules' config. """ return [Bottleneck(in_channel, depth, stride) ] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] def get_blocks(num_layers): """Get block configs of backbone. Args: num_layers (int): Number of ConvBlock layers in backbone. Raises: ValueError: `num_layers` must be one of [50, 100, 152]. Returns: list: A list of block configs. """ if num_layers == 50: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=4), get_block(in_channel=128, depth=256, num_units=14), get_block(in_channel=256, depth=512, num_units=3) ] elif num_layers == 100: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=13), get_block(in_channel=128, depth=256, num_units=30), get_block(in_channel=256, depth=512, num_units=3) ] elif num_layers == 152: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=8), get_block(in_channel=128, depth=256, num_units=36), get_block(in_channel=256, depth=512, num_units=3) ] else: raise ValueError( 'Invalid number of layers: {}. Must be one of [50, 100, 152]'. format(num_layers)) return blocks class SEModule(Module): """Squeeze-and-Excitation Modules. Args: channels (int): Input channels. reduction (int): Intermediate channels reduction ratio. """ def __init__(self, channels, reduction): super(SEModule, self).__init__() self.avg_pool = AdaptiveAvgPool2d(1) self.fc1 = Conv2d( channels, channels // reduction, kernel_size=1, padding=0, bias=False) self.relu = ReLU(inplace=True) self.fc2 = Conv2d( channels // reduction, channels, kernel_size=1, padding=0, bias=False) self.sigmoid = Sigmoid() def forward(self, x): """Forward Function.""" module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class bottleneck_IR(Module): """Intermediate Resblock of bottleneck. Args: in_channel (int): Input channels. depth (int): Output channels. stride (int): Conv2d stride. """ def __init__(self, in_channel, depth, stride): """Intermediate Resblock of bottleneck. Args: in_channel (int): Input channels. depth (int): Output channels. stride (int): Conv2d stride. 
""" super(bottleneck_IR, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth)) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)) def forward(self, x): """Forward function.""" shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut class bottleneck_IR_SE(Module): """Intermediate Resblock of bottleneck with SEModule. Args: in_channel (int): Input channels. depth (int): Output channels. stride (int): Conv2d stride. """ def __init__(self, in_channel, depth, stride): super(bottleneck_IR_SE, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth)) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth), SEModule(depth, 16)) def forward(self, x): """Forward function.""" shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut
30.180905
106
0.585914
3,400
0.566101
0
0
0
0
0
0
1,915
0.318848
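A quick shape check for the building blocks in the record above (sizes are illustrative; the full ArcFace backbone assembles these units from get_blocks):

import torch

blocks = get_blocks(50)
print(len(blocks), sum(len(stage) for stage in blocks))  # 4 stages, 24 bottleneck units

x = torch.randn(2, 64, 56, 56)
unit = bottleneck_IR_SE(in_channel=64, depth=64, stride=2)
print(unit(x).shape)  # torch.Size([2, 64, 28, 28]) -- stride 2 halves the spatial dims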
86bfaf5a13f46371cddc52c365f2b99eb199e27e
1,694
py
Python
createplaylist.py
mahi0601/SpotifyPlaylist
55e30bb4c13f291693b892d6eeccc70b4a769805
[ "MIT" ]
47
2020-09-21T11:35:10.000Z
2022-01-17T21:25:39.000Z
createplaylist.py
mahi0601/SpotifyPlaylist
55e30bb4c13f291693b892d6eeccc70b4a769805
[ "MIT" ]
2
2021-03-31T17:02:24.000Z
2021-07-30T08:17:37.000Z
createplaylist.py
mahi0601/SpotifyPlaylist
55e30bb4c13f291693b892d6eeccc70b4a769805
[ "MIT" ]
24
2020-09-21T16:45:38.000Z
2022-03-02T10:50:47.000Z
import os from spotifyclient import SpotifyClient def main(): spotify_client = SpotifyClient(os.getenv("SPOTIFY_AUTHORIZATION_TOKEN"), os.getenv("SPOTIFY_USER_ID")) # get last played tracks num_tracks_to_visualise = int(input("How many tracks would you like to visualise? ")) last_played_tracks = spotify_client.get_last_played_tracks(num_tracks_to_visualise) print(f"\nHere are the last {num_tracks_to_visualise} tracks you listened to on Spotify:") for index, track in enumerate(last_played_tracks): print(f"{index+1}- {track}") # choose which tracks to use as a seed to generate a playlist indexes = input("\nEnter a list of up to 5 tracks you'd like to use as seeds. Use indexes separated by a space: ") indexes = indexes.split() seed_tracks = [last_played_tracks[int(index)-1] for index in indexes] # get recommended tracks based off seed tracks recommended_tracks = spotify_client.get_track_recommendations(seed_tracks) print("\nHere are the recommended tracks which will be included in your new playlist:") for index, track in enumerate(recommended_tracks): print(f"{index+1}- {track}") # get playlist name from user and create playlist playlist_name = input("\nWhat's the playlist name? ") playlist = spotify_client.create_playlist(playlist_name) print(f"\nPlaylist '{playlist.name}' was created successfully.") # populate playlist with recommended tracks spotify_client.populate_playlist(playlist, recommended_tracks) print(f"\nRecommended tracks successfully uploaded to playlist '{playlist.name}'.") if __name__ == "__main__": main()
42.35
118
0.725502
0
0
0
0
0
0
0
0
791
0.466942
86c0f5e44bffc70a506881987c3f56e4e3ef7cdd
30,797
py
Python
tests/contrib/flask/test_request.py
thieman/dd-trace-py
1e87c9bdf7769032982349c4ccc0e1c2e6866a16
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
tests/contrib/flask/test_request.py
thieman/dd-trace-py
1e87c9bdf7769032982349c4ccc0e1c2e6866a16
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
tests/contrib/flask/test_request.py
thieman/dd-trace-py
1e87c9bdf7769032982349c4ccc0e1c2e6866a16
[ "Apache-2.0", "BSD-3-Clause" ]
1
2021-02-11T10:20:14.000Z
2021-02-11T10:20:14.000Z
# -*- coding: utf-8 -*- from ddtrace.compat import PY2 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.flask.patch import flask_version from ddtrace.ext import http from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID from flask import abort from . import BaseFlaskTestCase from ...utils import assert_span_http_status_code base_exception_name = 'builtins.Exception' if PY2: base_exception_name = 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def test_request(self): """ When making a request We create the expected spans """ @self.app.route('/') def index(): return 'Hello Flask', 200 res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.index', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) assert http.QUERY_STRING not in req_span.meta # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self): """Make sure when making a request that we create the expected spans and capture the query string.""" @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans = self.get_spans() # Request tags assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz' def test_analytics_global_on_integration_default(self): """ When making a request When an integration trace search is not event sample rate is not set and globally trace search is enabled We expect the root span to have the appropriate tag """ @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0, }, ) for span in self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_on_integration_on(self): """ When making a request When an integration trace search is enabled and sample rate is set and globally trace search is enabled We expect the root span to have the appropriate tag """ @self.app.route('/') def index(): return 
'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, ) for span in self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_default(self): """ When making a request When an integration trace search is not set and sample rate is set and globally trace search is disabled We expect the root span to not include tag """ @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) for span in self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_on(self): """ When making a request When an integration trace search is enabled and sample rate is set and globally trace search is disabled We expect the root span to have the appropriate tag """ @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, ) for span in self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self): """ When making a request When distributed tracing headers are present We create the expected spans """ @self.app.route('/') def index(): return 'Hello Flask', 200 # Default: distributed tracing enabled res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') # Assert parent and trace id are properly set on the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # Explicitly enable distributed tracing with self.override_config('flask', dict(distributed_tracing_enabled=True)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') # Assert parent and trace id are properly set on the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # With distributed tracing disabled with self.override_config('flask', dict(distributed_tracing_enabled=False)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') # Assert parent and trace id are properly set on the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') 
self.assertNotEqual(span.trace_id, 678910) self.assertIsNone(span.parent_id) def test_request_query_string(self): """ When making a request When the request contains a query string We create the expected spans """ @self.app.route('/') def index(): return 'Hello Flask', 200 res = self.client.get('/', query_string=dict(hello='flask')) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.index', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') # Note: contains no query string self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') # Note: contains no query string self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') # Note: contains no query string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') # Note: contains no query string self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_unicode(self): """ When making a request When the url contains unicode We create the expected spans """ @self.app.route(u'/üŋïĉóđē') def unicode(): return 'üŋïĉóđē', 200 res = self.client.get(u'/üŋïĉóđē') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.unicode', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, u'GET /üŋïĉóđē') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode') self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē') assert_span_http_status_code(req_span, 200) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, 
u'/üŋïĉóđē') self.assertEqual(req_span.error, 0) def test_request_404(self): """ When making a request When the requested endpoint was not found We create the expected spans """ res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET 404') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_abort_404(self): """ When making a request When the requested endpoint calls `abort(404)` We create the expected spans """ @self.app.route('/not-found') def not_found(): abort(404) res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.not_found', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /not-found') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) 
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found') self.assertEqual(handler_span.resource, '/not-found') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_500(self): """ When making a request When the requested endpoint raises an exception We create the expected spans """ @self.app.route('/500') def fivehundred(): raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') assert_span_http_status_code(req_span, 500) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') self.assertEqual(handler_span.resource, '/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) 
def test_request_501(self): """ When making a request When the requested endpoint calls `abort(501)` We create the expected spans """ @self.app.route('/501') def fivehundredone(): abort(501) res = self.client.get('/501') self.assertEqual(res.status_code, 501) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /501') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span, 501) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone') self.assertEqual(req_span.get_tag('flask.url_rule'), '/501') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone') self.assertEqual(handler_span.resource, '/501') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 0) def test_request_error_handler(self): """ When making a request When the requested endpoint raises an exception We create the expected spans """ @self.app.errorhandler(500) def error_handler(e): return 'Whoops', 500 @self.app.route('/500') def fivehundred(): raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops') spans = self.get_spans() if flask_version >= (0, 12, 0): self.assertEqual(len(spans), 11) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 
'tests.contrib.flask.test_request.error_handler', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) else: self.assertEqual(len(spans), 10) # Assert the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert span services for span in spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') assert_span_http_status_code(req_span, 500) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') self.assertEqual(handler_span.resource, '/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)
39.892487
117
0.618437
30,359
0.984212
0
0
1,156
0.037476
0
0
9,775
0.316897
86c23c7616ed380cf3c80ae082afe689a1c8e0b9
7,318
py
Python
ConvDR/data/preprocess_cast19.py
blazejdolicki/CHEDAR
e4819775e7f6ffa2d6f1ad798ee262f01370b236
[ "MIT" ]
1
2021-11-10T13:39:16.000Z
2021-11-10T13:39:16.000Z
ConvDR/data/preprocess_cast19.py
blazejdolicki/CHEDAR
e4819775e7f6ffa2d6f1ad798ee262f01370b236
[ "MIT" ]
null
null
null
ConvDR/data/preprocess_cast19.py
blazejdolicki/CHEDAR
e4819775e7f6ffa2d6f1ad798ee262f01370b236
[ "MIT" ]
null
null
null
import argparse from trec_car import read_data from tqdm import tqdm import pickle import os import json import copy from utils.util import NUM_FOLD def parse_sim_file(filename): """ Reads the deduplicated documents file and stores the duplicate passage ids into a dictionary """ sim_dict = {} lines = open(filename).readlines() for line in lines: data = line.strip().split(':') if len(data[1]) > 0: sim_docs = data[-1].split(',') for docs in sim_docs: sim_dict[docs] = 1 return sim_dict if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--car_cbor", type=str) parser.add_argument("--msmarco_collection", type=str) parser.add_argument("--duplicate_file", type=str) parser.add_argument("--cast_dir", type=str) parser.add_argument("--out_data_dir", type=str) parser.add_argument("--out_collection_dir", type=str) args = parser.parse_args() # INPUT sim_file = args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, "evaluation_topics_v1.0.json") cast_topics_manual_file = os.path.join( args.cast_dir, "evaluation_topics_annotated_resolved_v1.0.tsv") cast_qrels_file = os.path.join(args.cast_dir, "2019qrels.txt") # OUTPUT out_topics_file = os.path.join(args.out_data_dir, "eval_topics.jsonl") out_raw_queries_file = os.path.join(args.out_data_dir, "queries.raw.tsv") out_manual_queries_file = os.path.join(args.out_data_dir, "queries.manual.tsv") out_qrels_file = os.path.join(args.out_data_dir, "qrels.tsv") car_id_to_idx_file = os.path.join(args.out_collection_dir, "car_id_to_idx.pickle") car_idx_to_id_file = os.path.join(args.out_collection_dir, "car_idx_to_id.pickle") out_collection_file = os.path.join(args.out_collection_dir, "collection.tsv") # 1. Combine TREC-CAR & MS MARCO, remove duplicate passages, assign new ids car_id_to_idx = {} car_idx_to_id = [] if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print("Preprocessed collection found. Loading car_id_to_idx...") with open(car_id_to_idx_file, "rb") as f: car_id_to_idx = pickle.load(f) else: sim_dict = parse_sim_file(sim_file) car_base_id = 10000000 i = 0 with open(out_collection_file, "w", encoding="utf-8") as f: #FIX change 'a' to 'w' in normal run print("Processing TREC-CAR...") for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = "CAR_" + para.para_id text = para.get_text() text = text.replace("\t", " ").replace("\n", " ").replace("\r", " ") idx = car_base_id + i car_id_to_idx[ car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write("{}\t{}\n".format(idx, text)) i += 1 print("Processing MS MARCO...") removed = 0 with open(args.msmarco_collection, "r") as m: for line in tqdm(m): marco_id, text = line.strip().split("\t") if ("MARCO_" + marco_id) in sim_dict: removed += 1 continue f.write("{}\t{}\n".format(marco_id, text)) print("Removed " + str(removed) + " passages") print("Dumping id mappings to {} and {}...".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, "wb") as f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, "wb") as f: pickle.dump(car_idx_to_id, f) # 2. 
Process queries print("Processing CAsT utterances...") with open(cast_topics_raw_file, "r") as fin: raw_data = json.load(fin) with open(cast_topics_manual_file, "r") as fin: annonated_lines = fin.readlines() out_raw_queries = open(out_raw_queries_file, "w") out_manual_queries = open(out_manual_queries_file, "w") all_annonated = {} for line in annonated_lines: splitted = line.split('\t') out_manual_queries.write(line) topic_query = splitted[0] query = splitted[1].strip() topic_id = topic_query.split('_')[0] query_id = topic_query.split('_')[1] if topic_id not in all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id] = query out_manual_queries.close() topic_number_dict = {} data = [] for group in raw_data: topic_number, description, turn, title = str( group['number']), group.get('description', ''), group['turn'], group.get( 'title', '') queries = [] for query in turn: query_number, raw_utterance = str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record = {} record['topic_number'] = topic_number record['query_number'] = query_number record['description'] = description record['title'] = title record['input'] = copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write("{}_{}\t{}\n".format(topic_number, query_number, raw_utterance)) if not topic_number in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file, 'w') as fout: for item in data: json_str = json.dumps(item) fout.write(json_str + '\n') # Split eval data into K-fold topic_per_fold = len(topic_number_dict) // NUM_FOLD for i in range(NUM_FOLD): with open(out_topics_file + "." + str(i), 'w') as fout: for item in data: idx = topic_number_dict[item['topic_number']] if idx // topic_per_fold == i: json_str = json.dumps(item) fout.write(json_str + '\n') # 3. Process and convert qrels print("Processing qrels...") with open(cast_qrels_file, "r") as oq, open(out_qrels_file, "w") as nq: for line in oq: qid, _, pid, rel = line.strip().split() if pid.startswith("CAR_"): assert car_id_to_idx[pid] != -1 pid = car_id_to_idx[pid] elif pid.startswith("MARCO_"): pid = int(pid[6:]) else: continue nq.write(qid + "\t0\t" + str(pid) + "\t" + rel + "\n") print("End")
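A minimal sketch of the duplicate-file format that parse_sim_file above expects, assuming ids of the form MARCO_*; the two lines and the temporary file are invented purely to exercise the parsing logic, not the full preprocessing run.

# Editor's illustrative check, not part of preprocess_cast19.py.
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('MARCO_1:MARCO_17,MARCO_42\n')   # keep MARCO_1, drop its two duplicates
    tmp.write('MARCO_2:\n')                    # no duplicates recorded for MARCO_2
    tmp_path = tmp.name

print(parse_sim_file(tmp_path))   # expected: {'MARCO_17': 1, 'MARCO_42': 1}
os.remove(tmp_path)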
39.556757
104
0.562859
0
0
0
0
0
0
0
0
1,249
0.170675
86c253258ad8f50c39a576db2e17ac13da5ea1c7
15,207
py
Python
coord_convert/geojson_utils.py
brandonxiang/example-pyQGIS
a61d0321d223d0b82e44bb809521965858fde857
[ "MIT" ]
3
2017-02-23T08:35:30.000Z
2018-12-11T05:50:54.000Z
coord_convert/geojson_utils.py
brandonxiang/example-pyQGIS
a61d0321d223d0b82e44bb809521965858fde857
[ "MIT" ]
null
null
null
coord_convert/geojson_utils.py
brandonxiang/example-pyQGIS
a61d0321d223d0b82e44bb809521965858fde857
[ "MIT" ]
2
2019-10-22T02:16:50.000Z
2020-09-28T11:37:48.000Z
__doc__ = 'github: https://github.com/brandonxiang/geojson-python-utils' import math from coordTransform_utils import wgs84togcj02 from coordTransform_utils import gcj02tobd09 def linestrings_intersect(line1, line2): """ To valid whether linestrings from geojson are intersected with each other. reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js Keyword arguments: line1 -- first line geojson object line2 -- second line geojson object if(line1 intersects with other) return intersect point array else empty array """ intersects = [] for i in range(0, len(line1['coordinates']) - 1): for j in range(0, len(line2['coordinates']) - 1): a1_x = line1['coordinates'][i][1] a1_y = line1['coordinates'][i][0] a2_x = line1['coordinates'][i + 1][1] a2_y = line1['coordinates'][i + 1][0] b1_x = line2['coordinates'][j][1] b1_y = line2['coordinates'][j][0] b2_x = line2['coordinates'][j + 1][1] b2_y = line2['coordinates'][j + 1][0] ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \ (b2_y - b1_y) * (a1_x - b1_x) ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \ (a2_y - a1_y) * (a1_x - b1_x) u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y) if not u_b == 0: u_a = ua_t / u_b u_b = ub_t / u_b if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1: intersects.append({'type': 'Point', 'coordinates': [ a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]}) # if len(intersects) == 0: # intersects = False return intersects def _bbox_around_polycoords(coords): """ bounding box """ x_all = [] y_all = [] for first in coords[0]: x_all.append(first[1]) y_all.append(first[0]) return [min(x_all), min(y_all), max(x_all), max(y_all)] def _point_in_bbox(point, bounds): """ valid whether the point is inside the bounding box """ return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) def _pnpoly(x, y, coords): """ the algorithm to judge whether the point is located in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation """ vert = [[0, 0]] for coord in coords: for node in coord: vert.append(node) vert.append(coord[0]) vert.append([0, 0]) inside = False i = 0 j = len(vert) - 1 while i < len(vert): if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1]) * (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]): inside = not inside j = i i += 1 return inside def _point_in_polygon(point, coords): inside_box = False for coord in coords: if inside_box: break if _point_in_bbox(point, _bbox_around_polycoords(coord)): inside_box = True if not inside_box: return False inside_poly = False for coord in coords: if inside_poly: break if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly = True return inside_poly def point_in_polygon(point, poly): """ valid whether the point is located in a polygon Keyword arguments: point -- point geojson object poly -- polygon geojson object if(point inside poly) return true else false """ coords = [poly['coordinates']] if poly[ 'type'] == 'Polygon' else poly['coordinates'] return _point_in_polygon(point, coords) def point_in_multipolygon(point, multipoly): """ valid whether the point is located in a mulitpolygon (donut polygon is not supported) Keyword arguments: point -- point geojson object multipoly -- multipolygon geojson object if(point inside multipoly) return true else false """ coords_array = [multipoly['coordinates']] if multipoly[ 'type'] == "MultiPolygon" else 
multipoly['coordinates'] for coords in coords_array: if _point_in_polygon(point, coords): return True return False def number2radius(number): """ convert degree into radius Keyword arguments: number -- degree return radius """ return number * math.pi / 180 def number2degree(number): """ convert radius into degree Keyword arguments: number -- radius return degree """ return number * 180 / math.pi def draw_circle(radius_in_meters, center_point, steps=15): """ get a circle shape polygon based on centerPoint and radius Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object if(point inside multipoly) return true else false """ steps = steps if steps > 15 else 15 center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist = (radius_in_meters / 1000) / 6371 # convert meters to radiant rad_center = [number2radius(center[0]), number2radius(center[1])] # 15 sided circle poly = [] for step in range(0, steps): brng = 2 * math.pi * step / steps lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) + math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng)) lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat)) poly.append([number2degree(lng), number2degree(lat)]) return {"type": "Polygon", "coordinates": [poly]} def rectangle_centroid(rectangle): """ get the centroid of the rectangle Keyword arguments: rectangle -- polygon geojson object return centroid """ bbox = rectangle['coordinates'][0] xmin = bbox[0][0] ymin = bbox[0][1] xmax = bbox[2][0] ymax = bbox[2][1] xwidth = xmax - xmin ywidth = ymax - ymin return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]} def point_distance(point1, point2): """ calculate the distance between two point on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object if(point inside multipoly) return true else false """ lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 = point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon = number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371 * c) * 1000 def geometry_within_radius(geometry, center, radius): """ To valid whether point or linestring or polygon is inside a radius around a center Keyword arguments: geometry -- point/linstring/polygon geojson object center -- point geojson object radius -- radius if(geometry inside radius) return true else false """ if geometry['type'] == 'Point': return point_distance(geometry, center) <= radius elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon': point = {} # it's enough to check the exterior ring of the Polygon coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates'] for coordinate in coordinates: point['coordinates'] = coordinate if point_distance(point, center) > radius: return False return True def area(poly): """ calculate the area of polygon Keyword arguments: poly -- polygon geojson object return polygon area """ poly_area = 0 # TODO: polygon holes at coordinates[1] points = poly['coordinates'][0] j = len(points) - 1 count = len(points) for i in range(0, count): p1_x = points[i][1] p1_y 
= points[i][0] p2_x = points[j][1] p2_y = points[j][0] poly_area += p1_x * p2_y poly_area -= p1_y * p2_x j = i poly_area /= 2 return poly_area def centroid(poly): """ get the centroid of polygon adapted from http://paulbourke.net/geometry/polyarea/javascript.txt Keyword arguments: poly -- polygon geojson object return polygon centroid """ f_total = 0 x_total = 0 y_total = 0 # TODO: polygon holes at coordinates[1] points = poly['coordinates'][0] j = len(points) - 1 count = len(points) for i in range(0, count): p1_x = points[i][1] p1_y = points[i][0] p2_x = points[j][1] p2_y = points[j][0] f_total = p1_x * p2_y - p2_x * p1_y x_total += (p1_x + p2_x) * f_total y_total += (p1_y + p2_y) * f_total j = i six_area = area(poly) * 6 return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]} def destination_point(point, brng, dist): """ Calculate a destination Point base on a base point and a distance Keyword arguments: pt -- polygon geojson object brng -- an angle in degrees dist -- distance in Kilometer between destination and base point return destination point object """ dist = float(dist) / 6371 # convert dist to angular distance in radians brng = number2radius(brng) lon1 = number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1]) lat2 = math.asin(math.sin(lat1) * math.cos(dist) + math.cos(lat1) * math.sin(dist) * math.cos(brng)) lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2)) lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]} def simplify(source, kink=20): """ source[] array of geojson points kink in metres, kinks above this depth kept kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments """ source_coord = map(lambda o: {"lng": o.coordinates[0], "lat": o.coordinates[1]}, source) # count, n_stack, n_dest, start, end, i, sig; # dev_sqr, max_dev_sqr, band_sqr; # x12, y12, d12, x13, y13, d13, x23, y23, d23; F = (math.pi / 180.0) * 0.5 index = [] # aray of indexes of source points to include in the reduced line sig_start = [] # indices of start & end of working section sig_end = [] # check for simple cases count = len(source_coord) if count < 3: return source_coord # one or two points # more complex case. initialize stack band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees band_sqr *= band_sqr n_dest = 0 sig_start[0] = 0 sig_end[0] = count - 1 n_stack = 1 # while the stack is not empty while n_stack > 0: # ... pop the top-most entries off the stacks start = sig_start[n_stack - 1] end = sig_end[n_stack - 1] n_stack -= 1 if (end - start) > 1: #any intermediate points ? # ... 
yes, so find most deviant intermediate point to either side of line joining start & end points x12 = source[end]["lng"] - source[start]["lng"] y12 = source[end]["lat"] - source[start]["lat"] if math.fabs(x12) > 180.0: x12 = 360.0 - math.fabs(x12) x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng d12 = (x12 * x12) + (y12 * y12) i = start + 1 sig = start max_dev_sqr = -1.0 while i < end: x13 = source[i]["lng"] - source[start]["lng"] y13 = source[i]["lat"] - source[start]["lat"] if math.fabs(x13) > 180.0: x13 = 360.0 - math.fabs(x13) x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"])) d13 = (x13 * x13) + (y13 * y13) x23 = source[i]["lng"] - source[end]["lng"] y23 = source[i]["lat"] - source[end]["lat"] if math.fabs(x23) > 180.0: x23 = 360.0 - math.fabs(x23) x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"])) d23 = (x23 * x23) + (y23 * y23) if d13 >= (d12 + d23): dev_sqr = d23 elif d23 >= (d12 + d13): dev_sqr = d13 else: dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle if dev_sqr > max_dev_sqr: sig = i max_dev_sqr = dev_sqr i += 1 if max_dev_sqr < band_sqr: # is there a sig. intermediate point ? #... no, so transfer current start point index[n_dest] = start n_dest += 1 else: # ... yes, so push two sub-sections on stack for further processing n_stack += 1 sig_start[n_stack - 1] = sig sig_end[n_stack - 1] = end n_stack += 1 sig_start[n_stack - 1] = start sig_end[n_stack - 1] = sig else: # ... no intermediate points, so transfer current start point index[n_dest] = start n_dest += 1 # transfer last point index[n_dest] = count - 1 n_dest += 1 # make return array r = [] for i in range(0, n_dest): r.append(source_coord[index[i]]) return map(lambda o: {"type": "Point","coordinates": [o.lng, o.lat]}, r) def wgs2gcj(geometry): """ convert wgs84 to gcj referencing by https://github.com/wandergis/coordTransform_py """ # TODO: point linestring point if geometry['type'] == 'MultiLineString': coordinates = geometry['coordinates'] for lines in coordinates: for line in lines: line[0], line[1] = wgs84togcj02(line[0], line[1]) return geometry def gcj2bd(geometry): """ convert gcj to bd referencing by https://github.com/wandergis/coordTransform_py """ # TODO: point linestring point if geometry['type'] == 'MultiLineString': coordinates = geometry['coordinates'] for lines in coordinates: for line in lines: line[0], line[1] = gcj02tobd09(line[0], line[1]) return geometry
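Since the module documents its helpers mostly through docstrings, here is a small hedged usage sketch; the square and the two points are invented coordinates.

# Editor's illustrative usage, not part of geojson_utils.py.
square = {'type': 'Polygon',
          'coordinates': [[[0.0, 0.0], [0.0, 10.0], [10.0, 10.0], [10.0, 0.0], [0.0, 0.0]]]}
inside = {'type': 'Point', 'coordinates': [5.0, 5.0]}
outside = {'type': 'Point', 'coordinates': [20.0, 5.0]}

print(point_in_polygon(inside, square))     # True
print(point_in_polygon(outside, square))    # False
print(point_distance(inside, outside))      # great-circle distance in metres
print(draw_circle(1000, inside)['type'])    # 'Polygon' approximating a 1 km circle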
31.290123
125
0.572105
0
0
0
0
0
0
0
0
5,403
0.355297
86c29a98e5b655839d8db00b70a6a8da9b1ef8d8
388
py
Python
config.py
Rinku92/Mini_Project3
eab11ce3743fddda2ccc158367a37d4522ba1e39
[ "MIT" ]
null
null
null
config.py
Rinku92/Mini_Project3
eab11ce3743fddda2ccc158367a37d4522ba1e39
[ "MIT" ]
null
null
null
config.py
Rinku92/Mini_Project3
eab11ce3743fddda2ccc158367a37d4522ba1e39
[ "MIT" ]
null
null
null
import os

'''
user = os.environ['POSTGRES_USER']
password = os.environ['POSTGRES_PASSWORD']
host = os.environ['POSTGRES_HOST']
database = os.environ['POSTGRES_DB']
port = os.environ['POSTGRES_PORT']
'''

user = 'test'
password = 'password'
host = 'localhost'
database = 'example'
port = '5432'

DATABASE_CONNECTION_URI = f'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}'
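A hedged sketch of how the URI above is typically consumed; SQLAlchemy is an assumption here (config.py itself only builds the string), and the query needs a reachable Postgres instance.

# Editor's illustrative consumer, not part of config.py.
from sqlalchemy import create_engine, text

from config import DATABASE_CONNECTION_URI

engine = create_engine(DATABASE_CONNECTION_URI)
with engine.connect() as conn:
    print(conn.execute(text('SELECT 1')).scalar())   # 1 when the credentials above are valid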
24.25
93
0.716495
0
0
0
0
0
0
0
0
302
0.778351
86c368ef733994c7aa8778c60fbe8e4bdf94dac9
347
py
Python
10_days_of_statistics_8_1.py
sercangul/HackerRank
e6d7056babe03baafee8d7f1cacdca7c28b72ded
[ "Apache-2.0" ]
null
null
null
10_days_of_statistics_8_1.py
sercangul/HackerRank
e6d7056babe03baafee8d7f1cacdca7c28b72ded
[ "Apache-2.0" ]
null
null
null
10_days_of_statistics_8_1.py
sercangul/HackerRank
e6d7056babe03baafee8d7f1cacdca7c28b72ded
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:26:47 2019

@author: sercangul
"""

n = 5
xy = [map(int, input().split()) for _ in range(n)]
sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x**2, x * y) for x, y in xy]))
b = (n * sxy - sx * sy) / (n * sx2 - sx**2)
a = (sy / n) - b * (sx / n)
print('{:.3f}'.format(a + b * 80))
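The script reads its five points from stdin; the sketch below applies the same closed-form least-squares slope and intercept to an explicit, invented sample so the arithmetic can be checked without HackerRank's input.

# Editor's illustrative check, not part of the submission file.
points = [(95, 85), (85, 95), (80, 70), (70, 65), (60, 70)]   # invented sample
n = len(points)
sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x ** 2, x * y) for x, y in points]))
b = (n * sxy - sx * sy) / (n * sx2 - sx ** 2)   # slope
a = sy / n - b * sx / n                         # intercept
print('{:.3f}'.format(a + b * 80))              # prediction at x = 80, about 78.288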
24.785714
71
0.501441
0
0
0
0
0
0
0
0
116
0.334294
86c4016c71680c25695f7a5d4e332b95ab4759b0
450
py
Python
rlutils/gym/envs/reset_obs/hopper.py
vermouth1992/rl-util
4c06ab8f5c96a44e58f88cf30146bcb837057112
[ "Apache-2.0" ]
null
null
null
rlutils/gym/envs/reset_obs/hopper.py
vermouth1992/rl-util
4c06ab8f5c96a44e58f88cf30146bcb837057112
[ "Apache-2.0" ]
null
null
null
rlutils/gym/envs/reset_obs/hopper.py
vermouth1992/rl-util
4c06ab8f5c96a44e58f88cf30146bcb837057112
[ "Apache-2.0" ]
null
null
null
import gym.envs.mujoco.hopper as hopper
import numpy as np


class HopperEnv(hopper.HopperEnv):
    def _get_obs(self):
        return np.concatenate([
            self.sim.data.qpos.flat[1:],
            self.sim.data.qvel.flat,
        ])

    def reset_obs(self, obs):
        state = np.insert(obs, 0, 0.)
        qpos = state[:self.model.nq]
        qvel = state[self.model.nq:]
        self.set_state(qpos, qvel)
        return self._get_obs()
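A hedged round-trip sketch of reset_obs; it assumes a working gym plus mujoco-py installation and the older 4-tuple step API, neither of which is shown in the original module.

# Editor's illustrative usage, not part of hopper.py.
env = HopperEnv()
snapshot = env.reset()                       # observation = qpos[1:] + qvel

for _ in range(10):                          # wander away from the saved state
    obs, _, done, _ = env.step(env.action_space.sample())
    if done:
        break

restored = env.reset_obs(snapshot)           # rebuild qpos/qvel (the x position is assumed 0)
print(np.allclose(restored, snapshot))       # True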
25
40
0.591111
388
0.862222
0
0
0
0
0
0
0
0
86c445a03cb1fedcfaa9af4175640a3d81afd9b9
8,505
py
Python
reco_utils/recommender/deeprec/io/iterator.py
yutian-zhao/recommenders
17b9c1280a79019dd91f50b3a7e66f25cb5004b1
[ "MIT" ]
null
null
null
reco_utils/recommender/deeprec/io/iterator.py
yutian-zhao/recommenders
17b9c1280a79019dd91f50b3a7e66f25cb5004b1
[ "MIT" ]
null
null
null
reco_utils/recommender/deeprec/io/iterator.py
yutian-zhao/recommenders
17b9c1280a79019dd91f50b3a7e66f25cb5004b1
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import numpy as np # import tensorflow as tf import abc class BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line): pass @abc.abstractmethod def load_data_from_file(self, infile): pass @abc.abstractmethod def _convert_data(self, labels, features): pass @abc.abstractmethod def gen_feed_dict(self, data_dict): pass # class FFMTextIterator(BaseIterator): # """Data loader for FFM format based models, such as xDeepFM. # Iterator will not load the whole data into memory. Instead, it loads data into memory # per mini-batch, so that large files can be used as input data. # """ # def __init__(self, hparams, graph, col_spliter=" ", ID_spliter="%"): # """Initialize an iterator. Create necessary placeholders for the model. # Args: # hparams (obj): Global hyper-parameters. Some key settings such as #_feature and #_field are there. # graph (obj): the running graph. All created placeholder will be added to this graph. # col_spliter (str): column splitter in one line. # ID_spliter (str): ID splitter in one line. # """ # self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT # self.col_spliter = col_spliter # self.ID_spliter = ID_spliter # self.batch_size = hparams.batch_size # self.graph = graph # with self.graph.as_default(): # self.labels = tf.placeholder(tf.float32, [None, 1], name="label") # self.fm_feat_indices = tf.placeholder( # tf.int64, [None, 2], name="fm_feat_indices" # ) # self.fm_feat_values = tf.placeholder( # tf.float32, [None], name="fm_feat_values" # ) # self.fm_feat_shape = tf.placeholder(tf.int64, [None], name="fm_feat_shape") # self.dnn_feat_indices = tf.placeholder( # tf.int64, [None, 2], name="dnn_feat_indices" # ) # self.dnn_feat_values = tf.placeholder( # tf.int64, [None], name="dnn_feat_values" # ) # self.dnn_feat_weights = tf.placeholder( # tf.float32, [None], name="dnn_feat_weights" # ) # self.dnn_feat_shape = tf.placeholder( # tf.int64, [None], name="dnn_feat_shape" # ) # def parser_one_line(self, line): # """Parse one string line into feature values. # Args: # line (str): a string indicating one instance # Returns: # list: Parsed results,including label, features and impression_id # """ # impression_id = 0 # words = line.strip().split(self.ID_spliter) # if len(words) == 2: # impression_id = words[1].strip() # cols = words[0].strip().split(self.col_spliter) # label = float(cols[0]) # features = [] # for word in cols[1:]: # if not word.strip(): # continue # tokens = word.split(":") # features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) # return label, features, impression_id # def load_data_from_file(self, infile): # """Read and parse data from a file. # Args: # infile (str): text input file. Each line in this file is an instance. # Returns: # obj: An iterator that will yields parsed results, in the format of graph feed_dict. 
# """ # label_list = [] # features_list = [] # impression_id_list = [] # cnt = 0 # with tf.gfile.GFile(infile, "r") as rd: # for line in rd: # label, features, impression_id = self.parser_one_line(line) # features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id) # cnt += 1 # if cnt == self.batch_size: # res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list = [] # features_list = [] # impression_id_list = [] # cnt = 0 # if cnt > 0: # res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, cnt # def _convert_data(self, labels, features): # """Convert data into numpy arrays that are good for further operation. # Args: # labels (list): a list of ground-truth labels. # features (list): a 3-dimensional list, carrying a list (batch_size) of feature array, # where each feature array is a list of [field_idx, feature_idx, feature_value] tuple. # Returns: # dict: A dictionary, contains multiple numpy arrays that are convenient for further operation. # """ # dim = self.feature_cnt # FIELD_COUNT = self.field_cnt # instance_cnt = len(labels) # fm_feat_indices = [] # fm_feat_values = [] # fm_feat_shape = [instance_cnt, dim] # dnn_feat_indices = [] # dnn_feat_values = [] # dnn_feat_weights = [] # dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1] # for i in range(instance_cnt): # m = len(features[i]) # dnn_feat_dic = {} # for j in range(m): # fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0 # else: # dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append( # [ # i * FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ] # ) # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1 # sorted_index = sorted( # range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) # res = {} # res["fm_feat_indices"] = np.asarray(fm_feat_indices, dtype=np.int64) # res["fm_feat_values"] = np.asarray(fm_feat_values, dtype=np.float32) # res["fm_feat_shape"] = np.asarray(fm_feat_shape, dtype=np.int64) # res["labels"] = np.asarray([[label] for label in labels], dtype=np.float32) # res["dnn_feat_indices"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index # ] # res["dnn_feat_values"] = np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index # ] # res["dnn_feat_weights"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index # ] # res["dnn_feat_shape"] = np.asarray(dnn_feat_shape, dtype=np.int64) # return res # def gen_feed_dict(self, data_dict): # """Construct a dictionary that maps graph elements to values. # Args: # data_dict (dict): a dictionary that maps string name to numpy arrays. # Returns: # dict: a dictionary that maps graph elements to numpy arrays. 
# """ # feed_dict = { # self.labels: data_dict["labels"], # self.fm_feat_indices: data_dict["fm_feat_indices"], # self.fm_feat_values: data_dict["fm_feat_values"], # self.fm_feat_shape: data_dict["fm_feat_shape"], # self.dnn_feat_indices: data_dict["dnn_feat_indices"], # self.dnn_feat_values: data_dict["dnn_feat_values"], # self.dnn_feat_weights: data_dict["dnn_feat_weights"], # self.dnn_feat_shape: data_dict["dnn_feat_shape"], # } # return feed_dict
38.310811
112
0.55485
345
0.040564
0
0
295
0.034685
0
0
7,894
0.92816
86c60d6dd1a1d7d8e36a571eddb9e98fee94282b
108
py
Python
HW6/YuliiaKutsyk/3_ unfinished_loop_bug_fixing.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
null
null
null
HW6/YuliiaKutsyk/3_ unfinished_loop_bug_fixing.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
null
null
null
HW6/YuliiaKutsyk/3_ unfinished_loop_bug_fixing.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
6
2022-02-22T22:30:49.000Z
2022-03-28T12:51:19.000Z
def create_array(n):
    res = []
    i = 1
    while i <= n:
        res.append(i)
        i += 1
    return res
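A quick hedged check of the fixed loop, with the expected output in the comments.

# Editor's sanity check, not part of the exercise file.
print(create_array(5))   # [1, 2, 3, 4, 5]
print(create_array(0))   # []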
13.5
21
0.453704
0
0
0
0
0
0
0
0
0
0
86c692ea321aa5d6632c79b6a92f458cad0e5a70
2,723
py
Python
ncm/api.py
SDhuangao/netease-cloud-music-dl
4a970504e1fec0a9848f3920b392aa507d6b3879
[ "MIT" ]
null
null
null
ncm/api.py
SDhuangao/netease-cloud-music-dl
4a970504e1fec0a9848f3920b392aa507d6b3879
[ "MIT" ]
null
null
null
ncm/api.py
SDhuangao/netease-cloud-music-dl
4a970504e1fec0a9848f3920b392aa507d6b3879
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

import requests

from ncm.encrypt import encrypted_request
from ncm.constants import headers
from ncm.constants import song_download_url
from ncm.constants import get_song_url
from ncm.constants import get_album_url
from ncm.constants import get_artist_url
from ncm.constants import get_playlist_url


class CloudApi(object):

    def __init__(self, timeout=30):
        super().__init__()
        self.session = requests.session()
        self.session.headers.update(headers)
        self.timeout = timeout

    def get_request(self, url):
        response = self.session.get(url, timeout=self.timeout)
        result = response.json()
        if result['code'] != 200:
            print('Return {} when try to get {}'.format(result, url))
        else:
            return result

    def post_request(self, url, params):
        data = encrypted_request(params)
        response = self.session.post(url, data=data, timeout=self.timeout)
        result = response.json()
        if result['code'] != 200:
            print('Return {} when try to post {} => {}'.format(result, params, url))
        else:
            return result

    def get_song(self, song_id):
        """
        Get song info by song id
        :param song_id:
        :return:
        """
        url = get_song_url(song_id)
        result = self.get_request(url)
        return result['songs'][0]

    def get_album_songs(self, album_id):
        """
        Get all album songs info by album id
        :param album_id:
        :return:
        """
        url = get_album_url(album_id)
        result = self.get_request(url)
        return result['album']['songs']

    def get_song_url(self, song_id, bit_rate=320000):
        """Get a song's download url.
        :params song_id: song id<int>.
        :params bit_rate: {'MD 128k': 128000, 'HD 320k': 320000}
        :return:
        """
        url = song_download_url
        csrf = ''
        params = {'ids': [song_id], 'br': bit_rate, 'csrf_token': csrf}
        result = self.post_request(url, params)
        song_url = result['data'][0]['url']
        return song_url

    def get_hot_songs(self, artist_id):
        """
        Get an artist's 50 hot songs
        :param artist_id:
        :return:
        """
        url = get_artist_url(artist_id)
        result = self.get_request(url)
        return result['hotSongs']

    def get_playlist_songs(self, playlist_id):
        """
        Get all songs of a public playlist
        :param playlist_id:
        :return:
        """
        url = get_playlist_url(playlist_id)
        result = self.get_request(url)
        return result['playlist']['trackIds'], result['playlist']['name']
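A hedged usage sketch for CloudApi; the song id is invented, the 'name' field is assumed to be present in the NetEase payload, and every call needs network access to the endpoints wired up in ncm.constants.

# Editor's illustrative usage, not part of ncm/api.py.
if __name__ == '__main__':
    api = CloudApi(timeout=10)
    song_id = 186016                                   # placeholder id, purely illustrative
    song = api.get_song(song_id)                       # raw song-info dict
    print(song.get('name'))                            # field name assumed, not guaranteed
    print(api.get_song_url(song_id, bit_rate=128000))  # MD-quality download url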
27.505051
84
0.589791
2,392
0.878443
0
0
0
0
0
0
748
0.274697
86c7301877ec46ff5d214d67d7d24373229e91aa
15,337
py
Python
book/trees/binary_search_tree.py
Web-Dev-Collaborative/algos
d280581d74ded382094283d931a202eb55fd8369
[ "CC0-1.0" ]
153
2015-12-24T00:32:23.000Z
2022-02-24T06:00:29.000Z
book/trees/binary_search_tree.py
Web-Dev-Collaborative/algos
d280581d74ded382094283d931a202eb55fd8369
[ "CC0-1.0" ]
78
2015-11-17T11:46:15.000Z
2021-06-28T18:37:58.000Z
book/trees/binary_search_tree.py
rhivent/algo-books-python
c4fa29616ca9a8a15ba40fa12d21fd8f35096d40
[ "CC0-1.0" ]
66
2015-11-02T03:38:02.000Z
2022-03-05T17:36:26.000Z
# -*- coding: utf-8 -*- """ The `TreeNode` class provides many helper functions that make the work done in the `BinarySearchTree` class methods much easier. The constructor for a `TreeNode`, along with these helper functions, is shown below. As you can see, many of these helper functions help to classify a node according to its own position as a child, (left or right) and the kind of children the node has. The `TreeNode` class will also explicitly keep track of the parent as an attribute of each node. You will see why this is important when we discuss the implementation for the `del` operator. One of the more interesting methods of `TreeNode` provides an interface to simply iterate over all the keys in the tree in order. You already know how to traverse a binary tree in order, using the `inorder` traversal algorithm. However, because we want our iterator to operate lazily, in this case we use the `yield` keyword to define our `__iter__` method as a Python generator. Pay close attention to the `__iter__` implementation as at first glance you might think that the code is not recursive: in fact, because `__iter__` overrides the `for x in` operation for iteration, it really is recursive! Our full implementation of `TreeNode` is provided below. It includes three further methods `find_successor`, `find_min` and `splice_out` which you can ignore for now as we will return to them later when discussing deletion. """ class TreeNode(object): def __init__(self, key, val, left=None, right=None, parent=None): self.key = key self.val = val self.left = left self.right = right self.parent = parent def is_left_child(self): return self.parent and self.parent.left == self def is_right_child(self): return self.parent and self.parent.right == self def is_leaf(self): return not (self.right or self.left) def has_any_children(self): return self.right or self.left def has_both_children(self): return self.right and self.left def has_one_child(self): return self.has_any_children() and not self.has_both_children() def replace_node_data(self, key, val, left, right): self.key = key self.val = val self.left = left self.right = right if self.left: self.left.parent = self if self.right: self.right.parent = self def __iter__(self): if self is None: return if self.left: # `in` calls `__iter__` so is recursive for elem in self.left: yield elem yield self.key if self.right: # recurse again for elem in self.right: yield elem def find_successor(self): if self.right: return self.right.find_min() if self.parent is None: return None if self.is_left_child(): return self.parent self.parent.right = None successor = self.parent.find_successor() self.parent.right = self return successor def find_min(self): current = self while current.left: current = current.left return current def splice_out(self): if self.is_leaf(): if self.is_left_child(): self.parent.left = None else: self.parent.right = None else: promoted_node = self.left or self.right if self.is_left_child(): self.parent.left = promoted_node else: self.parent.right = promoted_node promoted_node.parent = self.parent """ Now that we have our `TreeNode` class we can begin to write `BinarySearchTree` itself. Recall that the core functionality of this class will be to enable `put`ing to and `get`ing from the tree, so we begin our implementation with the `put` functionality. In order to enable the `tree[1] = 'foo'` style assignment interface for our `BinarySearchTree` instances, we override the `__setitem__` magic method. 
In this method we first check to see if the tree already has a root. If there is not a root then we create a new `TreeNode` and set it as the root of the tree. If a root node is already in place then `put` calls the private, recursive, helper function `_put` to search the tree according to the following algorithm: - Starting at the root of the tree, search the binary tree comparing the new key to the key in the current node. If the new key is less than the current node, search the left subtree. If the new key is greater than the current node, search the right subtree. - When there is no left (or right) child to search, we have found the position in the tree where the new node should be installed. - To add a node to the tree, create a new `TreeNode` object and insert the object at the point discovered in the previous step. The code below shows the Python code for inserting a new node in the tree. The `_put` function is written recursively following the steps outlined above. Notice that when a new child is inserted into the tree, the `node` is passed to the new tree as the parent. One important problem with our implementation of insert is that duplicate keys are not handled properly. As our tree is implemented a duplicate key will create a new node with the same key value in the right subtree of the node having the original key. The result of this is that the node with the new key will never be found during a search. A better way to handle the insertion of a duplicate key is for the value associated with the new key to replace the old value. We leave fixing this bug as an exercise for you. """ class BinarySearchTree(object): TreeNodeClass = TreeNode def __init__(self): self.root = None self.size = 0 def __len__(self): return self.size def __iter__(self): return self.root.__iter__() def __setitem__(self, key, val): if self.root: self._put(key, val, self.root) else: self.root = self.TreeNodeClass(key, val) self.size = self.size + 1 def _put(self, key, val, node): if key < node.key: if node.left: self._put(key, val, node.left) else: node.left = self.TreeNodeClass(key, val, parent=node) else: if node.right: self._put(key, val, node.right) else: node.right = self.TreeNodeClass(key, val, parent=node) """ The diagram below illustrates the process for inserting a new node into a binary search tree. The lightly shaded nodes indicate the nodes that were visited during the insertion process. ![Inserting a node with key = 19](figures/binary-search-tree-put.png) Once the tree is constructed, the next task is to implement the retrieval of a value for a given key. The `get` functionality is even easier than the `put` functionality because we simply search the tree recursively until we get to a non-matching leaf node or find a matching key. When a matching key is found, the value stored in the val of the node is returned. Again, inorder to enable a `tree[1]` retrieval interface, we overload one of Python’s magic methods—in this case `__getitem__`. Just like with `__setitem__`, the primary purpose of this method is to handle presence and absence of a root node, and delegates the core `get` functionality to `_get`. The search code in the `_get` method uses the same logic for choosing the left or right child as the `_put` method. Notice that the `_get` method returns a `TreeNode` to `__getitem__`, this allows `_get` to be used as a flexible helper method for other `BinarySearchTree` methods that may need to make use of other data from the `TreeNode` besides the val. 
""" def __getitem__(self, key): if self.root: result = self._get(key, self.root) if result: return result.val raise KeyError def _get(self, key, node): if not node: return None if node.key == key: return node if key < node.key: return self._get(key, node.left) return self._get(key, node.right) """ Using `_get`, we can implement the `in` operation by writing a `__contains__` method for the `BinarySearchTree`. The `__contains__` method will simply call `_get` and return `True` if `_get` returns a value, or `False` if it returns `None`. The code for `__contains__` is shown below. """ def __contains__(self, key): return bool(self._get(key, self.root)) """ Finally, we turn our attention to the most challenging method in the binary search tree: the deletion of a key. The first task is to find the node to delete by searching the tree. If the tree has more than one node we search using the `_get` method to find the `TreeNode` that needs to be removed. If the tree only has a single node, that means we are removing the root of the tree, but we still must check to make sure the key of the root matches the key that is to be deleted. In either case if the key is not found the `del` operator raises an error. """ def delete(self, key): if self.size > 1: node_to_remove = self._get(key, self.root) if node_to_remove: self.remove(node_to_remove) self.size = self.size - 1 return elif self.size == 1 and self.root.key == key: self.root = None self.size = self.size - 1 return raise KeyError('Error, key not in tree') def __delitem__(self, key): self.delete(key) """ Once we’ve found the node containing the key we want to delete, there are three cases that we must consider: 1. The node to be deleted has no children 2. The node to be deleted has only one child 3. The node to be deleted has two children The first case is straightforward. If the current node has no children all we need to do is delete the node and remove the reference to this node in the parent. The code for this case is shown below. """ def remove(self, node): if node.is_leaf() and node.parent is not None: if node == node.parent.left: node.parent.left = None else: node.parent.right = None """ ![Deleting Node 16, a node without children](figures/binary-search-tree-delete-1.png) The second case is only slightly more complicated (see below). If a node has only a single child, then we can simply promote the child to take the place of its parent. The code for this case is shown in the next code sample. As you look at this code you will see that there are six cases to consider. Since the cases are symmetric with respect to either having a left or right child we will just discuss the case where the current node has a left child. The decision proceeds as follows: 1. If the current node is a left child then we only need to update the parent reference of the left child to point to the parent of the current node, and then update the left child reference of the parent to point to the current node’s left child. 2. If the current node is a right child then we only need to update the parent reference of the right child to point to the parent of the current node, and then update the right child reference of the parent to point to the current node’s right child. 3. If the current node has no parent, it must be the root. In this case we will just replace the `key`, `val`, `left`, and `right` data by calling the `replace_node_data` method on the root. 
Code for this decision process may look like: """ elif node.has_one_child(): promoted_node = node.left or node.right if node.is_left_child(): promoted_node.parent = node.parent node.parent.left = promoted_node elif node.is_right_child(): promoted_node.parent = node.parent node.parent.right = promoted_node else: node.replace_node_data( promoted_node.key, promoted_node.val, promoted_node.left, promoted_node.right ) """ ![Deleting node 25, a node that has a single child](figures/binary-search-tree-delete-2.png) The third case is the most difficult case to handle (see below). If a node has two children, then it is unlikely that we can simply promote one of them to take the node’s place. We can, however, search the tree for a node that can be used to replace the one scheduled for deletion. What we need is a node that will preserve the binary search tree relationships for both of the existing left and right subtrees. The node that will do this is the node that has the next-largest key in the tree. We call this node the **successor**, and we will look at a way to find the successor shortly. The successor is guaranteed to have no more than one child, so we know how to remove it using the two cases for deletion that we have already implemented. Once the successor has been removed, we simply put it in the tree in place of the node to be deleted. ![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png) The code to handle the third case is shown below. Notice that we make use of the helper methods `find_successor` and `find_min` to find the successor. To remove the successor, we make use of the method `splice_out`. The reason we use `splice_out` is that it goes directly to the node we want to splice out and makes the right changes. We could call `delete` recursively, but then we would waste time re-searching for the key node. """ else: # has both children successor = node.find_successor() if successor: successor.splice_out() node.key = successor.key node.val = successor.val """ The code to find the successor is shown above and as you can see is a method of the `TreeNode` class. This code makes use of the same properties of binary search trees that cause an inorder traversal to print out the nodes in the tree from smallest to largest. There are three cases to consider when looking for the successor: 1. If the node has a right child, then the successor is the smallest key in the right subtree. 2. If the node has no right child and is the left child of its parent, then the parent is the successor. 3. If the node is the right child of its parent, and itself has no right child, then the successor to this node is the successor of its parent, excluding this node. The first condition is the only one that matters for us when deleting a node from a binary search tree. The `find_min` method is called to find the minimum key in a subtree. You should convince yourself that the minimum valued key in any binary search tree is the leftmost child of the tree. Therefore the `find_min` method simply follows the `left` references in each node of the subtree until it reaches a node that does not have a left child. """
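To tie the interleaved fragments together, a hedged usage sketch of the assembled BinarySearchTree; the keys and values are invented, and it assumes the deletion cases above are merged into the single remove method the text describes.

# Editor's illustrative usage, not part of the book's source.
tree = BinarySearchTree()
for key, val in [(70, 'BST'), (31, 'AVL'), (93, 'Red-Black'), (94, 'Splay'), (14, 'Treap')]:
    tree[key] = val

print(tree[31])                # 'AVL', via __getitem__/_get
print(93 in tree)              # True, via __contains__
print(list(tree))              # [14, 31, 70, 93, 94] -- the inorder generator yields keys sorted
del tree[70]                   # two-children case: the successor 93 is spliced into 70's place
print(list(tree), len(tree))   # [14, 31, 93, 94] 4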
37.775862
78
0.684684
13,906
0.905987
339
0.022086
0
0
0
0
10,189
0.663822
86c7d4acbb62e0447380b9c4c68ef07bbf5ead1b
28,677
py
Python
fire/core.py
adamruth/python-fire
6912ccd56f50e0f4bb30a0725d95858ef29f3bde
[ "Apache-2.0" ]
1
2020-02-05T04:43:03.000Z
2020-02-05T04:43:03.000Z
fire/core.py
chesnjak/python-fire
72604f40314008e562ba47936dcc183b51166b72
[ "Apache-2.0" ]
null
null
null
fire/core.py
chesnjak/python-fire
72604f40314008e562ba47936dcc183b51166b72
[ "Apache-2.0" ]
1
2020-07-15T22:58:25.000Z
2020-07-15T22:58:25.000Z
# Copyright (C) 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Python Fire is a library for creating CLIs from absolutely any Python object. You can call Fire on any Python object: functions, classes, modules, objects, dictionaries, lists, tuples, etc. They all work! Python Fire turns any Python object into a command line interface. Simply call the Fire function as your main method to create a CLI. When using Fire to build a CLI, your main method includes a call to Fire. Eg: def main(argv): fire.Fire(Component) A Fire CLI command is run by consuming the arguments in the command in order to access a member of current component, call the current component (if it's a function), or instantiate the current component (if it's a class). The target component begins as Component, and at each operation the component becomes the result of the preceding operation. For example "command fn arg1 arg2" might access the "fn" property of the initial target component, and then call that function with arguments 'arg1' and 'arg2'. Additional examples are available in the examples directory. Fire Flags, common to all Fire CLIs, must go after a separating "--". For example, to get help for a command you might run: `command -- --help`. The available flags for all Fire CLIs are: -v --verbose: Include private members in help and usage information. -h --help: Provide help and usage information for the command. -i --interactive: Drop into a Python REPL after running the command. --completion: Write the Bash completion script for the tool to stdout. --separator SEPARATOR: Use SEPARATOR in place of the default separator, '-'. --trace: Get the Fire Trace for the command. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import json import os import pipes import shlex import sys import types from fire import completion from fire import decorators from fire import helputils from fire import inspectutils from fire import interact from fire import parser from fire import trace import six def Fire(component=None, command=None, name=None): """This function, Fire, is the main entrypoint for Python Fire. Executes a command either from the `command` argument or from sys.argv by recursively traversing the target object `component`'s members consuming arguments, evaluating functions, and instantiating classes as it goes. When building a CLI with Fire, your main method should call this function. Args: component: The initial target component. command: Optional. If supplied, this is the command executed. If not supplied, then the command is taken from sys.argv instead. This can be a string or a list of strings; a list of strings is preferred. name: Optional. The name of the command as entered at the command line. Used in interactive mode and for generating the completion script. Returns: The result of executing the Fire command. Execution begins with the initial target component. 
The component is updated by using the command arguments to either access a member of the current component, call the current component (if it's a function), or instantiate the current component (if it's a class). When all arguments are consumed and there's no function left to call or class left to instantiate, the resulting current component is the final result. Raises: ValueError: If the command argument is supplied, but not a string or a sequence of arguments. FireExit: When Fire encounters a FireError, Fire will raise a FireExit with code 2. When used with the help or trace flags, Fire will raise a FireExit with code 0 if successful. """ name = name or os.path.basename(sys.argv[0]) # Get args as a list. if isinstance(command, six.string_types): args = shlex.split(command) elif isinstance(command, (list, tuple)): args = command elif command is None: # Use the command line args by default if no command is specified. args = sys.argv[1:] else: raise ValueError('The command argument must be a string or a sequence of ' 'arguments.') # Determine the calling context. caller = inspect.stack()[1] caller_frame = caller[0] caller_globals = caller_frame.f_globals caller_locals = caller_frame.f_locals context = {} context.update(caller_globals) context.update(caller_locals) component_trace = _Fire(component, args, context, name) if component_trace.HasError(): for help_flag in ['-h', '--help']: if help_flag in component_trace.elements[-1].args: command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand()) print(('WARNING: The proper way to show help is {cmd}.\n' 'Showing help anyway.\n').format(cmd=pipes.quote(command)), file=sys.stderr) print('Fire trace:\n{trace}\n'.format(trace=component_trace), file=sys.stderr) result = component_trace.GetResult() print( helputils.HelpString(result, component_trace, component_trace.verbose), file=sys.stderr) raise FireExit(2, component_trace) elif component_trace.show_trace and component_trace.show_help: print('Fire trace:\n{trace}\n'.format(trace=component_trace), file=sys.stderr) result = component_trace.GetResult() print( helputils.HelpString(result, component_trace, component_trace.verbose), file=sys.stderr) raise FireExit(0, component_trace) elif component_trace.show_trace: print('Fire trace:\n{trace}'.format(trace=component_trace), file=sys.stderr) raise FireExit(0, component_trace) elif component_trace.show_help: result = component_trace.GetResult() print( helputils.HelpString(result, component_trace, component_trace.verbose), file=sys.stderr) raise FireExit(0, component_trace) else: _PrintResult(component_trace, verbose=component_trace.verbose) result = component_trace.GetResult() return result def CompletionScript(name, component): """Returns the text of the Bash completion script for a Fire CLI.""" return completion.Script(name, component) class FireError(Exception): """Exception used by Fire when a Fire command cannot be executed. These exceptions are not raised by the Fire function, but rather are caught and added to the FireTrace. """ class FireExit(SystemExit): """An exception raised by Fire to the client in the case of a FireError. The trace of the Fire program is available on the `trace` property. This exception inherits from SystemExit, so clients may explicitly catch it with `except SystemExit` or `except FireExit`. If not caught, this exception will cause the client program to exit without a stacktrace. """ def __init__(self, code, component_trace): """Constructs a FireExit exception. Args: code: (int) Exit code for the Fire CLI. 
component_trace: (FireTrace) The trace for the Fire command. """ super(FireExit, self).__init__(code) self.trace = component_trace def _PrintResult(component_trace, verbose=False): """Prints the result of the Fire call to stdout in a human readable way.""" # TODO: Design human readable deserializable serialization method # and move serialization to it's own module. result = component_trace.GetResult() if isinstance(result, (list, set, types.GeneratorType)): for i in result: print(_OneLineResult(i)) elif inspect.isgeneratorfunction(result): raise NotImplementedError elif isinstance(result, dict): print(_DictAsString(result, verbose)) elif isinstance(result, tuple): print(_OneLineResult(result)) elif isinstance(result, (bool, six.string_types, six.integer_types, float, complex)): print(result) elif result is not None: print(helputils.HelpString(result, component_trace, verbose)) def _DictAsString(result, verbose=False): """Returns a dict as a string. Args: result: The dict to convert to a string verbose: Whether to include 'hidden' members, those keys starting with _. Returns: A string representing the dict """ result = {key: value for key, value in result.items() if _ComponentVisible(key, verbose)} if not result: return '{}' longest_key = max(len(str(key)) for key in result.keys()) format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1) lines = [] for key, value in result.items(): line = format_string.format(key=str(key) + ':', value=_OneLineResult(value)) lines.append(line) return '\n'.join(lines) def _ComponentVisible(component, verbose=False): """Returns whether a component should be visible in the output.""" return ( verbose or not isinstance(component, six.string_types) or not component.startswith('_')) def _OneLineResult(result): """Returns result serialized to a single line string.""" # TODO: Ensure line is fewer than eg 120 characters. if isinstance(result, six.string_types): return str(result).replace('\n', ' ') try: # Don't force conversion to ascii. return json.dumps(result, ensure_ascii=False) except (TypeError, ValueError): return str(result).replace('\n', ' ') def _Fire(component, args, context, name=None): """Execute a Fire command on a target component using the args supplied. Arguments that come after a final isolated '--' are treated as Flags, eg for interactive mode or completion script generation. Other arguments are consumed by the execution of the Fire command, eg in the traversal of the members of the component, or in calling a function or instantiating a class found during the traversal. The steps performed by this method are: 1. Parse any Flag args (the args after the final --) 2. Start with component as the current component. 2a. If the current component is a class, instantiate it using args from args. 2b. If the current component is a routine, call it using args from args. 2c. Otherwise access a member from component using an arg from args. 2d. Repeat 2a-2c until no args remain. 3a. Embed into ipython REPL if interactive mode is selected. 3b. Generate a completion script if that flag is provided. In step 2, arguments will only ever be consumed up to a separator; a single step will never consume arguments from both sides of a separator. The separator defaults to a hyphen (-), and can be overwritten with the --separator Fire argument. Args: component: The target component for Fire. args: A list of args to consume in Firing on the component, usually from the command line. 
context: A dict with the local and global variables available at the call to Fire. name: Optional. The name of the command. Used in interactive mode and in the tab completion script. Returns: FireTrace of components starting with component, tracing Fire's execution path as it consumes args. Raises: ValueError: If there are arguments that cannot be consumed. ValueError: If --completion is specified but no name available. """ args, flag_args = parser.SeparateFlagArgs(args) argparser = parser.CreateParser() parsed_flag_args, unused_args = argparser.parse_known_args(flag_args) verbose = parsed_flag_args.verbose interactive = parsed_flag_args.interactive separator = parsed_flag_args.separator show_completion = parsed_flag_args.completion show_help = parsed_flag_args.help show_trace = parsed_flag_args.trace # component can be a module, class, routine, object, etc. if component is None: component = context initial_component = component component_trace = trace.FireTrace( initial_component=initial_component, name=name, separator=separator, verbose=verbose, show_help=show_help, show_trace=show_trace) instance = None remaining_args = args while True: last_component = component initial_args = remaining_args if not remaining_args and (show_help or interactive or show_trace or show_completion): # Don't initialize the final class or call the final function unless # there's a separator after it, and instead process the current component. break saved_args = [] used_separator = False if separator in remaining_args: # For the current component, only use arguments up to the separator. separator_index = remaining_args.index(separator) saved_args = remaining_args[separator_index + 1:] remaining_args = remaining_args[:separator_index] used_separator = True assert separator not in remaining_args if inspect.isclass(component) or inspect.isroutine(component): # The component is a class or a routine; we'll try to initialize it or # call it. isclass = inspect.isclass(component) try: target = component.__name__ filename, lineno = inspectutils.GetFileAndLine(component) component, consumed_args, remaining_args, capacity = _CallCallable( component, remaining_args) # Update the trace. if isclass: component_trace.AddInstantiatedClass( component, target, consumed_args, filename, lineno, capacity) else: component_trace.AddCalledRoutine( component, target, consumed_args, filename, lineno, capacity) except FireError as error: component_trace.AddError(error, initial_args) return component_trace if last_component is initial_component: # If the initial component is a class, keep an instance for use with -i. instance = component elif isinstance(component, (list, tuple)) and remaining_args: # The component is a tuple or list; we'll try to access a member. arg = remaining_args[0] try: index = int(arg) component = component[index] except (ValueError, IndexError): error = FireError( 'Unable to index into component with argument:', arg) component_trace.AddError(error, initial_args) return component_trace remaining_args = remaining_args[1:] filename = None lineno = None component_trace.AddAccessedProperty( component, index, [arg], filename, lineno) elif isinstance(component, dict) and remaining_args: # The component is a dict; we'll try to access a member. target = remaining_args[0] if target in component: component = component[target] elif target.replace('-', '_') in component: component = component[target.replace('-', '_')] else: # The target isn't present in the dict as a string, but maybe it is as # another type. 
# TODO: Consider alternatives for accessing non-string keys. found_target = False for key, value in component.items(): if target == str(key): component = value found_target = True break if not found_target: error = FireError( 'Cannot find target in dict:', target, component) component_trace.AddError(error, initial_args) return component_trace remaining_args = remaining_args[1:] filename = None lineno = None component_trace.AddAccessedProperty( component, target, [target], filename, lineno) elif remaining_args: # We'll try to access a member of the component. try: target = remaining_args[0] component, consumed_args, remaining_args = _GetMember( component, remaining_args) filename, lineno = inspectutils.GetFileAndLine(component) component_trace.AddAccessedProperty( component, target, consumed_args, filename, lineno) except FireError as error: component_trace.AddError(error, initial_args) return component_trace if used_separator: # Add back in the arguments from after the separator. if remaining_args: remaining_args = remaining_args + [separator] + saved_args elif (inspect.isclass(last_component) or inspect.isroutine(last_component)): remaining_args = saved_args component_trace.AddSeparator() elif component is not last_component: remaining_args = [separator] + saved_args else: # It was an unnecessary separator. remaining_args = saved_args if component is last_component and remaining_args == initial_args: # We're making no progress. break if remaining_args: component_trace.AddError( FireError('Could not consume arguments:', remaining_args), initial_args) return component_trace if show_completion: if name is None: raise ValueError('Cannot make completion script without command name') script = CompletionScript(name, initial_component) component_trace.AddCompletionScript(script) if interactive: variables = context.copy() if name is not None: variables[name] = initial_component variables['component'] = initial_component variables['result'] = component variables['trace'] = component_trace if instance is not None: variables['self'] = instance interact.Embed(variables, verbose) component_trace.AddInteractiveMode() return component_trace def _GetMember(component, args): """Returns a subcomponent of component by consuming an arg from args. Given a starting component and args, this function gets a member from that component, consuming one arg in the process. Args: component: The component from which to get a member. args: Args from which to consume in the search for the next component. Returns: component: The component that was found by consuming an arg. consumed_args: The args that were consumed by getting this member. remaining_args: The remaining args that haven't been consumed yet. Raises: FireError: If we cannot consume an argument to get a member. """ members = dict(inspect.getmembers(component)) arg = args[0] arg_names = [ arg, arg.replace('-', '_'), # treat '-' as '_'. ] for arg_name in arg_names: if arg_name in members: return members[arg_name], [arg], args[1:] raise FireError('Could not consume arg:', arg) def _CallCallable(fn, args): """Calls the function fn by consuming args from args. Args: fn: The function to call or class to instantiate. args: Args from which to consume for calling the function. Returns: component: The object that is the result of the function call. consumed_args: The args that were consumed for the function call. remaining_args: The remaining args that haven't been consumed yet. capacity: Whether the call could have taken additional args. 
""" parse = _MakeParseFn(fn) (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args) result = fn(*varargs, **kwargs) return result, consumed_args, remaining_args, capacity def _MakeParseFn(fn): """Creates a parse function for fn. Args: fn: The function or class to create the parse function for. Returns: A parse function for fn. The parse function accepts a list of arguments and returns (varargs, kwargs), remaining_args. The original function fn can then be called with fn(*varargs, **kwargs). The remaining_args are the leftover args from the arguments to the parse function. """ fn_spec = inspectutils.GetFullArgSpec(fn) all_args = fn_spec.args + fn_spec.kwonlyargs metadata = decorators.GetMetadata(fn) # Note: num_required_args is the number of positional arguments without # default values. All of these arguments are required. num_required_args = len(fn_spec.args) - len(fn_spec.defaults) required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults) def _ParseFn(args): """Parses the list of `args` into (varargs, kwargs), remaining_args.""" kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs( args, all_args, fn_spec.varkw) # Note: _ParseArgs modifies kwargs. parsed_args, kwargs, remaining_args, capacity = _ParseArgs( fn_spec.args, fn_spec.defaults, num_required_args, kwargs, remaining_args, metadata) if fn_spec.varargs or fn_spec.varkw: # If we're allowed *varargs or **kwargs, there's always capacity. capacity = True extra_kw = set(kwargs) - set(fn_spec.kwonlyargs) if fn_spec.varkw is None and extra_kw: raise FireError('Unexpected kwargs present:', extra_kw) missing_kwonly = set(required_kwonly) - set(kwargs) if missing_kwonly: raise FireError('Missing required flags:', missing_kwonly) # If we accept *varargs, then use all remaining arguments for *varargs. if fn_spec.varargs is not None: varargs, remaining_args = remaining_args, [] else: varargs = [] for index, value in enumerate(varargs): varargs[index] = _ParseValue(value, None, None, metadata) varargs = parsed_args + varargs remaining_args += remaining_kwargs consumed_args = args[:len(args) - len(remaining_args)] return (varargs, kwargs), consumed_args, remaining_args, capacity return _ParseFn def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs, remaining_args, metadata): """Parses the positional and named arguments from the available supplied args. Modifies kwargs, removing args as they are used. Args: fn_args: A list of argument names that the target function accepts, including positional and named arguments, but not the varargs or kwargs names. fn_defaults: A list of the default values in the function argspec. num_required_args: The number of required arguments from the function's argspec. This is the number of arguments without a default value. kwargs: Dict with named command line arguments and their values. remaining_args: The remaining command line arguments, which may still be used as positional arguments. metadata: Metadata about the function, typically from Fire decorators. Returns: parsed_args: A list of values to be used as positional arguments for calling the target function. kwargs: The input dict kwargs modified with the used kwargs removed. remaining_args: A list of the supplied args that have not been used yet. capacity: Whether the call could have taken args in place of defaults. Raises: FireError: if additional positional arguments are expected, but none are available. 
""" accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS) capacity = False # If we see a default get used, we'll set capacity to True # Select unnamed args. parsed_args = [] for index, arg in enumerate(fn_args): value = kwargs.pop(arg, None) if value is not None: # A value is specified at the command line. value = _ParseValue(value, index, arg, metadata) parsed_args.append(value) else: # No value has been explicitly specified. if remaining_args and accepts_positional_args: # Use a positional arg. value = remaining_args.pop(0) value = _ParseValue(value, index, arg, metadata) parsed_args.append(value) elif index < num_required_args: raise FireError( 'The function received no value for the required argument:', arg) else: # We're past the args for which there's no default value. # There's a default value for this arg. capacity = True default_index = index - num_required_args # index into the defaults. parsed_args.append(fn_defaults[default_index]) for key, value in kwargs.items(): kwargs[key] = _ParseValue(value, None, key, metadata) return parsed_args, kwargs, remaining_args, capacity def _ParseKeywordArgs(args, fn_args, fn_keywords): """Parses the supplied arguments for keyword arguments. Given a list of arguments, finds occurences of --name value, and uses 'name' as the keyword and 'value' as the value. Constructs and returns a dictionary of these keyword arguments, and returns a list of the remaining arguments. Only if fn_keywords is None, this only finds argument names used by the function, specified through fn_args. This returns the values of the args as strings. They are later processed by _ParseArgs, which converts them to the appropriate type. Args: args: A list of arguments fn_args: A list of argument names that the target function accepts, including positional and named arguments, but not the varargs or kwargs names. fn_keywords: The argument name for **kwargs, or None if **kwargs not used Returns: kwargs: A dictionary mapping keywords to values. remaining_kwargs: A list of the unused kwargs from the original args. remaining_args: A list of the unused arguments from the original args. """ kwargs = {} remaining_kwargs = [] remaining_args = [] if not args: return kwargs, remaining_kwargs, remaining_args skip_argument = False for index, argument in enumerate(args): if skip_argument: skip_argument = False continue arg_consumed = False if argument.startswith('--'): # This is a named argument; get its value from this arg or the next. got_argument = False keyword = argument[2:] contains_equals = '=' in keyword is_bool_syntax = ( not contains_equals and (index + 1 == len(args) or args[index + 1].startswith('--'))) if contains_equals: keyword, value = keyword.split('=', 1) got_argument = True elif is_bool_syntax: # Since there's no next arg or the next arg is a Flag, we consider # this flag to be a boolean. got_argument = True if keyword in fn_args: value = 'True' elif keyword.startswith('no'): keyword = keyword[2:] value = 'False' else: value = 'True' else: if index + 1 < len(args): value = args[index + 1] got_argument = True keyword = keyword.replace('-', '_') # In order for us to consume the argument as a keyword arg, we either: # Need to be explicitly expecting the keyword, or we need to be # accepting **kwargs. 
if got_argument: skip_argument = not contains_equals and not is_bool_syntax arg_consumed = True if keyword in fn_args or fn_keywords: kwargs[keyword] = value else: remaining_kwargs.append(argument) if skip_argument: remaining_kwargs.append(args[index + 1]) if not arg_consumed: # The argument was not consumed, so it is still a remaining argument. remaining_args.append(argument) return kwargs, remaining_kwargs, remaining_args def _ParseValue(value, index, arg, metadata): """Parses value, a string, into the appropriate type. The function used to parse value is determined by the remaining arguments. Args: value: The string value to be parsed, typically a command line argument. index: The index of the value in the function's argspec. arg: The name of the argument the value is being parsed for. metadata: Metadata about the function, typically from Fire decorators. Returns: value, parsed into the appropriate type for calling a function. """ parse_fn = parser.DefaultParseValue # We check to see if any parse function from the fn metadata applies here. parse_fns = metadata.get(decorators.FIRE_PARSE_FNS) if parse_fns: default = parse_fns['default'] positional = parse_fns['positional'] named = parse_fns['named'] if index is not None and 0 <= index < len(positional): parse_fn = positional[index] elif arg in named: parse_fn = named[arg] elif default is not None: parse_fn = default return parse_fn(value)
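# ---------------------------------------------------------------------------
# Illustrative sketch (not part of fire/core.py): the module docstring above
# describes building a CLI by calling Fire on a component and how a command
# such as `command fn arg1 arg2` traverses that component. A minimal example
# of that pattern follows; the Calculator class and the file name used in the
# comments are made up.
# ---------------------------------------------------------------------------
import fire


class Calculator(object):
    """A toy component exposed as a CLI."""

    def double(self, number):
        return 2 * number


if __name__ == '__main__':
    # `python calculator.py double 10` accesses the `double` member and calls
    # it with '10' (parsed to 10); Fire flags such as --trace go after a
    # separating `--`, e.g. `python calculator.py double 10 -- --trace`.
    fire.Fire(Calculator)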
36.577806
80
0.706873
901
0.031419
0
0
0
0
0
0
14,202
0.49524
86c845d512d008bf07b10c93c9a059cfaa7474a0
1,668
py
Python
app.py
AmirValeev/auto-ml-classifier
e803fe92d1ec71e87509845ea61ecc46b363bae6
[ "Apache-2.0" ]
null
null
null
app.py
AmirValeev/auto-ml-classifier
e803fe92d1ec71e87509845ea61ecc46b363bae6
[ "Apache-2.0" ]
null
null
null
app.py
AmirValeev/auto-ml-classifier
e803fe92d1ec71e87509845ea61ecc46b363bae6
[ "Apache-2.0" ]
null
null
null
import os, ast
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
import pickle

def main():

    # Get the dataset from the user's GitHub repository
    dataset_path = "https://raw.githubusercontent.com/" + os.environ["GITHUB_REPOSITORY"] + "/master/dataset.csv"
    data = pd.read_csv(dataset_path)
    print()
    print(data.describe())

    x = data.iloc[:, :-1]
    y = data.iloc[:, -1]
    column_trans = make_column_transformer((OneHotEncoder(), [-1]), remainder='passthrough')  # one-hot encode the last feature column
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

    # define a pipeline
    pipe = make_pipeline(column_trans, SVC())
    pipe.fit(x_train, y_train)  # training the model
    print("\nModel Training Finished")
    accuracy = pipe.score(x_test, y_test)
    print("\nAccuracy of the Model: " + str(accuracy * 100))

    if pipe:
        pickle.dump(pipe, open('model.pkl', 'wb'))  # store the artifact in the docker container

    if not os.environ["INPUT_MYINPUT"] == 'zeroinputs':
        inputs = ast.literal_eval(os.environ["INPUT_MYINPUT"])
        print("\nThe Predicted Output is :")
        output = pipe.predict([inputs])
        print(output)
    else:
        output = ["None"]
        print("\nUser didn't provide inputs to predict")

    print("\n=======================Action Completed========================")
    print(f"::set-output name=myOutput::{output[0]}")

if __name__ == "__main__":
    main()
33.36
126
0.668465
0
0
0
0
0
0
0
0
557
0.333933
86c8b4810cb292d6be03cbb1ee7d68143bb6929f
512
py
Python
util/headers.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
2,027
2019-11-12T18:05:48.000Z
2022-03-31T22:25:04.000Z
util/headers.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
496
2019-11-12T18:13:37.000Z
2022-03-31T10:43:45.000Z
util/headers.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
249
2019-11-12T18:02:27.000Z
2022-03-22T12:19:19.000Z
import base64


def parse_basic_auth(header_value):
    """
    Attempts to parse the given header value as a Base64-encoded Basic auth header.
    """
    if not header_value:
        return None

    parts = header_value.split(" ")
    if len(parts) != 2 or parts[0].lower() != "basic":
        return None

    try:
        # b64decode returns bytes under Python 3; decode to str before
        # splitting on ":" so the username/password parts come back as text.
        basic_parts = base64.b64decode(parts[1]).decode("utf-8").split(":", 1)
        if len(basic_parts) != 2:
            return None

        return basic_parts
    except (ValueError, UnicodeDecodeError):
        return None
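# Usage sketch (illustrative values): "dXNlcjpwYXNz" is the Base64 encoding
# of "user:pass", so a well-formed Basic auth header parses into its two
# parts, while anything else yields None.
if __name__ == "__main__":
    assert parse_basic_auth("Basic dXNlcjpwYXNz") == ["user", "pass"]
    assert parse_basic_auth("Bearer sometoken") is None
    assert parse_basic_auth(None) is None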
21.333333
83
0.599609
0
0
0
0
0
0
0
0
108
0.210938
86c9f566a6eb8b7449c2eceeae4f2a4b402b56f5
3,115
py
Python
indico/core/signals/event/core.py
tobiashuste/indico
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
[ "MIT" ]
null
null
null
indico/core/signals/event/core.py
tobiashuste/indico
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
[ "MIT" ]
null
null
null
indico/core/signals/event/core.py
tobiashuste/indico
c1e6ec0c8c84745988e38c9b1768142a6feb9e0e
[ "MIT" ]
null
null
null
# This file is part of Indico. # Copyright (C) 2002 - 2020 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from indico.core.signals.event import _signals sidemenu = _signals.signal('sidemenu', """ Expected to return ``MenuEntryData`` objects to be added to the event side menu. A single entry can be returned directly, multiple entries must be yielded. """) deleted = _signals.signal('deleted', """ Called when an event is deleted. The *sender* is the event object. The `user` kwarg contains the user performing the deletion if available. """) updated = _signals.signal('updated', """ Called when basic data of an event is updated. The *sender* is the event. A dict of changes is passed in the `changes` kwarg, with ``(old, new)`` tuples for each change. Note than the `person_links` change may happen with `old` and `new` being the same lists for technical reasons. If the key is present, it should be assumed that something changed (usually the order or some data on the person link). """) cloned = _signals.signal('cloned', """ Called when an event is cloned. The *sender* is the `Event` object of the old event, the new event is passed in the `new_event` kwarg. """) type_changed = _signals.signal('type-changed', """ Called when the type of an event is changed. The `sender` is the event, the old type is passed in the `old_type` kwarg. """) moved = _signals.signal('moved', """ Called when an event is moved to a different category. The `sender` is the event, the old category is in the `old_parent` kwarg. """) created = _signals.signal('created', """ Called when a new event is created. The `sender` is the new Event. """) session_updated = _signals.signal('session-updated', """ Called when a session is updated. The *sender* is the session. """) session_deleted = _signals.signal('session-deleted', """ Called when a session is deleted. The *sender* is the session. """) session_block_deleted = _signals.signal('session-block-deleted', """ Called when a session block is deleted. The *sender* is the session block. This signal is called before the ``db.session.delete()`` on the block is executed. """) timetable_buttons = _signals.signal('timetable-buttons', """ Expected to return a list of tuples ('button_name', 'js-call-class'). Called when building the timetable view. """) get_log_renderers = _signals.signal('get-log-renderers', """ Expected to return `EventLogRenderer` classes. """) get_feature_definitions = _signals.signal('get-feature-definitions', """ Expected to return `EventFeature` subclasses. """) metadata_postprocess = _signals.signal('metadata-postprocess', """ Called right after a dict-like representation of an event is created, so that plugins can add their own fields. The *sender* is a string parameter specifying the source of the metadata. The *event* kwarg contains the event object. The metadata is passed in the `data` kwarg. The signal should return a dict that will be used to update the original representation (fields to add or override). """)
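# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Indico source above): the docstrings
# describe when each signal is sent and what the sender and kwargs are. Since
# these are blinker-style signals, a receiver can be attached roughly as
# below; the receiver name is made up, and real Indico code may register
# receivers through a different import path or plugin hook.
# ---------------------------------------------------------------------------
@created.connect
def _on_event_created(event, **kwargs):
    # `event` is the newly created Event object (the signal's sender).
    print('New event created:', event)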
35.804598
81
0.741252
0
0
0
0
0
0
0
0
2,544
0.816693
86ca3287dbcbbef744a382d06122c372e95e738d
3,294
py
Python
cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py
aarunsai81/netapp
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
[ "Apache-2.0" ]
11
2015-08-25T13:11:18.000Z
2020-10-15T11:29:20.000Z
cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py
aarunsai81/netapp
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
[ "Apache-2.0" ]
5
2018-01-25T11:31:56.000Z
2019-05-06T23:13:35.000Z
cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py
aarunsai81/netapp
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
[ "Apache-2.0" ]
11
2015-02-20T18:48:24.000Z
2021-01-30T20:26:18.000Z
# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import urllib from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestDeleteVolume(scaleio.TestScaleIODriver): """Test cases for ``ScaleIODriver.delete_volume()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. """ super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), }, } def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): """Setting the unmap volume before delete flag for tests """ self.driver.configuration.set_override( 'sio_unmap_volume_before_deletion', override=True) self.driver.delete_volume(self.volume)
39.686747
78
0.610808
2,348
0.712811
0
0
0
0
0
0
1,211
0.367638
86ca3cb4e460e6fa964047e9d8e3d1c032b0dafb
1,233
py
Python
example-package/transportation_tutorials/__init__.py
chrisc20042001/python-for-transportation-modeling
677129daa390fcaa6e5cde45960e27d9bd6ca4bf
[ "BSD-3-Clause" ]
null
null
null
example-package/transportation_tutorials/__init__.py
chrisc20042001/python-for-transportation-modeling
677129daa390fcaa6e5cde45960e27d9bd6ca4bf
[ "BSD-3-Clause" ]
null
null
null
example-package/transportation_tutorials/__init__.py
chrisc20042001/python-for-transportation-modeling
677129daa390fcaa6e5cde45960e27d9bd6ca4bf
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-

__version__ = '1.0.2'

import os
import appdirs
import osmnx as ox
import joblib
import requests

from .files import load_vars, save_vars, cached, inflate_tar, download_zipfile
from .data import data, list_data, problematic
from .tools.view_code import show_file
from . import mapping

cache_dir = None
memory = None


def set_cache_dir(location=None, compress=True, verbose=0, **kwargs):
    """
    Set up a cache directory for use with the tutorials.

    Parameters
    ----------
    location : Path-like or False, optional
        A path for the cache files. Set to False to disable caching.
    """
    global memory, cache_dir

    if location is None:
        location = appdirs.user_cache_dir('transportation_tutorials')
    if location is False:
        location = None

    memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs)

    make_cache = (
        (ox, 'gdf_from_place'),
        (ox, 'graph_from_bbox'),
        (requests, 'get'),
        (requests, 'post'),
    )

    for module, func_name in make_cache:
        # Wrap each function with the joblib cache, keeping a reference to the
        # original so repeated calls to set_cache_dir() do not double-wrap it.
        try:
            func = getattr(module, f"_{func_name}_orig")
        except AttributeError:
            func = getattr(module, func_name)
            setattr(module, f"_{func_name}_orig", func)
        setattr(module, func_name, memory.cache(func))

set_cache_dir()
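# Usage sketch (illustrative, not part of the package above): pointing the
# tutorial helpers at an explicit cache directory instead of the default user
# cache location. The path below is made up.
#
#   import transportation_tutorials as tt
#   tt.set_cache_dir('/tmp/tt-cache')   # cache osmnx/requests calls here
#   tt.set_cache_dir(False)             # disable caching entirely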
20.55
79
0.721006
0
0
0
0
0
0
0
0
331
0.268451
86ca8c2e422d5ab12a80680e14af6535e5befd05
2,146
py
Python
common/common.py
czajowaty/curry-bot
91bfbd884342a02c6defd057d27d5b1fcd78cb21
[ "MIT" ]
3
2019-10-09T23:17:55.000Z
2022-02-01T17:34:27.000Z
common/common.py
czajowaty/curry-bot
91bfbd884342a02c6defd057d27d5b1fcd78cb21
[ "MIT" ]
19
2019-10-09T20:42:05.000Z
2022-02-01T08:22:25.000Z
common/common.py
czajowaty/curry-bot
91bfbd884342a02c6defd057d27d5b1fcd78cb21
[ "MIT" ]
6
2020-08-09T20:17:13.000Z
2022-01-27T23:59:28.000Z
from requests.models import PreparedRequest def is_valid_url(url): prepared_request = PreparedRequest() try: prepared_request.prepare_url(url, None) return True except Exception as e: return False class Timestamp: # a speedrun.com style timestamp e.g. "3h 53m 233s 380ms" def __init__(self, s): self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0, 0, 0 for arg in s.split(): if arg.endswith("ms"): self.milliseconds += int(arg[:-2]) elif arg.endswith("s"): self.seconds += int(arg[:-1]) elif arg.endswith("m"): self.minutes += int(arg[:-1]) elif arg.endswith("h"): self.hours += int(arg[:-1]) @staticmethod def from_milliseconds(ms): t = Timestamp("0ms") temp = ms t.hours = temp // 3600000 temp %= 3600000 t.minutes = temp // 60000 temp %= 60000 t.seconds = temp // 1000 t.milliseconds = temp % 1000 return t def __str__(self): result = [] if self.hours != 0: result.append("{}h".format(self.hours)) if not (self.hours == 0 and self.minutes == 0): result.append("{}m".format(self.minutes)) result.append("{}s".format(self.seconds)) if self.milliseconds > 0: result.append("{}ms".format(self.milliseconds)) return ' '.join(result) def __eq__(self, other): return self.hours == other.hours and self.minutes == other.minutes and self.seconds == other.seconds and self.milliseconds == other.milliseconds def __lt__(self, other): if self.hours < other.hours: return True elif self.hours > other.hours: return False if self.minutes < other.minutes: return True elif self.minutes > other.minutes: return False if self.seconds < other.seconds: return True elif self.seconds > other.seconds: return False return self.milliseconds < other.milliseconds
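# Usage sketch (illustrative values, not part of the module above):
# round-tripping a speedrun.com style timestamp through the helpers defined
# in the Timestamp class.
if __name__ == "__main__":
    t = Timestamp("3m 41s 500ms")
    assert (t.hours, t.minutes, t.seconds, t.milliseconds) == (0, 3, 41, 500)
    # 3 * 60000 + 41 * 1000 + 500 == 221500 milliseconds
    assert Timestamp.from_milliseconds(221500) == t
    assert str(t) == "3m 41s 500ms"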
32.029851
152
0.56384
1,908
0.889096
0
0
292
0.136067
0
0
99
0.046132
86cbceec04afe24550cbee582258380f822dc77d
5,265
py
Python
hendrix/test/test_ux.py
anthonyalmarza/hendrix
eebd2a2183cc18ec2267d96a53a70d41b1630ce6
[ "MIT" ]
null
null
null
hendrix/test/test_ux.py
anthonyalmarza/hendrix
eebd2a2183cc18ec2267d96a53a70d41b1630ce6
[ "MIT" ]
null
null
null
hendrix/test/test_ux.py
anthonyalmarza/hendrix
eebd2a2183cc18ec2267d96a53a70d41b1630ce6
[ "MIT" ]
null
null
null
import os import sys from . import HendrixTestCase, TEST_SETTINGS from hendrix.contrib import SettingsError from hendrix.options import options as hx_options from hendrix import ux from mock import patch class TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp() self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull = open(os.devnull, 'w') self.args_list = ['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = "" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS user_settings = 'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd = os.getcwd() _path = sys.path sys.path = [p for p in _path if p != cwd] self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def test_pythonpath(self): options = self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path = [p for p in sys.path if p != test_path] def test_shitty_pythonpath(self): options = self.DEFAULTS test_path = '/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self): options = self.DEFAULTS options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet'] = True options['daemonize'] = True stdout = sys.stdout stderr = sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self): options = self.DEFAULTS options['quiet'] = True options['daemonize'] = True options['traceback'] = True stdout = sys.stdout stderr = sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) 
self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS] class Process(object): def poll(self): return 0 with patch('time.sleep'): with patch('subprocess.Popen') as popen: popen.return_value = Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv = [] def test_options_structure(self): """ A test to ensure that HendrixDeploy.options also has the complete set of options available """ deploy = self.wsgiDeploy() expected_keys = self.DEFAULTS.keys() actual_keys = deploy.options.keys() self.assertListEqual(expected_keys, actual_keys)
35.816327
77
0.637607
5,058
0.960684
0
0
0
0
0
0
673
0.127825
86cc1ac4bd0be7cfc736232b574de4d27f85e0ca
6,656
py
Python
discord/types/interactions.py
Voxel-Fox-Ltd/Novus
3e254115daf1c09455b26dc7819b73fbf5ee56e5
[ "MIT" ]
61
2021-08-30T05:30:31.000Z
2022-03-24T11:24:38.000Z
discord/types/interactions.py
Voxel-Fox-Ltd/Novus
3e254115daf1c09455b26dc7819b73fbf5ee56e5
[ "MIT" ]
30
2021-08-31T10:16:42.000Z
2022-03-09T22:53:15.000Z
discord/types/interactions.py
Voxel-Fox-Ltd/Novus
3e254115daf1c09455b26dc7819b73fbf5ee56e5
[ "MIT" ]
15
2021-09-02T09:40:58.000Z
2022-02-25T12:19:02.000Z
""" The MIT License (MIT) Copyright (c) 2015-2021 Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations from typing import Optional, TYPE_CHECKING, Dict, TypedDict, Union, List, Literal from .snowflake import Snowflake from .components import Component, SelectOption from .embed import Embed from .channel import ChannelType, Channel from .member import Member from .role import Role from .user import User if TYPE_CHECKING: from .message import AllowedMentions, Message ApplicationCommandType = Literal[1, 2, 3] class ApplicationCommand(TypedDict): id: Snowflake application_id: Snowflake name: str description: str options: Optional[List[ApplicationCommandOption]] type: Optional[ApplicationCommandType] ApplicationCommandOptionType = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] class ApplicationCommandOption(TypedDict): type: ApplicationCommandOptionType name: str description: str required: bool choices: Optional[List[ApplicationCommandOptionChoice]] options: Optional[List[ApplicationCommandOption]] class ApplicationCommandOptionChoice(TypedDict): name: str value: Union[str, int] ApplicationCommandPermissionType = Literal[1, 2] class ApplicationCommandPermissions(TypedDict): id: Snowflake type: ApplicationCommandPermissionType permission: bool class BaseGuildApplicationCommandPermissions(TypedDict): permissions: List[ApplicationCommandPermissions] class PartialGuildApplicationCommandPermissions(BaseGuildApplicationCommandPermissions): id: Snowflake class GuildApplicationCommandPermissions(PartialGuildApplicationCommandPermissions): application_id: Snowflake guild_id: Snowflake InteractionType = Literal[1, 2, 3] class _ApplicationCommandInteractionDataOption(TypedDict): name: str class _ApplicationCommandInteractionDataOptionSubcommand(_ApplicationCommandInteractionDataOption): type: Literal[1, 2] options: List[ApplicationCommandInteractionDataOption] class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type: Literal[3] value: str class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption): type: Literal[4] value: int class _ApplicationCommandInteractionDataOptionBoolean(_ApplicationCommandInteractionDataOption): type: Literal[5] value: bool class _ApplicationCommandInteractionDataOptionSnowflake(_ApplicationCommandInteractionDataOption): type: Literal[6, 7, 8, 9] value: Snowflake class _ApplicationCommandInteractionDataOptionNumber(_ApplicationCommandInteractionDataOption): type: Literal[10] value: float 
ApplicationCommandInteractionDataOption = Union[ _ApplicationCommandInteractionDataOptionString, _ApplicationCommandInteractionDataOptionInteger, _ApplicationCommandInteractionDataOptionSubcommand, _ApplicationCommandInteractionDataOptionBoolean, _ApplicationCommandInteractionDataOptionSnowflake, _ApplicationCommandInteractionDataOptionNumber, ] class ApplicationCommandResolvedPartialChannel(TypedDict): id: Snowflake type: ChannelType permissions: str name: str class ApplicationCommandInteractionDataResolved(TypedDict, total=False): users: Dict[Snowflake, User] members: Dict[Snowflake, Member] roles: Dict[Snowflake, Role] channels: Dict[Snowflake, ApplicationCommandResolvedPartialChannel] class ApplicationCommandInteractionDataOption(TypedDict): name: str type: int value: Optional[str] # Optional[ApplicationCommandOptionType] options: Optional[ApplicationCommandInteractionDataOption] focused: Optional[bool] components: Optional[List[ApplicationCommandInteractionDataOption]] class _InteractionDataOptional(TypedDict, total=False): resolved: Dict[str, dict] options: List[ApplicationCommandInteractionDataOption] custom_id: str component_type: int values: List[str] target_id: Snowflake components: List[ApplicationCommandInteractionDataOption] class InteractionData(_InteractionDataOptional): id: Snowflake name: str type: ApplicationCommandType class InteractionResolved(TypedDict): users: List[Union[User, Member]] members: List[Member] roles: List[Role] channels: List[Channel] messages: List[Message] class _InteractionOptional(TypedDict, total=False): data: InteractionData guild_id: Snowflake channel_id: Snowflake member: Member user: User message: Message guild_locale: str class Interaction(_InteractionOptional): id: Snowflake application_id: Snowflake type: InteractionType token: str version: int resolved: InteractionResolved locale: str class InteractionApplicationCommandCallbackData(TypedDict, total=False): tts: bool content: str embeds: List[Embed] allowed_mentions: AllowedMentions flags: int components: List[Component] InteractionResponseType = Literal[1, 4, 5, 6, 7] class _InteractionResponseOptional(TypedDict, total=False): data: InteractionApplicationCommandCallbackData class InteractionResponse(_InteractionResponseOptional): type: InteractionResponseType class MessageInteraction(TypedDict): id: Snowflake type: InteractionType name: str user: User class _EditApplicationCommandOptional(TypedDict, total=False): description: str options: Optional[List[ApplicationCommandOption]] type: ApplicationCommandType class EditApplicationCommand(_EditApplicationCommandOptional): name: str default_permission: bool
27.618257
99
0.790415
4,448
0.668269
0
0
0
0
0
0
1,125
0.16902
86cc747c2e5f0caead634114a98e5f4a747d16ea
15,163
py
Python
local/local_sign.py
EVAyo/chaoxing_auto_sign
7ae91a5e9aa4d15f57a5419ff3f5a455e151930a
[ "MIT" ]
null
null
null
local/local_sign.py
EVAyo/chaoxing_auto_sign
7ae91a5e9aa4d15f57a5419ff3f5a455e151930a
[ "MIT" ]
null
null
null
local/local_sign.py
EVAyo/chaoxing_auto_sign
7ae91a5e9aa4d15f57a5419ff3f5a455e151930a
[ "MIT" ]
null
null
null
# -*- coding: utf8 -*- import os import re import time import json import random import asyncio from typing import Optional, List, Dict from aiohttp import ClientSession from aiohttp.cookiejar import SimpleCookie from lxml import etree from bs4 import BeautifulSoup from config import * from message import server_chan_send class AutoSign(object): def __init__(self, username, password, schoolid=None, enc=None): """初始化就进行登录""" self.headers = { 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session = ClientSession(headers=self.headers) self.username = username self.password = password self.schoolid = '' if schoolid is None else schoolid self.enc = '' if enc is None else enc async def check_login_status(self, status, text): if status == 403: return 1002 data = json.loads(text) if data['result']: return 1000 # 登录成功 else: return 1001 # 登录信息有误 async def set_cookies(self): """设置cookies""" cookie = await self.check_cookies() if not cookie: # 无效则重新登录,并保存cookies status, text, cookie = await self.login() login_status = await self.check_login_status(status, text) if login_status == 1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000 def dict_from_simple_cookie(self, cookies) -> dict: """ 从响应对象中抽取cookies """ result = {} for key, value in cookies.items(): result[key] = value.value return result def save_cookies(self, cookies: dict): """保存cookies""" with open(COOKIES_FILE_PATH, "r") as f: data = json.load(f) data[self.username] = cookies with open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data, f2) async def check_cookies(self) -> Optional[SimpleCookie]: """检测json文件内是否存有cookies,有则检测,无则登录""" if "cookies.json" not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as f: f.write("{}") with open(COOKIES_FILE_PATH, 'r') as f: # json文件有无账号cookies, 没有,则直接返回假 try: data = json.load(f) cookies = data[self.username] except Exception: return False # 检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp: if resp.status != 200: print("cookie失效") return None else: print("cookie有效!") return cookies async def login(self): """ 登录并返回响应 """ params = { 'name': self.username, 'pwd': self.password, 'schoolid': self.schoolid, 'verify': 0 } async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as resp: status = resp.status text = await resp.text() cookies = resp.cookies return status, text, cookies def check_activeid(self, activeid): """检测activeid是否存在,不存在则添加""" activeid += self.username if "activeid.json" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write("{}") with open(ACTIVEID_FILE_PATH, 'r') as f: try: # 读取文件 data = json.load(f) if data[activeid]: return True except BaseException: # 如果出错,则表示没有此activeid return False def save_activeid(self, activeid): """保存已成功签到的activeid""" activeid += self.username if "activeid.json" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write("{}") with open(ACTIVEID_FILE_PATH, 'r') as f: data = json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as f2: 
data[activeid] = True json.dump(data, f2) async def get_all_classid(self) -> list: """获取课程主页中所有课程的classid和courseid""" res = [] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text = await resp.text() soup = BeautifulSoup(text, "lxml") course_list = soup.find_all( 'li', class_="course") for course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_="course-name").text)) print('课程列表: ', res) return res async def get_sign_type(self, classid, courseid, activeid): """获取签到类型""" params = { 'activeId': activeid, 'classId': classid, 'courseId': courseid } async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text = await resp.text() h = etree.HTML(text) sign_type = h.xpath('//div[@class="location"]/span/text()') return sign_type async def get_activeid(self, classid, courseid, classname): """访问任务面板获取课程的活动id""" res = [] re_rule = r'([\d]+),2' params = { 'courseId': courseid, 'jclassId': classid } async with self.session.request(method='GET', url="https://mobilelearn.chaoxing.com/widget/pcpick/stu/index", verify_ssl=False, params=params) as resp: text = await resp.text() h = etree.HTML(text) activeid_list = h.xpath('//*[@id="startList"]/div/div/@onclick') for activeid in activeid_list: activeid = re.findall(re_rule, activeid) if not activeid: continue sign_type = await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n = len(res) if n: d = {'num': n, 'class': {}} for i in range(n): if not self.check_activeid(res[i][0]): d['class'][i] = { 'classid': classid, 'courseid': courseid, 'activeid': res[i][0], 'classname': classname, 'sign_type': res[i][1] } return d async def general_sign(self, classid, courseid, activeid): """普通签到""" params = { 'activeId': activeid, 'classId': classid, 'fid': '39037', 'courseId': courseid } async with self.session.request( method='GET', url="https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign", params=params, verify_ssl=False ) as resp: text = await resp.text() title = re.findall('<title>(.*)</title>', text)[0] if "签到成功" not in title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s = { 'date': time.strftime("%m-%d %H:%M", time.localtime()), 'status': title } return s async def hand_sign(self, classid, courseid, activeid): """手势签到""" params = { 'courseId': courseid, 'classId': classid, 'activeId': activeid } async with self.session.request( method='GET', url="https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn", params=params, verify_ssl=False ) as resp: text = await resp.text() title = re.findall('<title>(.*)</title>', text) s = { 'date': time.strftime("%m-%d %H:%M", time.localtime()), 'status': title } return s async def qcode_sign(self, activeid): """二维码签到""" params = { 'enc': self.enc, 'name': '', 'activeId': activeid, 'uid': '', 'clientip': '', 'useragent': '', 'latitude': '-1', 'longitude': '-1', 'fid': '', 'appType': '15' } async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp: text = await resp.text() return { 'date': time.strftime("%m-%d %H:%M", time.localtime()), 'status': text } async def addr_sign(self, activeid): """位置签到""" params = { 'name': '', 'activeId': activeid, 'address': '中国', 'uid': '', 'clientip': clientip, 'latitude': latitude, 'longitude': longitude, 'fid': 
'', 'appType': '15', 'ifTiJiao': '1' } async with self.session.request( method="GET", url="https://mobilelearn.chaoxing.com/pptSign/stuSignajax", params=params ) as resp: text = await resp.text() return { 'date': time.strftime("%m-%d %H:%M", time.localtime()), 'status': text } async def tphoto_sign(self, activeid, uid): """拍照签到""" objectId = await self.upload_img(uid) params = { 'name': '', 'activeId': activeid, 'address': '中国', 'uid': '', 'clientip': clientip, 'latitude': latitude, 'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1', 'objectId': objectId } async with self.session.request( method="GET", url="https://mobilelearn.chaoxing.com/pptSign/stuSignajax", params=params ) as resp: text = await resp.text() return { 'date': time.strftime("%m-%d %H:%M", time.localtime()), 'status': text } async def get_token(self): """获取上传文件所需参数token""" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET', url=url ) as resp: text = await resp.text() token_dict = json.loads(text) return token_dict['_token'] async def upload_img(self, uid): """上传图片""" # 从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH) except Exception as e: os.mkdir(IMAGE_PATH) all_img = 0 if len(all_img) == 0: return "a5d588f7bce1994323c348982332e470" else: img = IMAGE_PATH + random.choice(all_img) # uid = self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files = {'file': open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token() param = { 'puid': uid, '_token': token } async with self.session.request( method='POST', url=url, params=param, data=files ) as resp: text = await resp.text() res_dict = json.loads(text) return res_dict['objectId'] async def send_sign_request(self, classid, courseid, activeid, sign_type): """发送签到请求""" if "手势" in sign_type: return await self.hand_sign(classid, courseid, activeid) elif "二维码" in sign_type: return await self.qcode_sign(activeid) elif "位置" in sign_type: return await self.addr_sign(activeid) elif "拍照" in sign_type: return await self.tphoto_sign(activeid) else: return await self.general_sign(classid, courseid, activeid) async def send_sign_result(self, results: List[Dict]): """ 发送签到结果 """ await server_chan_send(results, self.session) async def start_sign_task(self): """开始所有签到任务""" tasks = [] res = [] await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId = await self.get_all_classid() # 获取所有课程activeid和签到类型 for i in classid_courseId: coroutine = self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results: List[Dict] = await asyncio.gather(*tasks) for r in results: if r is None: continue for d in r['class'].values(): resp = await self.send_sign_request( d['classid'], d['courseid'], d['activeid'], d['sign_type'] ) if resp: # 签到课程, 签到时间, 签到状态 sign_msg = { 'name': d['classname'], 'date': resp['date'], 'status': resp['status'] } res.append(sign_msg) if '失败' in resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return res async def close_session(self): await self.session.close()
33.770601
149
0.485722
15,455
0.979218
0
0
0
0
12,853
0.814357
3,573
0.226383
86ccfd65a1bb34c39113feed67502cda22587b34
4,240
py
Python
build/scripts-3.6/fit_background_model.py
stahlberggroup/umierrorcorrect
8ceabe30a87811dad467d04eb5a08d0213065946
[ "MIT" ]
null
null
null
build/scripts-3.6/fit_background_model.py
stahlberggroup/umierrorcorrect
8ceabe30a87811dad467d04eb5a08d0213065946
[ "MIT" ]
null
null
null
build/scripts-3.6/fit_background_model.py
stahlberggroup/umierrorcorrect
8ceabe30a87811dad467d04eb5a08d0213065946
[ "MIT" ]
1
2022-01-12T13:51:59.000Z
2022-01-12T13:51:59.000Z
#!python
import glob
import numpy as np
from numpy import inf
from numpy import nan
from scipy.optimize import fmin
from scipy.stats import beta
from scipy.special import beta as B
from scipy.special import comb
import argparse
import sys


def parseArgs():
    '''Function for parsing arguments'''
    parser = argparse.ArgumentParser(description="Pipeline for analyzing barcoded amplicon \
                                                  sequencing data with Unique molecular \
                                                  identifiers (UMI)")
    parser.add_argument('-cons', '--cons_file', dest='cons_file',
                        help='Path to cons file, for fitting parameters of the bgmodel')
    parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile',
                        help='Path to file with non-background positions')
    parser.add_argument('-out', '--out_file', dest='out_file',
                        help="name of output file, default = %(default)s]", default="bgmodel.params")
    parser.add_argument('-f', '--fsize', dest='fsize',
                        help='Family size cutoff (consensus cutoff) for variant calling. [default = %(default)s]',
                        default=3)
    args = parser.parse_args(sys.argv[1:])
    return(args)


def parse_cons_file(filename, fsize=3):
    '''Parse a cons file and collect fraction, coverage, count and position
    for every non-N alternate allele observed at the given family size.'''
    n1 = []
    f1 = []
    c1 = []
    posx = []
    data = []
    with open(filename) as f:
        for line in f:
            if not line.startswith('Sample Name'):
                line = line.rstrip('\n')
                parts = line.split('\t')
                pos = parts[1] + ':' + parts[2]
                name = parts[3]
                #print(name)
                if name not in "":
                    famsize = parts[-4]
                    if int(famsize) == fsize:
                        frac = float(parts[-2])
                        alt = parts[-1]
                        count = parts[-3]
                        if frac > 0 and alt not in 'N':
                            cov = int(parts[-5])
                            f1.append(float(frac))
                            n1.append(int(cov))
                            c1.append(int(count))
                            posx.append(pos)
                            data.append(line)
                    #print(name)
                    #print(famsize)
    return(f1, n1, c1, posx, data)


def betaNLL(params, *args):
    '''Negative log-likelihood of a Beta distribution; non-finite log-densities
    are masked out before summing so that zero-density points do not yield inf.'''
    a, b = params
    data = np.array(args[0])
    pdf = beta.pdf(data, a, b, loc=0, scale=1)
    lg = np.log(pdf)
    #lg=np.where(lg==-np.inf,0,lg)
    mask = np.isfinite(lg)
    nll = -lg[mask].sum()
    return(nll)


def get_beta_parameters(data):
    '''Fit Beta parameters by maximum likelihood, starting from method-of-moments estimates.'''
    m = np.mean(data)
    v = np.var(data)
    a0 = m * (m * (1 - m) / v - 1)
    b0 = (1 - m) * (m * (1 - m) / v - 1)
    result = fmin(betaNLL, [a0, b0], args=(data,))
    return(result)


def run_fit_bgmodel(args):
    spikepositions = [178952085, 55599321, 7577558, 7577547, 7577538, 7577120]
    if args.nonbgposfile:
        nonbgpos = []
        with open(args.nonbgposfile) as f:
            for line in f:
                line = line.rstrip()
                nonbgpos.append(line)
    else:
        nonbgpos = spikepositions
    if not args.cons_file:
        args.cons_file = glob.glob(args.output_path + '/*cons.tsv')[0]
    args.fsize = int(args.fsize)
    f1, n1, a1, pos, data = parse_cons_file(args.cons_file, args.fsize)
    f1 = np.array(f1)
    n1 = np.array(n1)
    a1 = np.array(a1)
    pos = np.array(pos)
    data = np.array(data)
    result = get_beta_parameters(f1[np.isin(pos, nonbgpos) != True])
    #a=prob_bb(n1,a1,result[0],result[1])
    print(pos, nonbgpos, np.isin(pos, nonbgpos))
    with open(args.out_file, 'w') as g:
        g.write('{}\n'.format(result[0]))
        g.write('{}\n'.format(result[1]))
    #a[a==inf]=1e-10
    #a[np.isnan(a)]=1e-10
    #Q = -10*np.log10(a)
    #data=np.array(data)
    #plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png')
    #if args.vc_method.lower()=='bbmodel':
    #    rout=data[Q >= float(args.qvalue_threshold)]
    #    Qsig=Q[Q >= float(args.qvalue_threshold)]
    #else:
    #    rout=data[a1 >= float(args.count_cutoff)]
    #    Qsig=Q[a1 >= float(args.count_cutoff)]
    #outfilename=args.output_path+'/'+args.sample_name+'2.vcf'
    #write_vcf(outfilename,rout,Qsig,args.reference_file)


if __name__ == '__main__':
    args = parseArgs()
    run_fit_bgmodel(args)
35.932203
154
0.566274
0
0
0
0
0
0
0
0
1,290
0.304245
86cdf766574c9c743ff631f5d4070feb9f763d2a
7,654
py
Python
caffe2/python/operator_test/partition_ops_test.py
KevinKecc/caffe2
a2b6c6e2f0686358a84277df65e9489fb7d9ddb2
[ "Apache-2.0" ]
585
2015-08-10T02:48:52.000Z
2021-12-01T08:46:59.000Z
caffe2/python/operator_test/partition_ops_test.py
mingzhe09088/caffe2
8f41717c46d214aaf62b53e5b3b9b308b5b8db91
[ "Apache-2.0" ]
27
2018-04-14T06:44:22.000Z
2018-08-01T18:02:39.000Z
caffe2/python/operator_test/partition_ops_test.py
mingzhe09088/caffe2
8f41717c46d214aaf62b53e5b3b9b308b5b8db91
[ "Apache-2.0" ]
183
2015-08-10T02:49:04.000Z
2021-12-01T08:47:13.000Z
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from caffe2.python import core, workspace from caffe2.python.test_util import TestCase, rand_array class TestPartitionOps(TestCase): def test_configs(self): # (main dims, partitions, main type, [list of (extra dims, type)]) configs = [ ((10, ), 3), ((4, ), 10), ((10, 10), 4), ((100, ), 2), ((5, ), 1), ((1, ), 1), ((2, 10), 2), ] suffixes = [ [], [((2, 2), np.float32)], [((3, ), np.int64), ((2, ), np.float32)], ] return [ (main_dims, parts, main_type, extra, pack) for main_dims, parts in configs for main_type in [np.int32, np.int64] for extra in suffixes for pack in [False, True] ] def testPartition(self): for main_dims, parts, main_type, extra_ins, pack in self.test_configs(): ins = ['in' + str(i) for i in range(1 + len(extra_ins))] outs = [ 'in{}_p{}'.format(j, i) for i in range(parts) for j in range(1 + len(extra_ins)) ] op = core.CreateOperator( 'Partition', ins, outs, pack_first_input=(1 if pack else 0)) x = [] for i, (dims, t) in enumerate([((), main_type)] + extra_ins): if t in [np.float32, np.float64]: d = rand_array(*(main_dims + dims)) else: d = np.random.randint(-100, 100, (main_dims + dims)) d = d.astype(t) workspace.FeedBlob(ins[i], d) x.append(d) def sharding(x): # numpy has proper modulo op that yields non-negative results shards = (x[0] % parts).reshape([-1]) out = [] for i in range(parts): for ind, v in enumerate(x): suffix_shape = v.shape[len(x[0].shape):] accum = [] data = v.reshape((-1, ) + suffix_shape) if pack and ind == 0: data = data // parts for j, s in enumerate(shards): if s == i: accum.append(data[j]) def join(a): if not a: return np.empty(shape=(0, ) + suffix_shape) return np.stack(a) out.append(join(accum)) return out workspace.RunOperatorOnce(op) ref = sharding(x) print(x) print(ref) for name, expected in zip(outs, ref): np.testing.assert_array_equal( expected, workspace.FetchBlob(name) ) # test inverse operation (GatherByKey) if len(main_dims) == 1: # currently only 1D key tensor supported for i in range(len(extra_ins)): expected_out = ins[i + 1] gather_ins = [ins[0]] + [ outs[len(ins) * p + i + 1] for p in range(parts)] actual_out = expected_out + '_actual' op = core.CreateOperator( 'GatherByKey', gather_ins, actual_out) workspace.RunOperatorOnce(op) expected = workspace.FetchBlob(expected_out) actual = workspace.FetchBlob(actual_out) np.testing.assert_array_equal(expected, actual) def testLengthsPartition(self): for main_dims, parts, main_type, extra_ins, pack in self.test_configs(): # For LengthsSharding only 1-D tensors supported as a first input if len(main_dims) > 1: continue ins = ['in' + str(i) for i in range(2 + len(extra_ins))] outs = [ 'in{}_p{}'.format(j, i) for i in range(parts) for j in range(2 + len(extra_ins)) ] op = 
core.CreateOperator( 'LengthsPartition', ins, outs, pack_first_input=(1 if pack else 0) ) x = [] for i, (dims, t) in enumerate([((), main_type)] + extra_ins): if t in [np.float32, np.float64]: d = rand_array(*(main_dims + dims)) else: d = np.random.randint(-100, 100, (main_dims + dims)) d = d.astype(t) workspace.FeedBlob(ins[i + 1], d) x.append(d) # Randomly generate length tensor as well elements = np.random.randint(2, 10) lengths = [] total_length = 0 for _ in range(elements - 1): lengths.append(np.random.randint(main_dims[0] - total_length)) total_length += lengths[-1] lengths.append(main_dims[0] - total_length) workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32)) def sharding(x): # numpy has proper modulo op that yields non-negative results shards = (x[0] % parts).reshape([-1]) out = [] for i in range(parts): idx = 0 sharded_lengths = np.zeros(elements) for ind, length in enumerate(lengths): for _ in range(length): if shards[idx] == i: sharded_lengths[ind] += 1 idx += 1 out.append(sharded_lengths) for ind, v in enumerate(x): suffix_shape = v.shape[len(x[0].shape):] accum = [] data = v.reshape((-1, ) + suffix_shape) if pack and ind == 0: data = data // parts for j, s in enumerate(shards): if s == i: accum.append(data[j]) def join(a): if not a: return np.empty(shape=(0, ) + suffix_shape) return np.stack(a) out.append(join(accum)) return out workspace.RunOperatorOnce(op) ref = sharding(x) for name, expected in zip(outs, ref): np.testing.assert_array_equal( expected, workspace.FetchBlob(name) ) if __name__ == "__main__": import unittest unittest.main()
38.852792
80
0.468774
6,645
0.868174
0
0
0
0
0
0
1,118
0.146067
86ce2b47e96edc2e4a65e6684b182564c236c3d3
11,195
py
Python
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py
Maikor/ydk-py
b86c4a7c570ae3b2c5557d098420446df5de4929
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py
Maikor/ydk-py
b86c4a7c570ae3b2c5557d098420446df5de4929
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py
Maikor/ydk-py
b86c4a7c570ae3b2c5557d098420446df5de4929
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
""" Cisco_IOS_XR_fib_common_cfg This module contains a collection of YANG definitions for Cisco IOS\-XR fib\-common package configuration. This module contains definitions for the following management objects\: fib\: CEF configuration Copyright (c) 2013\-2018 by Cisco Systems, Inc. All rights reserved. """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class FibPbtsFallback(Enum): """ FibPbtsFallback (Enum Class) Fib pbts fallback .. data:: list = 1 Fallback to class number list .. data:: any = 2 Fallback to any class .. data:: drop = 3 Fallback to drop """ list = Enum.YLeaf(1, "list") any = Enum.YLeaf(2, "any") drop = Enum.YLeaf(3, "drop") class FibPbtsForwardClass(Enum): """ FibPbtsForwardClass (Enum Class) Fib pbts forward class .. data:: any = 8 Any class """ any = Enum.YLeaf(8, "any") class Fib(Entity): """ CEF configuration .. attribute:: pbts_forward_class_fallbacks PBTS class configuration **type**\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform FIB platform parameters **type**\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. attribute:: auto_hash_recover Set option for automatcially recovering consistent\-hashing state on interface up **type**\: bool .. attribute:: prefer_aib_routes Set options for adjacency routes overriding RIB routes **type**\: bool .. attribute:: encap_sharing_disable Set true to disable encapsulation sharing **type**\: bool .. attribute:: frr_follow_bgp_pic Set option for fast\-reroute to follow BGP PIC update, not to wait for timeout **type**\: bool """ _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib, self).__init__() self._top_entity = None self.yang_name = "fib" self.yang_parent_name = "Cisco-IOS-XR-fib-common-cfg" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("pbts-forward-class-fallbacks", ("pbts_forward_class_fallbacks", Fib.PbtsForwardClassFallbacks)), ("platform", ("platform", Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover = None self.prefer_aib_routes = None self.encap_sharing_disable = None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map["pbts_forward_class_fallbacks"] = "pbts-forward-class-fallbacks" self.platform = Fib.Platform() self.platform.parent = self self._children_name_map["platform"] = "platform" self._segment_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity): """ PBTS class configuration .. 
attribute:: pbts_forward_class_fallback Set PBTS class for fallback **type**\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` """ _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = "pbts-forward-class-fallbacks" self.yang_parent_name = "fib" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("pbts-forward-class-fallback", ("pbts_forward_class_fallback", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path = lambda: "pbts-forward-class-fallbacks" self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class PbtsForwardClassFallback(Entity): """ Set PBTS class for fallback .. attribute:: forward_class_number (key) PBTS forward class number **type**\: union of the below types: **type**\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\: int **range:** 0..8 .. attribute:: fallback_type Set PBTS fallback type **type**\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\: True .. attribute:: fallback_class_number_array Set PBTS fallback class number array **type**\: list of int **range:** 0..7 """ _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = "pbts-forward-class-fallback" self.yang_parent_name = "pbts-forward-class-fallbacks" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number = None self.fallback_type = None self.fallback_class_number_array = [] self._segment_path = lambda: "pbts-forward-class-fallback" + "[forward-class-number='" + str(self.forward_class_number) + "']" self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value) class Platform(Entity): """ FIB platform parameters .. 
attribute:: label_switched_multicast Options for label\-switched\-multicast parameters **type**\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` """ _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform, self).__init__() self.yang_name = "platform" self.yang_parent_name = "fib" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("label-switched-multicast", ("label_switched_multicast", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map["label_switched_multicast"] = "label-switched-multicast" self._segment_path = lambda: "platform" self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform, [], name, value) class LabelSwitchedMulticast(Entity): """ Options for label\-switched\-multicast parameters .. attribute:: frr_holdtime Set time to keep FRR slots programmed post FRR **type**\: int **range:** 3..180 **units**\: second """ _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = "label-switched-multicast" self.yang_parent_name = "platform" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime = None self._segment_path = lambda: "label-switched-multicast" self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/platform/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def clone_ptr(self): self._top_entity = Fib() return self._top_entity
34.875389
184
0.609915
10,552
0.942564
0
0
0
0
0
0
5,356
0.478428
86ce2bcecdfa6edd6bc5db700d444829470b263a
2,888
py
Python
action/combo.py
dl-stuff/dl9
1cbe98afc53a1de9d413797fb130946acc4b6ba4
[ "MIT" ]
null
null
null
action/combo.py
dl-stuff/dl9
1cbe98afc53a1de9d413797fb130946acc4b6ba4
[ "MIT" ]
null
null
null
action/combo.py
dl-stuff/dl9
1cbe98afc53a1de9d413797fb130946acc4b6ba4
[ "MIT" ]
null
null
null
"""Series of actions that form a combo chain""" from __future__ import annotations from typing import Optional, Sequence, TYPE_CHECKING from action import Action from core.utility import Array from core.constants import PlayerForm, SimActKind, MomentType from core.database import FromDB if TYPE_CHECKING: from entity.player import Player class Combos: def __init__(self, player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] = None) -> None: self.player = player self.actions: Array[Action] = Array() for idx, act_id in enumerate(act_ids): self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1)) self.ex_actions = None if ex_act_ids: self.ex_actions: Array[Action] = Array() for idx, act_id in enumerate(ex_act_ids): if not act_id: self.ex_actions.append(None) continue self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1)) def next(self): if self.player.current in self.actions: try: return self.actions[self.player.current.index + 1] except IndexError: pass return self.actions[1] def __repr__(self) -> str: if self.ex_actions: return "->".join(map(repr, self.actions)) + "\tEX[" + "->".join(map(repr, self.ex_actions)) + "]" return "->".join(map(repr, self.actions)) class UniqueCombos(Combos, FromDB, table="CharaUniqueCombo"): def __init__(self, id: int, player: Player) -> None: FromDB.__init__(self, id) act_ids = (self._data["_ActionId"] + i for i in range(self._data["_MaxComboNum"])) ex_act_ids = None if not self._data["_ExActionId"] else (self._data["_ExActionId"] + i for i in range(self._data["_MaxComboNum"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if self._data["_ShiftConditionType"] == 1: self.player.events.listen(MomentType.HIT, self.enable) def enable(self, *args, **kwargs): pass class DefaultCombos(Combos, FromDB, table="WeaponType"): def __init__(self, id: int, player: Player) -> None: FromDB.__init__(self, id) act_ids = (self._data[f"_DefaultSkill{i+1:02}"] for i in range(5) if self._data[f"_DefaultSkill{i+1:02}"]) ex_act_ids = None if not self._data["_DefaultSkill05Ex"] else (0, 0, 0, 0, self._data["_DefaultSkill05Ex"]) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos): def __init__(self, id: int, combo_max: int, player: Player) -> None: act_ids = (id + i for i in range(combo_max)) Combos.__init__(self, player, PlayerForm.DRG, act_ids)
42.470588
138
0.649584
2,531
0.876385
0
0
0
0
0
0
271
0.093837
86ce3c0225876fe3453133a9a0965f8f30c17a84
5,447
py
Python
flask_unchained/bundles/session/config.py
achiang/flask-unchained
12788a6e618904a25ff2b571eb05ff1dc8f1840f
[ "MIT" ]
null
null
null
flask_unchained/bundles/session/config.py
achiang/flask-unchained
12788a6e618904a25ff2b571eb05ff1dc8f1840f
[ "MIT" ]
null
null
null
flask_unchained/bundles/session/config.py
achiang/flask-unchained
12788a6e618904a25ff2b571eb05ff1dc8f1840f
[ "MIT" ]
null
null
null
import os from datetime import timedelta from flask_unchained import BundleConfig try: from flask_unchained.bundles.sqlalchemy import db except ImportError: db = None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' """ The name of the session cookie. Defaults to ``'session'``. """ SESSION_COOKIE_DOMAIN = None """ The domain for the session cookie. If this is not set, the cookie will be valid for all subdomains of ``SERVER_NAME``. Defaults to ``None``. """ SESSION_COOKIE_PATH = None """ The path for the session cookie. If this is not set the cookie will be valid for all of ``APPLICATION_ROOT`` or if that is not set for '/'. Defaults to ``None``. """ SESSION_COOKIE_HTTPONLY = True """ Controls if the cookie should be set with the ``httponly`` flag. Browsers will not allow JavaScript access to cookies marked as ``httponly`` for security. Defaults to ``True``. """ SESSION_COOKIE_SECURE = False """ Controls if the cookie should be set with the ``secure`` flag. Browsers will only send cookies with requests over HTTPS if the cookie is marked ``secure``. The application must be served over HTTPS for this to make sense. Defaults to ``False``. """ PERMANENT_SESSION_LIFETIME = timedelta(days=31) """ The lifetime of a permanent session as ``datetime.timedelta`` object or an integer representing seconds. Defaults to 31 days. """ SESSION_COOKIE_SAMESITE = None """ Restrict how cookies are sent with requests from external sites. Limits the scope of the cookie such that it will only be attached to requests if those requests are "same-site". Can be set to ``'Lax'`` (recommended) or ``'Strict'``. Defaults to ``None``. """ SESSION_REFRESH_EACH_REQUEST = True """ Controls the set-cookie behavior. If set to ``True`` a permanent session will be refreshed each request and get their lifetime extended, if set to ``False`` it will only be modified if the session actually modifies. Non permanent sessions are not affected by this and will always expire if the browser window closes. Defaults to ``True``. """ class Config(_DefaultFlaskConfigForSessions): """ Default configuration options for the Session Bundle. """ SESSION_TYPE = 'null' """ Specifies which type of session interface to use. Built-in session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. """ SESSION_PERMANENT = True """ Whether use permanent session or not. Defaults to ``True``. """ SESSION_USE_SIGNER = False """ Whether sign the session cookie sid or not. If set to ``True``, you have to set ``SECRET_KEY``. Defaults to ``False``. """ SESSION_KEY_PREFIX = 'session:' """ A prefix that is added before all session keys. This makes it possible to use the same backend storage server for different apps. Defaults to ``'session:'``. """ SESSION_REDIS = None """ A :class:`redis.Redis` instance. By default, connect to ``127.0.0.1:6379``. """ SESSION_MEMCACHED = None """ A :class:`memcached.Client` instance. 
By default, connect to ``127.0.0.1:11211``. """ SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') """ The folder where session files are stored. Defaults to using a folder named ``flask_sessions`` in your current working directory. """ SESSION_FILE_THRESHOLD = 500 """ The maximum number of items the session stores before it starts deleting some. Defaults to 500. """ SESSION_FILE_MODE = 0o600 """ The file mode wanted for the session files. Should be specified as an octal, eg ``0o600``. Defaults to ``0o600``. """ SESSION_MONGODB = None """ A :class:`pymongo.MongoClient` instance. By default, connect to ``127.0.0.1:27017``. """ SESSION_MONGODB_DB = 'flask_session' """ The MongoDB database you want to use. Defaults to ``'flask_session'``. """ SESSION_MONGODB_COLLECT = 'sessions' """ The MongoDB collection you want to use. Defaults to ``'sessions'``. """ SESSION_SQLALCHEMY = db """ A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. """ SESSION_SQLALCHEMY_TABLE = 'flask_sessions' """ The name of the SQL table you want to use. Defaults to ``flask_sessions``. """ SESSION_SQLALCHEMY_MODEL = None """ Set this if you need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing sessions in the database. """
27.371859
111
0.668258
5,264
0.966404
0
0
0
0
0
0
4,285
0.786672
86ce9b178e942f833e8db993afdcf0aface18b4a
3,845
py
Python
sktime/forecasting/base/adapters/_statsmodels.py
tombh/sktime
53df0b9ed9d1fd800539165c414cc5611bcc56b3
[ "BSD-3-Clause" ]
1
2020-06-02T22:24:44.000Z
2020-06-02T22:24:44.000Z
sktime/forecasting/base/adapters/_statsmodels.py
abhishek-parashar/sktime
1dfce6b41c2acdb576acfc04b09d11bf115c92d1
[ "BSD-3-Clause" ]
1
2020-11-20T13:51:20.000Z
2020-11-20T13:51:20.000Z
sktime/forecasting/base/adapters/_statsmodels.py
abhishek-parashar/sktime
1dfce6b41c2acdb576acfc04b09d11bf115c92d1
[ "BSD-3-Clause" ]
3
2020-10-18T04:54:30.000Z
2021-02-15T18:04:18.000Z
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-

__author__ = ["Markus Löning"]
__all__ = ["_StatsModelsAdapter"]

import numpy as np
import pandas as pd

from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster


class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
    """Base class for interfacing statsmodels forecasting algorithms"""

    _fitted_param_names = ()

    def __init__(self):
        self._forecaster = None
        self._fitted_forecaster = None
        super(_StatsModelsAdapter, self).__init__()

    def fit(self, y, X=None, fh=None):
        """Fit to training data.

        Parameters
        ----------
        y : pd.Series
            Target time series to which to fit the forecaster.
        fh : int, list or np.array, optional (default=None)
            The forecasters horizon with the steps ahead to predict.
        X : pd.DataFrame, optional (default=None)
            Exogenous variables are ignored

        Returns
        -------
        self : returns an instance of self.
        """
        # statsmodels does not support the pd.Int64Index as required,
        # so we coerce them here to pd.RangeIndex
        if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index:
            y, X = _coerce_int_to_range_index(y, X)

        self._set_y_X(y, X)
        self._set_fh(fh)
        self._fit_forecaster(y, X)
        self._is_fitted = True
        return self

    def _fit_forecaster(self, y_train, X_train=None):
        """Internal fit"""
        raise NotImplementedError("abstract method")

    def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
        """
        Make forecasts.

        Parameters
        ----------
        fh : ForecastingHorizon
            The forecasters horizon with the steps ahead to predict.
            Default is one-step ahead forecast, i.e. np.array([1])
        X : pd.DataFrame, optional (default=None)
            Exogenous variables are ignored.
        return_pred_int : bool, optional (default=False)
        alpha : int or list, optional (default=0.95)

        Returns
        -------
        y_pred : pd.Series
            Returns series of predicted values.
        """
        if return_pred_int:
            raise NotImplementedError()

        # statsmodels requires zero-based indexing starting at the
        # beginning of the training series when passing integers
        start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
        y_pred = self._fitted_forecaster.predict(start, end)

        # statsmodels forecasts all periods from start to end of forecasting
        # horizon, but only return given time points in forecasting horizon
        return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]

    def get_fitted_params(self):
        """Get fitted parameters

        Returns
        -------
        fitted_params : dict
        """
        self.check_is_fitted()
        return {
            name: self._fitted_forecaster.params.get(name)
            for name in self._get_fitted_param_names()
        }

    def _get_fitted_param_names(self):
        """Get names of fitted parameters"""
        return self._fitted_param_names


def _coerce_int_to_range_index(y, X=None):
    new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
    try:
        np.testing.assert_array_equal(y.index, new_index)
    except AssertionError:
        raise ValueError(
            "Coercion of pd.Int64Index to pd.RangeIndex "
            "failed. Please provide `y_train` with a "
            "pd.RangeIndex."
        )
    y.index = new_index
    if X is not None:
        X.index = new_index
    return y, X
32.310924
79
0.628349
3,024
0.786271
0
0
0
0
0
0
1,811
0.470879
86cf3a0a9a0e45685f04435a33dcecfd088782c9
12,924
py
Python
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetLinkProperties.py
Dieptranivsr/Ros_Diep
d790e75e6f5da916701b11a2fdf3e03b6a47086b
[ "MIT" ]
null
null
null
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetLinkProperties.py
Dieptranivsr/Ros_Diep
d790e75e6f5da916701b11a2fdf3e03b6a47086b
[ "MIT" ]
1
2021-07-08T10:26:06.000Z
2021-07-08T10:31:11.000Z
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetLinkProperties.py
Dieptranivsr/Ros_Diep
d790e75e6f5da916701b11a2fdf3e03b6a47086b
[ "MIT" ]
null
null
null
# This Python file uses the following encoding: utf-8 """autogenerated by genpy from gazebo_msgs/GetLinkPropertiesRequest.msg. Do not edit.""" import codecs import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class GetLinkPropertiesRequest(genpy.Message): _md5sum = "7d82d60381f1b66a30f2157f60884345" _type = "gazebo_msgs/GetLinkPropertiesRequest" _has_header = False # flag to mark the presence of a Header object _full_text = """string link_name # name of link # link names are prefixed by model name, e.g. pr2::base_link """ __slots__ = ['link_name'] _slot_types = ['string'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: link_name :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if args or kwds: super(GetLinkPropertiesRequest, self).__init__(*args, **kwds) # message fields cannot be None, assign default values for those that are if self.link_name is None: self.link_name = '' else: self.link_name = '' def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ codecs.lookup_error("rosmsg").msg_type = self._type try: end = 0 start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.link_name = str[start:end].decode('utf-8', 'rosmsg') else: self.link_name = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ codecs.lookup_error("rosmsg").msg_type = self._type try: end = 0 start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.link_name = str[start:end].decode('utf-8', 
'rosmsg') else: self.link_name = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I # This Python file uses the following encoding: utf-8 """autogenerated by genpy from gazebo_msgs/GetLinkPropertiesResponse.msg. Do not edit.""" import codecs import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct import geometry_msgs.msg class GetLinkPropertiesResponse(genpy.Message): _md5sum = "a8619f92d17cfcc3958c0fd13299443d" _type = "gazebo_msgs/GetLinkPropertiesResponse" _has_header = False # flag to mark the presence of a Header object _full_text = """geometry_msgs/Pose com # center of mass location in link frame # and orientation of the moment of inertias # relative to the link frame bool gravity_mode # set gravity mode on/off float64 mass # linear mass of link float64 ixx # moment of inertia float64 ixy # moment of inertia float64 ixz # moment of inertia float64 iyy # moment of inertia float64 iyz # moment of inertia float64 izz # moment of inertia bool success # return true if get info is successful string status_message # comments if available ================================================================================ MSG: geometry_msgs/Pose # A representation of pose in free space, composed of position and orientation. Point position Quaternion orientation ================================================================================ MSG: geometry_msgs/Point # This contains the position of a point in free space float64 x float64 y float64 z ================================================================================ MSG: geometry_msgs/Quaternion # This represents an orientation in free space in quaternion form. float64 x float64 y float64 z float64 w """ __slots__ = ['com','gravity_mode','mass','ixx','ixy','ixz','iyy','iyz','izz','success','status_message'] _slot_types = ['geometry_msgs/Pose','bool','float64','float64','float64','float64','float64','float64','float64','bool','string'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: com,gravity_mode,mass,ixx,ixy,ixz,iyy,iyz,izz,success,status_message :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if args or kwds: super(GetLinkPropertiesResponse, self).__init__(*args, **kwds) # message fields cannot be None, assign default values for those that are if self.com is None: self.com = geometry_msgs.msg.Pose() if self.gravity_mode is None: self.gravity_mode = False if self.mass is None: self.mass = 0. if self.ixx is None: self.ixx = 0. if self.ixy is None: self.ixy = 0. if self.ixz is None: self.ixz = 0. if self.iyy is None: self.iyy = 0. if self.iyz is None: self.iyz = 0. if self.izz is None: self.izz = 0. if self.success is None: self.success = False if self.status_message is None: self.status_message = '' else: self.com = geometry_msgs.msg.Pose() self.gravity_mode = False self.mass = 0. self.ixx = 0. self.ixy = 0. self.ixz = 0. self.iyy = 0. self.iyz = 0. self.izz = 0. 
self.success = False self.status_message = '' def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success)) _x = self.status_message length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ codecs.lookup_error("rosmsg").msg_type = self._type try: if self.com is None: self.com = geometry_msgs.msg.Pose() end = 0 _x = self start = end end += 114 (_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success,) = _get_struct_7dB7dB().unpack(str[start:end]) self.gravity_mode = bool(self.gravity_mode) self.success = bool(self.success) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.status_message = str[start:end].decode('utf-8', 'rosmsg') else: self.status_message = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success)) _x = self.status_message length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ codecs.lookup_error("rosmsg").msg_type = self._type try: if self.com is None: self.com = geometry_msgs.msg.Pose() end = 0 _x = self start = end end += 114 (_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success,) = _get_struct_7dB7dB().unpack(str[start:end]) self.gravity_mode = bool(self.gravity_mode) 
self.success = bool(self.success) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.status_message = str[start:end].decode('utf-8', 'rosmsg') else: self.status_message = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_7dB7dB = None def _get_struct_7dB7dB(): global _struct_7dB7dB if _struct_7dB7dB is None: _struct_7dB7dB = struct.Struct("<7dB7dB") return _struct_7dB7dB class GetLinkProperties(object): _type = 'gazebo_msgs/GetLinkProperties' _md5sum = '0e06a70386d0ee3fb880c02f23fcd821' _request_class = GetLinkPropertiesRequest _response_class = GetLinkPropertiesResponse
37.031519
284
0.638502
12,024
0.930362
0
0
0
0
0
0
5,175
0.400418
86cfbb57e1ec13e6ae0711449af6c95612ae3139
2,268
py
Python
jupytext/kernels.py
st--/jupytext
f8e8352859cc22e17b11154d0770fd946c4a430a
[ "MIT" ]
5,378
2018-09-01T22:03:43.000Z
2022-03-31T06:51:42.000Z
jupytext/kernels.py
st--/jupytext
f8e8352859cc22e17b11154d0770fd946c4a430a
[ "MIT" ]
812
2018-08-31T08:26:13.000Z
2022-03-30T18:12:11.000Z
jupytext/kernels.py
st--/jupytext
f8e8352859cc22e17b11154d0770fd946c4a430a
[ "MIT" ]
380
2018-09-02T01:40:07.000Z
2022-03-25T13:57:23.000Z
"""Find kernel specifications for a given language""" import os import sys from .languages import same_language from .reraise import reraise try: # I prefer not to take a dependency on jupyter_client from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except ImportError as err: find_kernel_specs = reraise(err) get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook): """Set the kernel specification based on the 'main_language' metadata""" language = notebook.metadata.get("jupytext", {}).get("main_language") if "kernelspec" not in notebook.metadata and language: try: kernelspec = kernelspec_from_language(language) except ValueError: return notebook.metadata["kernelspec"] = kernelspec notebook.metadata.get("jupytext", {}).pop("main_language") def kernelspec_from_language(language): """Return the python kernel that matches the current env, or the first kernel that matches the given language""" if language == "python": # Return the kernel that matches the current Python executable for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) cmd = kernel_specs.argv[0] if ( kernel_specs.language == "python" and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ): return { "name": name, "language": language, "display_name": kernel_specs.display_name, } raise ValueError( "No kernel found that matches the current python executable {}\n".format( sys.executable ) + "Install one with 'python -m ipykernel install --name kernel_name [--user]'" ) for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language, language): return { "name": name, "language": language, "display_name": kernel_specs.display_name, } raise ValueError("No kernel found for the language {}".format(language))
36
116
0.622575
0
0
0
0
0
0
0
0
680
0.299824
86d025f02ce51457ef476e760c051f7660045f69
5,333
py
Python
scipy/sparse/_matrix_io.py
dhruv9vats/scipy
48e1dd7e604df3ae57d104b407c5b7a2a6a3247d
[ "BSD-3-Clause" ]
1
2021-08-16T09:32:42.000Z
2021-08-16T09:32:42.000Z
scipy/sparse/_matrix_io.py
dhruv9vats/scipy
48e1dd7e604df3ae57d104b407c5b7a2a6a3247d
[ "BSD-3-Clause" ]
44
2019-06-27T15:56:14.000Z
2022-03-15T22:21:10.000Z
scipy/sparse/_matrix_io.py
dhruv9vats/scipy
48e1dd7e604df3ae57d104b407c5b7a2a6a3247d
[ "BSD-3-Clause" ]
4
2020-06-13T10:32:25.000Z
2021-12-03T15:48:16.000Z
import numpy as np
import scipy.sparse

__all__ = ['save_npz', 'load_npz']


# Make loading safe vs. malicious input
PICKLE_KWARGS = dict(allow_pickle=False)


def save_npz(file, matrix, compressed=True):
    """ Save a sparse matrix to a file using ``.npz`` format.

    Parameters
    ----------
    file : str or file-like object
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already
        there.
    matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
        The sparse matrix to save.
    compressed : bool, optional
        Allow compressing the file. Default: True

    See Also
    --------
    scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
    numpy.savez: Save several arrays into a ``.npz`` archive.
    numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.

    Examples
    --------
    Store sparse matrix to disk, and load it again:

    >>> import scipy.sparse
    >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
    >>> sparse_matrix
    <2x3 sparse matrix of type '<class 'numpy.int64'>'
       with 2 stored elements in Compressed Sparse Column format>
    >>> sparse_matrix.todense()
    matrix([[0, 0, 3],
            [4, 0, 0]], dtype=int64)

    >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
    >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')

    >>> sparse_matrix
    <2x3 sparse matrix of type '<class 'numpy.int64'>'
       with 2 stored elements in Compressed Sparse Column format>
    >>> sparse_matrix.todense()
    matrix([[0, 0, 3],
            [4, 0, 0]], dtype=int64)
    """
    arrays_dict = {}
    if matrix.format in ('csc', 'csr', 'bsr'):
        arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
    elif matrix.format == 'dia':
        arrays_dict.update(offsets=matrix.offsets)
    elif matrix.format == 'coo':
        arrays_dict.update(row=matrix.row, col=matrix.col)
    else:
        raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))
    arrays_dict.update(
        format=matrix.format.encode('ascii'),
        shape=matrix.shape,
        data=matrix.data
    )
    if compressed:
        np.savez_compressed(file, **arrays_dict)
    else:
        np.savez(file, **arrays_dict)


def load_npz(file):
    """ Load a sparse matrix from a file using ``.npz`` format.

    Parameters
    ----------
    file : str or file-like object
        Either the file name (string) or an open file (file-like object)
        where the data will be loaded.

    Returns
    -------
    result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
        A sparse matrix containing the loaded data.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.

    See Also
    --------
    scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format.
    numpy.load: Load several arrays from a ``.npz`` archive.

    Examples
    --------
    Store sparse matrix to disk, and load it again:

    >>> import scipy.sparse
    >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
    >>> sparse_matrix
    <2x3 sparse matrix of type '<class 'numpy.int64'>'
       with 2 stored elements in Compressed Sparse Column format>
    >>> sparse_matrix.todense()
    matrix([[0, 0, 3],
            [4, 0, 0]], dtype=int64)

    >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
    >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')

    >>> sparse_matrix
    <2x3 sparse matrix of type '<class 'numpy.int64'>'
       with 2 stored elements in Compressed Sparse Column format>
    >>> sparse_matrix.todense()
    matrix([[0, 0, 3],
            [4, 0, 0]], dtype=int64)
    """
    with np.load(file, **PICKLE_KWARGS) as loaded:
        try:
            matrix_format = loaded['format']
        except KeyError as e:
            raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) from e

        matrix_format = matrix_format.item()

        if not isinstance(matrix_format, str):
            # Play safe with Python 2 vs 3 backward compatibility;
            # files saved with SciPy < 1.0.0 may contain unicode or bytes.
            matrix_format = matrix_format.decode('ascii')

        try:
            cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
        except AttributeError as e:
            raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) from e

        if matrix_format in ('csc', 'csr', 'bsr'):
            return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
        elif matrix_format == 'dia':
            return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
        elif matrix_format == 'coo':
            return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
        else:
            raise NotImplementedError('Load is not implemented for '
                                      'sparse matrix of format {}.'.format(matrix_format))
35.553333
114
0.615976
0
0
0
0
0
0
0
0
3,608
0.676542
86d07b07d670dc9caa0bd92708721764a364d527
1,423
py
Python
src/simulator/services/resources/atlas.py
ed741/PathBench
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
[ "BSD-3-Clause" ]
46
2020-12-25T04:09:15.000Z
2022-03-25T12:32:42.000Z
src/simulator/services/resources/atlas.py
ed741/PathBench
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
[ "BSD-3-Clause" ]
36
2020-12-21T16:10:02.000Z
2022-01-03T01:42:01.000Z
src/simulator/services/resources/atlas.py
judicaelclair/PathBenchURO
101e67674efdfa8e27e1cf7787dac9fdf99552fe
[ "BSD-3-Clause" ]
11
2021-01-06T23:34:12.000Z
2022-03-21T17:21:47.000Z
from typing import Dict, List

from simulator.services.resources.directory import Directory
from simulator.services.services import Services


class Atlas(Directory):
    def __init__(self, services: Services, name: str, parent: str, create: bool = False) -> None:
        super().__init__(services, name, parent, create)

        if create:
            metadata: Dict[str, any] = {
                "next_index": 0,
            }
            self._save_metadata(metadata)

    def append(self, obj: any) -> None:
        self.save(str(self._get_next_index()), obj)
        self._increment_index()

    def load_all(self, max_els: int = float("inf")) -> List[any]:
        ret: List[any] = []
        idx: int = 0

        while idx < max_els:
            obj: any = self.load(str(idx))
            if obj:
                ret.append(obj)
                idx += 1
            else:
                break
        return ret

    def _get_next_index(self) -> int:
        metadata: Dict[str, any] = self._get_metadata()
        return metadata["next_index"]

    def _increment_index(self) -> None:
        metadata: Dict[str, any] = self._get_metadata()
        metadata["next_index"] += 1
        self._save_metadata(metadata)

    def _save_metadata(self, metadata: Dict[str, any]) -> None:
        super().save("metadata", metadata)

    def _get_metadata(self) -> Dict[str, any]:
        return super().load("metadata")
29.040816
97
0.579761
1,279
0.898805
0
0
0
0
0
0
61
0.042867
86d18fa6bf233db205e6db3a19952144dd79aa36
1,427
py
Python
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py
ulixius9/OpenMetadata
f121698d968717f0932f685ef2a512c2a4d92438
[ "Apache-2.0" ]
null
null
null
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py
ulixius9/OpenMetadata
f121698d968717f0932f685ef2a512c2a4d92438
[ "Apache-2.0" ]
null
null
null
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py
ulixius9/OpenMetadata
f121698d968717f0932f685ef2a512c2a4d92438
[ "Apache-2.0" ]
null
null
null
# Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
TestCase builder
"""

from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest
from metadata.generated.schema.tests.table import tableRowCountToEqual
from metadata.generated.schema.tests.tableTest import TableTestType
from metadata.great_expectations.builders.table.base_table_test_builders import (
    BaseTableTestBuilder,
)


class TableRowCountToEqualBuilder(BaseTableTestBuilder):
    """Builder for `expect_table_row_count_to_equal` GE expectation"""

    def _build_test(self) -> CreateTableTestRequest:
        """Specific test builder for the test"""
        return self.build_test_request(
            config=tableRowCountToEqual.TableRowCountToEqual(
                value=self.result["expectation_config"]["kwargs"]["value"],
            ),
            test_type=TableTestType.tableRowCountToEqual,
        )
41.970588
86
0.756833
491
0.344078
0
0
0
0
0
0
726
0.50876
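The builder above pulls the expected row count out of a Great Expectations validation result via self.result["expectation_config"]["kwargs"]["value"]. A sketch of the minimal result shape that lookup assumes (keys inferred from the code, values made up):

# Hypothetical slice of a Great Expectations validation result; only the keys
# that TableRowCountToEqualBuilder._build_test() actually reads are shown.
result = {
    "expectation_config": {
        "expectation_type": "expect_table_row_count_to_equal",
        "kwargs": {"value": 120},
    },
    "success": True,
}

expected_rows = result["expectation_config"]["kwargs"]["value"]
print(expected_rows)  # 120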
86d22671738e4b0cf43566c5aeec7cd2a5f04193
6,899
py
Python
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py
gustavovaliati/obj-det-experiments
e81774a18b34c22d971ad15d7ac6eb8663ac6f22
[ "Apache-2.0" ]
null
null
null
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py
gustavovaliati/obj-det-experiments
e81774a18b34c22d971ad15d7ac6eb8663ac6f22
[ "Apache-2.0" ]
null
null
null
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py
gustavovaliati/obj-det-experiments
e81774a18b34c22d971ad15d7ac6eb8663ac6f22
[ "Apache-2.0" ]
null
null
null
'''
This code is based on https://github.com/jrieke/shape-detection/
'''

import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import tensorflow as tf
import datetime


class JriekeBboxDataset:
    def generate(self):
        print('Generating...')
        self.WIDTH = 8
        self.HEIGHT = 8
        num_imgs = 50000

        min_object_size = 1
        max_object_size = 4
        num_objects = 1

        self.bboxes = np.zeros((num_imgs, num_objects, 4))
        self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT))  # set background to 0

        for i_img in range(num_imgs):
            for i_object in range(num_objects):
                w, h = np.random.randint(min_object_size, max_object_size, size=2)
                x = np.random.randint(0, self.WIDTH - w)
                y = np.random.randint(0, self.HEIGHT - h)
                self.imgs[i_img, y:y+h, x:x+w] = 1.  # set rectangle to 1
                self.bboxes[i_img, i_object] = [x, y, w, h]

        print("Shapes: imgs ", self.imgs.shape, " bboxes ", self.bboxes.shape)

        # why this?
        # X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs)
        X = self.imgs
        y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH

        # Split training and test.
        i = int(0.8 * num_imgs)
        train_X = X[:i]  # 80% for training
        test_X = X[i:]
        train_y = y[:i]
        test_y = y[i:]
        self.test_imgs = self.imgs[i:]
        self.test_bboxes = self.bboxes[i:]

        return train_X, train_y, test_X, test_y

    def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
        fig = plt.figure(figsize=(12, 3))
        fig.suptitle('check if the generated imgs match to the test_X slice image')
        fig.subplots_adjust(top=0.85)

        plt.subplot(1, 2, 1)
        plt.gca().set_title('Returned by the dataset class: used for training')
        plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])

        plt.subplot(1, 2, 2)
        plt.gca().set_title('Global image holder: used for plotting.')
        plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])

        plt.show()
        print('compare:', test_X_sample, test_imgs_sample)

    def IOU(self, bbox1, bbox2):
        '''Calculate overlap between two bounding boxes [x, y, w, h] as the area of intersection over the area of unity'''
        x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
        x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]

        w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
        h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
        if w_I <= 0 or h_I <= 0:  # no overlap
            return 0.
        I = w_I * h_I
        U = w1 * h1 + w2 * h2 - I
        return I / U

    def convertDefaultAnnotToCoord(self, annot):
        '''
        annot -> [x, y, w, h]
        '''
        w = annot[2] * self.WIDTH
        h = annot[3] * self.HEIGHT
        x = annot[0] * self.HEIGHT
        y = annot[1] * self.HEIGHT
        return [x, y, w, h]

    def convertYoloAnnotToCoord(self, yolo_annot):
        '''
        yolo_annot -> [x, y, w, h]
        '''
        w = yolo_annot[2] * self.WIDTH
        h = yolo_annot[3] * self.HEIGHT
        x = (yolo_annot[0] * self.WIDTH) - (w/2)
        y = (yolo_annot[1] * self.HEIGHT) - (h/2)
        return [x, y, w, h]

    def show_generated(self, i=0):
        fig = plt.figure()
        fig.subplots_adjust(top=0.85)
        fig.suptitle('Generated image sample + GT')
        plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
        for bbox in self.bboxes[i]:
            plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
        plt.gca().legend(['GT'])
        plt.show()

    def plot_rectangle(self, img, bbox):
        fig = plt.figure()
        fig.suptitle('Plotting rectangle.')
        fig.subplots_adjust(top=0.85)

        plt.subplot(1, 1, 1)
        plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
        plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
        plt.show()

    def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
        fig = plt.figure(figsize=(12, 3))
        fig.suptitle('check if the generated imgs match to the test_X slice image')
        fig.subplots_adjust(top=0.85)

        plt.subplot(1, 2, 1)
        plt.gca().set_title('Returned by the dataset class: used for training')
        plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])

        plt.subplot(1, 2, 2)
        plt.gca().set_title('Global image holder: used for plotting.')
        plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])

        plt.show()
        print('compare:', test_X_sample, test_imgs_sample)

    def show_predicted(self, pred_bboxes):
        # Show a few images and predicted bounding boxes from the test dataset.
        fig = plt.figure(figsize=(12, 3))
        fig.subplots_adjust(top=0.85)
        fig.suptitle('Prediction demonstration. Random samples.')
        legend_plotted = False
        for i_subplot in range(1, 11):
            plt.subplot(1, 10, i_subplot)
            i = np.random.randint(len(pred_bboxes))
            plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
            for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]):
                # print('before convertion: pred', pred_bbox, 'gt', exp_bbox)
                pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox)
                # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox)
                print('after convertion: pred', pred_bbox, 'gt', exp_bbox)
                plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none'))
                # gt
                plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none'))
                plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r')
                if not legend_plotted:
                    legend_plotted = True
                    plt.gca().legend(['Pred', 'GT'], loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)
        plt.show()
        # plt.savefig('plots/bw-single-rectangle_prediction_{0:%Y-%m-%d%H:%M:%S}.png'.format(datetime.datetime.now()), dpi=300)
40.582353
142
0.59052
6,714
0.973185
0
0
0
0
0
0
1,478
0.214234
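The IOU helper in the record above treats boxes as [x, y, w, h] and returns intersection area over union area. The same arithmetic can be checked standalone without importing the class (a sketch that mirrors the method, with made-up boxes):

def iou(bbox1, bbox2):
    # Same [x, y, w, h] convention as JriekeBboxDataset.IOU above.
    x1, y1, w1, h1 = bbox1
    x2, y2, w2, h2 = bbox2
    w_i = min(x1 + w1, x2 + w2) - max(x1, x2)
    h_i = min(y1 + h1, y2 + h2) - max(y1, y2)
    if w_i <= 0 or h_i <= 0:  # no overlap
        return 0.0
    intersection = w_i * h_i
    union = w1 * h1 + w2 * h2 - intersection
    return intersection / union

# Two 2x2 boxes offset by one pixel in each direction: intersection 1, union 7.
print(iou([0, 0, 2, 2], [1, 1, 2, 2]))  # ~0.1429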
86d39cbeb38ed832359d8101e1462aeccc15eee8
1,400
py
Python
src/knownnodes.py
skeevey/PyBitmessage
196d688b138393d1d540df3322844dfe7e7c02ba
[ "MIT" ]
1
2018-04-25T08:08:47.000Z
2018-04-25T08:08:47.000Z
src/knownnodes.py
skeevey/PyBitmessage
196d688b138393d1d540df3322844dfe7e7c02ba
[ "MIT" ]
null
null
null
src/knownnodes.py
skeevey/PyBitmessage
196d688b138393d1d540df3322844dfe7e7c02ba
[ "MIT" ]
1
2018-04-25T08:08:48.000Z
2018-04-25T08:08:48.000Z
import pickle
import threading

from bmconfigparser import BMConfigParser
import state

knownNodesLock = threading.Lock()
knownNodes = {}

knownNodesTrimAmount = 2000


def saveKnownNodes(dirName=None):
    if dirName is None:
        dirName = state.appdata
    with knownNodesLock:
        with open(dirName + 'knownnodes.dat', 'wb') as output:
            pickle.dump(knownNodes, output)


def increaseRating(peer):
    increaseAmount = 0.1
    maxRating = 1
    with knownNodesLock:
        for stream in knownNodes.keys():
            try:
                knownNodes[stream][peer]["rating"] = min(knownNodes[stream][peer]["rating"] + increaseAmount, maxRating)
            except KeyError:
                pass


def decreaseRating(peer):
    decreaseAmount = 0.1
    minRating = -1
    with knownNodesLock:
        for stream in knownNodes.keys():
            try:
                knownNodes[stream][peer]["rating"] = max(knownNodes[stream][peer]["rating"] - decreaseAmount, minRating)
            except KeyError:
                pass


def trimKnownNodes(recAddrStream=1):
    if len(knownNodes[recAddrStream]) < BMConfigParser().get("knownnodes", "maxnodes"):
        return
    with knownNodesLock:
        oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount]
        for oldest in oldestList:
            del knownNodes[recAddrStream][oldest]
30.434783
120
0.648571
0
0
0
0
0
0
0
0
84
0.06
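increaseRating and decreaseRating in the record above clamp a peer's rating to [-1, 1] across every stream that knows the peer, silently skipping streams that do not. The clamping can be sketched against a plain nested dict (stand-in data, not a real knownNodes structure):

# Hypothetical knownNodes-shaped structure: {stream: {peer: {"rating": float, ...}}}
known_nodes = {
    1: {("203.0.113.5", 8444): {"rating": 0.95, "lastseen": 1714000000}},
    2: {},
}

def increase_rating(nodes, peer, amount=0.1, max_rating=1):
    # Same clamp-and-skip behaviour as knownnodes.increaseRating() above.
    for stream in nodes:
        try:
            nodes[stream][peer]["rating"] = min(nodes[stream][peer]["rating"] + amount, max_rating)
        except KeyError:
            pass

increase_rating(known_nodes, ("203.0.113.5", 8444))
print(known_nodes[1][("203.0.113.5", 8444)]["rating"])  # capped at 1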
86d3d194e9e0137871f97d1ad57cf24e9f4a90e3
1,672
py
Python
chroma_agent/action_plugins/manage_node.py
whamcloud/iml-agent
fecb2468fd6edc822f3ab37ced444d98d8725730
[ "MIT" ]
1
2020-04-22T16:43:09.000Z
2020-04-22T16:43:09.000Z
chroma_agent/action_plugins/manage_node.py
AlexTalker/iml-agent
5ebcfe96be670912d9a9b7fbb23431af0d54f768
[ "MIT" ]
53
2018-07-07T18:17:50.000Z
2021-03-19T23:15:28.000Z
chroma_agent/action_plugins/manage_node.py
AlexTalker/iml-agent
5ebcfe96be670912d9a9b7fbb23431af0d54f768
[ "MIT" ]
6
2018-06-18T08:51:38.000Z
2019-10-24T12:16:42.000Z
# Copyright (c) 2018 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.


import os

from chroma_agent.lib.shell import AgentShell
from chroma_agent.log import console_log
from chroma_agent.device_plugins.action_runner import CallbackAfterResponse
from chroma_agent.lib.pacemaker import PacemakerConfig


def ssi(runlevel):
    # force a manual failover by failing a node
    AgentShell.try_run(["sync"])
    AgentShell.try_run(["sync"])
    AgentShell.try_run(["init", runlevel])


def fail_node():
    ssi("0")


def stonith(node):
    p_cfg = PacemakerConfig()

    # TODO: signal that manager that a STONITH has been done so that it
    # doesn't treat it as an AWOL
    console_log.info("Rebooting %s per a STONITH request" % node)

    p_cfg.get_node(node).fence_reboot()


def shutdown_server(halt=True, at_time="now"):
    def _shutdown():
        console_log.info("Initiating server shutdown per manager request")
        # This will initiate a "nice" shutdown with a wall from root, etc.
        AgentShell.try_run(["shutdown", "-H" if halt else "-h", at_time])

        console_log.info("Terminating")
        os._exit(0)

    raise CallbackAfterResponse(None, _shutdown)


def reboot_server(at_time="now"):
    def _reboot():
        console_log.info("Initiating server reboot per manager request")
        # reboot(8) just calls shutdown anyhow.
        AgentShell.try_run(["shutdown", "-r", at_time])

        console_log.info("Terminating")
        os._exit(0)

    raise CallbackAfterResponse(None, _reboot)


ACTIONS = [reboot_server, shutdown_server, fail_node, stonith]
27.866667
75
0.703947
0
0
0
0
0
0
0
0
615
0.367823
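shutdown_server and reboot_server in the record above do not shut the host down inline; they raise CallbackAfterResponse so the agent can answer the manager first and only then run the closure. The control flow can be sketched with a stand-in exception (the real chroma_agent class is not reproduced here):

# Hypothetical stand-in for chroma_agent's CallbackAfterResponse: it carries a
# result to send back plus a callback to run after the response has gone out.
class CallbackAfterResponse(Exception):
    def __init__(self, result, callback):
        self.result = result
        self.callback = callback

def reboot_server():
    def _reboot():
        print("would run: shutdown -r now")
    raise CallbackAfterResponse(None, _reboot)

try:
    reboot_server()
except CallbackAfterResponse as e:
    # ... send the RPC response to the manager here ...
    e.callback()  # only now actually reboot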
86d45952adaab5e1d25729182d1ca80f64803a29
8,103
py
Python
census_data_downloader/core/tables.py
ian-r-rose/census-data-downloader
f8ac9d773e6d3f52be87bf916a2e32249391f966
[ "MIT" ]
null
null
null
census_data_downloader/core/tables.py
ian-r-rose/census-data-downloader
f8ac9d773e6d3f52be87bf916a2e32249391f966
[ "MIT" ]
null
null
null
census_data_downloader/core/tables.py
ian-r-rose/census-data-downloader
f8ac9d773e6d3f52be87bf916a2e32249391f966
[ "MIT" ]
null
null
null
#! /usr/bin/env python # -*- coding: utf-8 -* """ A base class that governs how to download and process tables from a Census API table. """ import os import logging import pathlib from . import geotypes from . import decorators logger = logging.getLogger(__name__) class BaseTableConfig(object): """ Configures how to download and process tables from the Census API. """ THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent # All available years YEAR_LIST = [ 2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009 ] # All available geographies GEOTYPE_LIST = ( "nationwide", "regions", "divisions", "states", "congressional_districts", "state_legislative_upper_districts", "state_legislative_lower_districts", "counties", "places", "urban_areas", "msas", "csas", "pumas", "nectas", "cnectas", "aiannh_homelands", "tracts", "zctas", "unified_school_districts", "elementary_school_districts", "secondary_school_districts" ) def __init__( self, api_key=None, source="acs5", years=None, data_dir=None, force=False ): """ Configuration. """ # Set the inputs self.CENSUS_API_KEY = os.getenv("CENSUS_API_KEY", api_key) if not self.CENSUS_API_KEY: raise NotImplementedError("Census API key required. Pass it as the first argument.") self.source = source self.force = force # # Allow custom years for data download, defaulting to most recent year # # If they want all the years, give it to them. if years == "all": self.years_to_download = self.YEAR_LIST # If the user provides a year give them that. elif isinstance(years, int): self.years_to_download = [years] # Or if they provide years as a list, give those then. elif isinstance(years, list): self.years_to_download = list(map(int, years)) # If they provided nothing, default to the latest year of data elif years is None: self.years_to_download = [max(self.YEAR_LIST), ] # Validate the years for year in self.years_to_download: if year not in self.YEAR_LIST: error_msg = ("Data only available for the years" f"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.") raise NotImplementedError(error_msg) # Set the data directories if data_dir: self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath("data") self.raw_data_dir = self.data_dir.joinpath("raw") self.processed_data_dir = self.data_dir.joinpath("processed") # Make sure they exist if not self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self): """ Returns the URL of the Census Reporter page explaining the ACS table. """ return f"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/" # # Geotype downloaders # @decorators.downloader def download_nationwide(self): """ Download nationwide data. """ return geotypes.NationwideDownloader @decorators.downloader def download_regions(self): """ Download data for all regions. """ return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): """ Download data for all divisions. """ return geotypes.DivisionsDownloader @decorators.downloader def download_states(self): """ Download data for all states. """ return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): """ Download data for all Congressional districts. 
""" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): """ Download data for all Census upper legislative districts in the provided state. """ return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): """ Download data for all Census lower legislative districts in the provided state. """ return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): """ Download data for all counties. """ return geotypes.CountiesDownloader @decorators.downloader def download_places(self): """ Download data for all Census designated places. """ return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): """ Download data for all urban areas """ return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): """ Download data for Metropolitian Statistical Areas. """ return geotypes.MsasDownloader @decorators.downloader def download_csas(self): """ Download data for Combined Statistical Areas. """ return geotypes.CsasDownloader @decorators.downloader def download_pumas(self): """ Download data for Public Use Microdata Areas. """ return geotypes.PumasDownloader @decorators.downloader def download_nectas(self): """ Download data for New England cities and towns. """ return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): """ Download data for combined New England cities and towns. """ return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): """ Download data for American Indian home lands. """ return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): """ Download data for all Census tracts in the provided state. """ return geotypes.TractsDownloader @decorators.downloader def download_zctas(self): """ Download data for Zip Code Tabulation Areas """ return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): """ Download data for unified school districts. """ return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): """ Download data for elementary school districts. """ return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): """ Download data for secondary school districts. """ return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): """ Download 'em all. """ for geo in self.GEOTYPE_LIST: print(geo) # Get the downloader function dl = getattr(self, f"download_{geo}", None) # Validate it if not dl or not callable(dl): raise NotImplementedError(f"Invalid geography type: {geo}") # Run it try: dl() except NotImplementedError: pass
28.038062
96
0.60817
7,835
0.966926
0
0
4,126
0.509194
0
0
2,944
0.363322
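BaseTableConfig in the record above accepts years as the string "all", a single int, a list, or None, and normalizes them against YEAR_LIST. That branching can be sketched standalone (YEAR_LIST copied from the class; the function name is made up):

YEAR_LIST = [2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009]

def normalize_years(years=None):
    # Mirrors the year-handling branches in BaseTableConfig.__init__ above.
    if years == "all":
        return YEAR_LIST
    if isinstance(years, int):
        return [years]
    if isinstance(years, list):
        return list(map(int, years))
    if years is None:
        return [max(YEAR_LIST)]

print(normalize_years())               # [2017] -- defaults to the latest year
print(normalize_years([2015, 2016]))   # [2015, 2016]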
86d61d512c3c9d47b1f63fe91873604a549e077d
5,422
py
Python
sgf2ebook.py
loujine/sgf2ebook
13c87056646cc6c06485b129221ab2028e67ef95
[ "MIT" ]
null
null
null
sgf2ebook.py
loujine/sgf2ebook
13c87056646cc6c06485b129221ab2028e67ef95
[ "MIT" ]
null
null
null
sgf2ebook.py
loujine/sgf2ebook
13c87056646cc6c06485b129221ab2028e67ef95
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import argparse import os from pathlib import Path import shutil import subprocess import sys from tempfile import TemporaryDirectory from uuid import uuid4 from zipfile import ZipFile import jinja2 import sente # type: ignore __version__ = (1, 0, 0) SGF_RENDER_EXECUTABLE = './sgf-render' TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve() def load_sgf(sgfpath: Path): game = sente.sgf.load(str(sgfpath)) comments = {} seq = game.get_default_sequence() for idx, move in enumerate(seq, 1): game.play(move) if game.comment: comments[idx] = game.comment return { # read only main sequence, not variations 'nb_moves': len(seq), 'metadata': game.get_properties(), 'comments': comments, } def main(sgfpath: Path, output_path: Path) -> None: print() print(f'Load content of {sgfpath}') try: sgf_content = load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath}, skipping') return nb_moves = sgf_content['nb_moves'] metadata = sgf_content['metadata'] comments = sgf_content['comments'] uuid = uuid4() with TemporaryDirectory() as tmpdir: print('Prepare structure of the ebook') shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True) template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read()) print('Prepare SVG diagrams') svgdirpath = Path(tmpdir, 'EPUB', 'Images') for move in range(1, nb_moves + 1): svgpath = f'diagram_{move:03}.svg' # generate SVG files with sgf-render try: subprocess.check_call([ SGF_RENDER_EXECUTABLE, str(sgfpath), '--move-numbers', '--first-move-number', str(move), '-n', str(move), '--style', 'minimalist', '-o', svgdirpath.joinpath(svgpath), ]) except subprocess.CalledProcessError: print(f'Move {move} could not be converted to SVG') continue # replace move number in SVG # not possible directly in sgf-render invocation at the moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1)) # create HTML page with SVG element html_content = template.render( title=sgfpath.stem, svgpath=svgpath, info=metadata, first_flag=(move == 1), last_flag=(move == nb_moves), comment=comments.get(move, ''), ) with Path(tmpdir, 'EPUB', 'Text', f'page_{move:03}.html').open('w') as fd: fd.write(html_content) # Declare all HTML/SVG files in master file print('Prepare content.opf file') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read()) opf_content = template.render( title=sgfpath.stem, creator='sgf2ebook', UUID=uuid, svgpath=sorted(svgdirpath.glob('*.svg')), enumerate=enumerate, ) with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd: fd.write(opf_content) # Generate table of contents print('Prepare table of contents') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content = template.render( title=sgfpath.stem, UUID=uuid, nb_moves=nb_moves, range=range, ) with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd: fd.write(toc_content) # zip all content in EPUB file output_path.mkdir(exist_ok=True, parents=True) output_name = f"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata else ''}{metadata.get('RO', '')}.epub".replace(' ', '_') with ZipFile(output_path.joinpath(output_name), 'w') as zf: os.chdir(tmpdir) # "The first file in the OCF ZIP Container MUST be the mimetype file" zf.write('mimetype') for root, dirs, files in os.walk('.'): for file in sorted(files): if file != 'mimetype': 
zf.write(Path(root, file)) os.chdir(Path(__file__).parent) print(f'{output_path.joinpath(output_name)} generated') if __name__ == "__main__": parser = argparse.ArgumentParser( description='') parser.add_argument('--input-path', '-i', help='Input files or directory') parser.add_argument('--output-path', '-o', help='Output directory') args = parser.parse_args() path = Path(args.input_path) outpath = Path(args.output_path) if not path.exists(): print(f'Input path {path} not found') sys.exit(1) if path.is_file(): main(path, outpath) if path.is_dir(): for filepath in sorted(path.rglob('*.sgf')): main(filepath, outpath.joinpath(filepath.parent.relative_to(path)))
34.75641
142
0.574511
0
0
0
0
0
0
0
0
1,310
0.241608
86d6728bc96a31ea175e93ab91aadcc559c13053
1,788
py
Python
vmis_sql_python/evaluation/metrics/popularity.py
bolcom/serenade-experiments-sigmod
0a4c7f19d800d1c2784ea5536abb1a628cb12f7a
[ "Apache-2.0" ]
null
null
null
vmis_sql_python/evaluation/metrics/popularity.py
bolcom/serenade-experiments-sigmod
0a4c7f19d800d1c2784ea5536abb1a628cb12f7a
[ "Apache-2.0" ]
null
null
null
vmis_sql_python/evaluation/metrics/popularity.py
bolcom/serenade-experiments-sigmod
0a4c7f19d800d1c2784ea5536abb1a628cb12f7a
[ "Apache-2.0" ]
null
null
null
class Popularity:
    '''
    Popularity( length=20 )

    Used to iteratively calculate the average overall popularity of an algorithm's recommendations.

    Parameters
    -----------
    length : int
        Coverage@length
    training_df : dataframe
        determines how many distinct item_ids there are in the training data
    '''

    def __init__(self, length=20, training_df=None):
        self.length = length
        self.sum = 0
        self.tests = 0
        self.train_actions = len(training_df.index)
        # group the data by the itemIds
        grp = training_df.groupby('ItemId')
        # count the occurence of every itemid in the trainingdataset
        self.pop_scores = grp.size()
        # sort it according to the score
        self.pop_scores.sort_values(ascending=False, inplace=True)
        # normalize
        self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0]

    def add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None):
        '''
        Update the metric with a result set and the correct next item.
        Result must be sorted correctly.

        Parameters
        --------
        result: pandas.Series
            Series of scores with the item id as the index
        '''
        # only keep the k first predictions
        recs = result[:self.length]
        # take the unique values out of those top scorers
        items = recs.index.unique()

        self.sum += (self.pop_scores[items].sum() / len(items))

        self.tests += 1

    def result(self):
        '''
        Return a tuple of a description string and the current averaged value
        '''
        return ("Popularity@" + str(self.length) + ": "), (self.sum / self.tests)
33.735849
100
0.597315
1,779
0.994966
0
0
0
0
0
0
911
0.509508
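The metric in the record above normalizes per-item interaction counts from the training frame and then averages the popularity of the top-k recommended items. The arithmetic can be reproduced directly with pandas (a sketch with made-up item ids, not importing the class):

import pandas as pd

# Toy training data: item 1 is the most frequent, so its score normalizes to 1.0.
training_df = pd.DataFrame({"ItemId": [1, 1, 1, 2, 2, 3]})
pop_scores = training_df.groupby("ItemId").size()
pop_scores.sort_values(ascending=False, inplace=True)
pop_scores = pop_scores / pop_scores[:1].values[0]

# One recommendation list, scores indexed by item id (what add() receives).
result = pd.Series({1: 0.9, 3: 0.5})
recs = result[:20]
items = recs.index.unique()
popularity_at_k = pop_scores[items].sum() / len(items)
print(popularity_at_k)  # (1.0 + 1/3) / 2 ~= 0.667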
86d75a7478a79891b6baf0f18c7802c22b104725
918
py
Python
dandeliondiary/household/urls.py
amberdiehl/dandeliondiary_project
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
[ "FSFAP" ]
null
null
null
dandeliondiary/household/urls.py
amberdiehl/dandeliondiary_project
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
[ "FSFAP" ]
6
2020-04-29T23:54:15.000Z
2022-03-11T23:25:24.000Z
dandeliondiary/household/urls.py
amberdiehl/dandeliondiary_project
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
[ "FSFAP" ]
null
null
null
from django.conf.urls import include, url

from . import views

urlpatterns = [
    url(r'^settings$', views.household_dashboard, name='household_dashboard'),
    url(r'^myinfo$', views.my_info, name='my_info'),
    url(r'^profile$', views.household_profile, name='maintain_household'),
    url(r'^members$', views.household_members, name='maintain_members'),
    url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'),
    url(r'^ajax/models-by-make/(?P<make_id>\d+)/$', views.ajax_models_by_make),
    url(r'^ajax/makes-by-type/(?P<type_id>\d+)/$', views.ajax_makes_by_type),
    url(r'^ajax/add-make/(?P<type_key>\d+)/(?P<make>[\w ]{1,50})/$', views.ajax_add_make),
    url(r'^ajax/add-model/(?P<make_key>\d+)/(?P<model>[\w -]{1,128})/$', views.ajax_add_model),
    url(r'^ajax/delete-invite/$', views.ajax_delete_invite),
    url(r'^ajax/change-member-status/$', views.ajax_change_member_status),
]
54
95
0.683007
0
0
0
0
0
0
0
0
408
0.444444
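The two ajax "add" routes in the record above capture a numeric key and a length-constrained name through named regex groups. The same patterns can be exercised directly with Python's re module (the example path and make name are made up):

import re

# Same pattern as the add-make route above, minus Django's URL plumbing.
add_make = re.compile(r'^ajax/add-make/(?P<type_key>\d+)/(?P<make>[\w ]{1,50})/$')
match = add_make.match('ajax/add-make/7/Forest River/')
print(match.groupdict())  # {'type_key': '7', 'make': 'Forest River'}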
86d75f7e9a302f49289d9be8498b550dc47650fa
79,634
py
Python
private/templates/NYC/config.py
devinbalkind/eden
d5a684eae537432eb2c7d954132484a4714ca8fb
[ "MIT" ]
null
null
null
private/templates/NYC/config.py
devinbalkind/eden
d5a684eae537432eb2c7d954132484a4714ca8fb
[ "MIT" ]
null
null
null
private/templates/NYC/config.py
devinbalkind/eden
d5a684eae537432eb2c7d954132484a4714ca8fb
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- try: # Python 2.7 from collections import OrderedDict except: # Python 2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon import current from gluon.html import A, URL from gluon.storage import Storage from s3 import s3_fullname T = current.T settings = current.deployment_settings """ Template settings for NYC Prepared """ # Pre-Populate settings.base.prepopulate = ("NYC",) settings.base.system_name = T("NYC Prepared") settings.base.system_name_short = T("NYC Prepared") # Theme (folder to use for views/layout.html) settings.base.theme = "NYC" settings.ui.formstyle_row = "bootstrap" settings.ui.formstyle = "bootstrap" settings.ui.filter_formstyle = "table_inline" settings.msg.parser = "NYC" # Uncomment to Hide the language toolbar settings.L10n.display_toolbar = False # Default timezone for users settings.L10n.utc_offset = "UTC -0500" # Uncomment these to use US-style dates in English settings.L10n.date_format = "%m-%d-%Y" # Start week on Sunday settings.L10n.firstDOW = 0 # Number formats (defaults to ISO 31-0) # Decimal separator for numbers (defaults to ,) settings.L10n.decimal_separator = "." # Thousands separator for numbers (defaults to space) settings.L10n.thousands_separator = "," # Default Country Code for telephone numbers settings.L10n.default_country_code = 1 # Enable this to change the label for 'Mobile Phone' settings.ui.label_mobile_phone = "Cell Phone" # Enable this to change the label for 'Postcode' settings.ui.label_postcode = "ZIP Code" # Uncomment to disable responsive behavior of datatables # - Disabled until tested settings.ui.datatables_responsive = False # PDF to Letter settings.base.paper_size = T("Letter") # Restrict the Location Selector to just certain countries # NB This can also be over-ridden for specific contexts later # e.g. Activities filtered to those of parent Project settings.gis.countries = ("US",) settings.fin.currencies = { "USD" : T("United States Dollars"), } settings.L10n.languages = OrderedDict([ ("en", "English"), ("es", "Español"), ]) # Authentication settings # These settings should be changed _after_ the 1st (admin) user is # registered in order to secure the deployment # Should users be allowed to register themselves? settings.security.self_registration = "index" # Do new users need to verify their email address? settings.auth.registration_requires_verification = True # Do new users need to be approved by an administrator prior to being able to login? 
settings.auth.registration_requires_approval = True # Always notify the approver of a new (verified) user, even if the user is automatically approved #settings.auth.always_notify_approver = False # Uncomment this to request the Mobile Phone when a user registers settings.auth.registration_requests_mobile_phone = True # Uncomment this to request the Organisation when a user registers settings.auth.registration_requests_organisation = True # Uncomment this to request the Site when a user registers #settings.auth.registration_requests_site = True # Roles that newly-registered users get automatically #settings.auth.registration_roles = { 0: ["comms_dispatch"]} #settings.auth.registration_link_user_to = {"staff":T("Staff"), # #"volunteer":T("Volunteer") # } settings.auth.registration_link_user_to_default = "staff" settings.security.policy = 5 # Controller, Function & Table ACLs # Enable this to have Open links in IFrames open a full page in a new tab settings.ui.iframe_opens_full = True settings.ui.label_attachments = "Media" settings.ui.update_label = "Edit" # Uncomment to disable checking that LatLons are within boundaries of their parent #settings.gis.check_within_parent_boundaries = False # GeoNames username settings.gis.geonames_username = "eden_nyc" # Uncomment to show created_by/modified_by using Names not Emails settings.ui.auth_user_represent = "name" # Record Approval settings.auth.record_approval = True settings.auth.record_approval_required_for = ("org_organisation",) # ----------------------------------------------------------------------------- # Audit def audit_write(method, tablename, form, record, representation): if not current.auth.user: # Don't include prepop return False if tablename in ("cms_post", "org_facility", "org_organisation", "req_req", ): # Perform normal Audit return True else: # Don't Audit non user-visible resources return False settings.security.audit_write = audit_write # ----------------------------------------------------------------------------- # CMS # Uncomment to use Bookmarks in Newsfeed settings.cms.bookmarks = True # Uncomment to use have Filter form in Newsfeed be open by default settings.cms.filter_open = True # Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page settings.cms.location_click_filters = True # Uncomment to use organisation_id instead of created_by in Newsfeed settings.cms.organisation = "post_organisation.organisation_id" # Uncomment to use org_group_id in Newsfeed settings.cms.organisation_group = "post_organisation_group.group_id" # Uncomment to use person_id instead of created_by in Newsfeed settings.cms.person = "person_id" # Uncomment to use Rich Text editor in Newsfeed settings.cms.richtext = True # Uncomment to show Links in Newsfeed settings.cms.show_links = True # Uncomment to show Tags in Newsfeed settings.cms.show_tags = True # Uncomment to show post Titles in Newsfeed settings.cms.show_titles = True # ----------------------------------------------------------------------------- # Inventory Management # Uncomment to customise the label for Facilities in Inventory Management settings.inv.facility_label = "Facility" # Uncomment if you need a simpler (but less accountable) process for managing stock levels #settings.inv.direct_stock_edits = True # Uncomment to call Stock Adjustments, 'Stock Counts' settings.inv.stock_count = True # Uncomment to not track pack values settings.inv.track_pack_values = False settings.inv.send_show_org = False # Types common to both Send and 
Receive settings.inv.shipment_types = { 1: T("Other Warehouse") } settings.inv.send_types = { #21: T("Distribution") } settings.inv.send_type_default = 1 settings.inv.item_status = { #0: current.messages["NONE"], #1: T("Dump"), #2: T("Sale"), #3: T("Reject"), #4: T("Surplus") } # ----------------------------------------------------------------------------- # Organisations # # Enable the use of Organisation Groups settings.org.groups = "Network" # Make Services Hierarchical settings.org.services_hierarchical = True # Set the label for Sites settings.org.site_label = "Facility" #settings.org.site_label = "Location" # Uncomment to show the date when a Site (Facilities-only for now) was last contacted settings.org.site_last_contacted = True # Enable certain fields just for specific Organisations # empty list => disabled for all (including Admin) #settings.org.dependent_fields = { \ # "pr_person_details.mother_name" : [], # "pr_person_details.father_name" : [], # "pr_person_details.company" : [], # "pr_person_details.affiliations" : [], # "vol_volunteer.active" : [], # "vol_volunteer_cluster.vol_cluster_type_id" : [], # "vol_volunteer_cluster.vol_cluster_id" : [], # "vol_volunteer_cluster.vol_cluster_position_id" : [], # } # Uncomment to use an Autocomplete for Site lookup fields settings.org.site_autocomplete = True # Extra fields to search in Autocompletes & display in Representations settings.org.site_autocomplete_fields = ("organisation_id$name", "location_id$addr_street", ) # Uncomment to hide inv & req tabs from Sites #settings.org.site_inv_req_tabs = True # ----------------------------------------------------------------------------- def facility_marker_fn(record): """ Function to decide which Marker to use for Facilities Map @ToDo: Legend """ db = current.db s3db = current.s3db table = db.org_facility_type ltable = db.org_site_facility_type query = (ltable.site_id == record.site_id) & \ (ltable.facility_type_id == table.id) rows = db(query).select(table.name) types = [row.name for row in rows] # Use Marker in preferential order if "Hub" in types: marker = "warehouse" elif "Medical Clinic" in types: marker = "hospital" elif "Food" in types: marker = "food" elif "Relief Site" in types: marker = "asset" elif "Residential Building" in types: marker = "residence" #elif "Shelter" in types: # marker = "shelter" else: # Unknown marker = "office" if settings.has_module("req"): # Colour code by open/priority requests reqs = record.reqs if reqs == 3: # High marker = "%s_red" % marker elif reqs == 2: # Medium marker = "%s_yellow" % marker elif reqs == 1: # Low marker = "%s_green" % marker mtable = db.gis_marker try: marker = db(mtable.name == marker).select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() except: marker = db(mtable.name == "office").select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() return marker # ----------------------------------------------------------------------------- def org_facility_onvalidation(form): """ Default the name to the Street Address """ form_vars = form.vars name = form_vars.get("name", None) if name: return address = form_vars.get("address", None) if address: form_vars.name = address else: # We need a default form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id) # ----------------------------------------------------------------------------- def customise_org_facility_controller(**attr): s3db = current.s3db s3 = current.response.s3 # Tell the client to 
request per-feature markers s3db.configure("org_facility", marker_fn=facility_marker_fn) # Custom PreP standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) if not result: return False if r.method not in ("read", "update"): types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None) if not types: # Hide Private Residences from s3 import FS s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence" if r.interactive: tablename = "org_facility" table = s3db[tablename] if not r.component and r.method in (None, "create", "update"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget field = table.location_id if r.method in ("create", "update"): field.label = "" # Gets replaced by widget levels = ("L2", "L3") field.requires = IS_LOCATION_SELECTOR2(levels=levels) field.widget = S3LocationSelectorWidget2(levels=levels, hide_lx=False, reverse_lx=True, show_address=True, show_postcode=True, ) table.organisation_id.widget = S3MultiSelectWidget(multiple=False) if r.get_vars.get("format", None) == "popup": # Coming from req/create form # Hide most Fields from s3 import S3SQLCustomForm, S3SQLInlineComponent # We default this onvalidation table.name.notnull = False table.name.requires = None crud_form = S3SQLCustomForm(S3SQLInlineComponent( "site_facility_type", label = T("Facility Type"), fields = [("", "facility_type_id")], multiple = False, required = True, ), "name", "location_id", ) s3db.configure(tablename, crud_form = crud_form, onvalidation = org_facility_onvalidation, ) return True s3.prep = custom_prep return attr settings.customise_org_facility_controller = customise_org_facility_controller # ----------------------------------------------------------------------------- def customise_org_organisation_resource(r, tablename): from gluon.html import DIV, INPUT from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget s3db = current.s3db if r.tablename == "org_organisation": if r.id: # Update form ctable = s3db.pr_contact query = (ctable.pe_id == r.record.pe_id) & \ (ctable.contact_method == "RSS") & \ (ctable.deleted == False) rss = current.db(query).select(ctable.poll, limitby=(0, 1) ).first() if rss and not rss.poll: # Remember that we don't wish to import rss_import = "on" else: # Default rss_import = None else: # Create form: Default rss_import = None else: # Component if r.component_id: # Update form db = current.db otable = s3db.org_organisation org = db(otable.id == r.component_id).select(otable.pe_id, limitby=(0, 1) ).first() try: pe_id = org.pe_id except: current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id) # Default rss_import = None else: ctable = s3db.pr_contact query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "RSS") & \ (ctable.deleted == False) rss = db(query).select(ctable.poll, limitby=(0, 1) ).first() if rss and not rss.poll: # Remember that we don't wish to import rss_import = "on" else: # Default rss_import = None else: # Create form: Default rss_import = None mtable = s3db.org_group_membership mtable.group_id.widget = S3MultiSelectWidget(multiple=False) mtable.status_id.widget = S3MultiSelectWidget(multiple=False, create=dict(c="org", f="group_membership_status", label=str(T("Add New Status")), parent="group_membership", child="status_id" )) crud_form = S3SQLCustomForm( "name", "acronym", S3SQLInlineLink( 
"organisation_type", field = "organisation_type_id", label = T("Type"), multiple = False, #widget = "hierarchy", ), S3SQLInlineComponentMultiSelectWidget( # activate hierarchical org_service: #S3SQLInlineLink( "service", label = T("Services"), field = "service_id", # activate hierarchical org_service: #leafonly = False, #widget = "hierarchy", ), S3SQLInlineComponent( "group_membership", label = T("Network"), fields = [("", "group_id"), ("", "status_id"), ], ), S3SQLInlineComponent( "address", label = T("Address"), multiple = False, # This is just Text - put into the Comments box for now # Ultimately should go into location_id$addr_street fields = [("", "comments")], ), S3SQLInlineComponentMultiSelectWidget( "location", label = T("Neighborhoods Served"), field = "location_id", filterby = dict(field = "level", options = "L4" ), # @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget #cols = 5, ), "phone", S3SQLInlineComponent( "contact", name = "phone2", label = T("Phone2"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "WORK_PHONE" ) ), S3SQLInlineComponent( "contact", name = "email", label = T("Email"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "EMAIL" ) ), "website", S3SQLInlineComponent( "contact", comment = DIV(INPUT(_type="checkbox", _name="rss_no_import", value = rss_import, ), T("Don't Import Feed")), name = "rss", label = T("RSS"), multiple = False, fields = [("", "value"), #(T("Don't Import Feed"), "poll"), ], filterby = dict(field = "contact_method", options = "RSS" ) ), S3SQLInlineComponent( "document", name = "iCal", label = "iCAL", multiple = False, fields = [("", "url")], filterby = dict(field = "name", options="iCal" ) ), S3SQLInlineComponent( "document", name = "data", label = T("Data"), multiple = False, fields = [("", "url")], filterby = dict(field = "name", options="Data" ) ), S3SQLInlineComponent( "contact", name = "twitter", label = T("Twitter"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "TWITTER" ) ), S3SQLInlineComponent( "contact", name = "facebook", label = T("Facebook"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "FACEBOOK" ) ), "comments", postprocess = pr_contact_postprocess, ) from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter # activate hierarchical org_service: #from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter filter_widgets = [ S3TextFilter(["name", "acronym"], label = T("Name"), _class = "filter-search", ), S3OptionsFilter("group_membership.group_id", label = T("Network"), represent = "%(name)s", #hidden = True, ), S3LocationFilter("organisation_location.location_id", label = T("Neighborhood"), levels = ("L3", "L4"), #hidden = True, ), S3OptionsFilter("service_organisation.service_id", #label = T("Service"), #hidden = True, ), # activate hierarchical org_service: #S3HierarchyFilter("service_organisation.service_id", # #label = T("Service"), # #hidden = True, # ), S3OptionsFilter("organisation_organisation_type.organisation_type_id", label = T("Type"), #hidden = True, ), ] list_fields = ["name", (T("Type"), "organisation_organisation_type.organisation_type_id"), (T("Services"), "service.name"), "phone", (T("Email"), "email.value"), "website" #(T("Neighborhoods Served"), "location.name"), ] s3db.configure("org_organisation", crud_form = crud_form, filter_widgets = filter_widgets, list_fields = 
list_fields, ) settings.customise_org_organisation_resource = customise_org_organisation_resource # ----------------------------------------------------------------------------- def customise_org_organisation_controller(**attr): s3db = current.s3db s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) else: result = True if r.interactive: if r.component_name == "facility": if r.method in (None, "create", "update"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 table = s3db.org_facility field = table.location_id if r.method in ("create", "update"): field.label = "" # Gets replaced by widget levels = ("L2", "L3") field.requires = IS_LOCATION_SELECTOR2(levels=levels) field.widget = S3LocationSelectorWidget2(levels=levels, hide_lx=False, reverse_lx=True, show_address=True, show_postcode=True, ) elif r.component_name == "human_resource": # Don't assume that user is from same org/site as Contacts they create r.component.table.site_id.default = None return result s3.prep = custom_prep # Custom postp standard_postp = s3.postp def custom_postp(r, output): # Call standard postp if callable(standard_postp): output = standard_postp(r, output) if r.interactive and isinstance(output, dict): if "rheader" in output: # Custom Tabs tabs = [(T("Basic Details"), None), (T("Contacts"), "human_resource"), (T("Facilities"), "facility"), (T("Projects"), "project"), (T("Assets"), "asset"), ] output["rheader"] = s3db.org_rheader(r, tabs=tabs) return output s3.postp = custom_postp return attr settings.customise_org_organisation_controller = customise_org_organisation_controller # ----------------------------------------------------------------------------- def customise_org_group_controller(**attr): s3db = current.s3db s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) else: result = True if not r.component: table = s3db.org_group list_fields = ["name", "mission", "website", "meetings", ] s3db.configure("org_group", list_fields = list_fields, ) if r.interactive: from gluon.html import DIV, INPUT from s3 import S3SQLCustomForm, S3SQLInlineComponent if r.method != "read": from gluon.validators import IS_EMPTY_OR from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 field = table.location_id field.label = "" # Gets replaced by widget #field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",)) field.requires = IS_EMPTY_OR( IS_LOCATION_SELECTOR2(levels = ("L2",)) ) field.widget = S3LocationSelectorWidget2(levels = ("L2",), points = True, polygons = True, ) # Default location to Manhattan db = current.db gtable = db.gis_location query = (gtable.name == "New York") & \ (gtable.level == "L2") manhattan = db(query).select(gtable.id, limitby=(0, 1)).first() if manhattan: field.default = manhattan.id table.mission.readable = table.mission.writable = True table.meetings.readable = table.meetings.writable = True if r.id: # Update form ctable = s3db.pr_contact query = (ctable.pe_id == r.record.pe_id) & \ (ctable.contact_method == "RSS") & \ (ctable.deleted == False) rss = current.db(query).select(ctable.poll, limitby=(0, 1) ).first() if rss and not rss.poll: # Remember that we don't wish to import rss_import = "on" else: # Default rss_import = None else: # Create form: Default rss_import = None crud_form = S3SQLCustomForm( "name", "location_id", "mission", S3SQLInlineComponent( "contact", name = 
"phone", label = T("Phone"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "WORK_PHONE" ) ), S3SQLInlineComponent( "contact", name = "email", label = T("Email"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "EMAIL" ) ), "website", S3SQLInlineComponent( "contact", comment = DIV(INPUT(_type="checkbox", _name="rss_no_import", value = rss_import, ), T("Don't Import Feed")), name = "rss", label = T("RSS"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "RSS" ) ), S3SQLInlineComponent( "document", name = "iCal", label = "iCAL", multiple = False, fields = [("", "url")], filterby = dict(field = "name", options="iCal" ) ), S3SQLInlineComponent( "document", name = "data", label = T("Data"), multiple = False, fields = [("", "url")], filterby = dict(field = "name", options="Data" ) ), S3SQLInlineComponent( "contact", name = "twitter", label = T("Twitter"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "TWITTER" ) ), S3SQLInlineComponent( "contact", name = "facebook", label = T("Facebook"), multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "FACEBOOK" ) ), "meetings", "comments", postprocess = pr_contact_postprocess, ) s3db.configure("org_group", crud_form = crud_form, ) elif r.component_name == "pr_group": list_fields = [#(T("Network"), "group_team.org_group_id"), "name", "description", "meetings", (T("Chairperson"), "chairperson"), "comments", ] s3db.configure("pr_group", list_fields = list_fields, ) elif r.component_name == "organisation": # Add Network Status to List Fields list_fields = s3db.get_config("org_organisation", "list_fields") list_fields.insert(1, "group_membership.status_id") return result s3.prep = custom_prep if current.auth.s3_logged_in(): # Allow components with components (such as org/group) to breakout from tabs attr["native"] = True return attr settings.customise_org_group_controller = customise_org_group_controller # ----------------------------------------------------------------------------- # Persons # Uncomment to hide fields in S3AddPersonWidget settings.pr.request_dob = False settings.pr.request_gender = False # Doesn't yet work (form fails to submit) #settings.pr.select_existing = False settings.pr.show_emergency_contacts = False # ----------------------------------------------------------------------------- # Persons def customise_pr_person_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) else: result = True s3db = current.s3db #if r.method == "validate": # # Can't validate image without the file # image_field = s3db.pr_image.image # image_field.requires = None if r.interactive or r.representation == "aadata": if not r.component: hr_fields = ["organisation_id", "job_title_id", "site_id", ] if r.method in ("create", "update"): get_vars = r.get_vars # Context from a Profile page?" 
organisation_id = get_vars.get("(organisation)", None) if organisation_id: field = s3db.hrm_human_resource.organisation_id field.default = organisation_id field.readable = field.writable = False hr_fields.remove("organisation_id") site_id = get_vars.get("(site)", None) if site_id: field = s3db.hrm_human_resource.site_id field.default = site_id field.readable = field.writable = False hr_fields.remove("site_id") else: s3db.hrm_human_resource.site_id.default = None # ImageCrop widget doesn't currently work within an Inline Form #image_field = s3db.pr_image.image #from gluon.validators import IS_IMAGE #image_field.requires = IS_IMAGE() #image_field.widget = None from s3 import S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields = ["first_name", #"middle_name", "last_name", S3SQLInlineComponent( "human_resource", name = "human_resource", label = "", multiple = False, fields = hr_fields, ), #S3SQLInlineComponent( # "image", # name = "image", # label = T("Photo"), # multiple = False, # fields = [("", "image")], # filterby = dict(field = "profile", # options=[True] # ) # ), ] list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"), "first_name", #"middle_name", "last_name", (T("Job Title"), "human_resource.job_title_id"), (T("Office"), "human_resource.site_id"), ] # Don't include Email/Phone for unauthenticated users if current.auth.is_logged_in(): MOBILE = settings.get_ui_label_mobile_phone() EMAIL = T("Email") list_fields += [(MOBILE, "phone.value"), (EMAIL, "email.value"), ] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( "contact", name = "phone", label = MOBILE, multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "SMS")), ) s3_sql_custom_fields.insert(3, S3SQLInlineComponent( "contact", name = "email", label = EMAIL, multiple = False, fields = [("", "value")], filterby = dict(field = "contact_method", options = "EMAIL")), ) crud_form = S3SQLCustomForm(*s3_sql_custom_fields) s3db.configure(r.tablename, crud_form = crud_form, list_fields = list_fields, ) elif r.component_name == "group_membership": s3db.pr_group_membership.group_head.label = T("Group Chairperson") return result s3.prep = custom_prep # Custom postp standard_postp = s3.postp def custom_postp(r, output): # Call standard postp if callable(standard_postp): output = standard_postp(r, output) if r.interactive and isinstance(output, dict): if "form" in output: output["form"].add_class("pr_person") elif "item" in output and hasattr(output["item"], "add_class"): output["item"].add_class("pr_person") return output s3.postp = custom_postp return attr settings.customise_pr_person_controller = customise_pr_person_controller # ----------------------------------------------------------------------------- # Groups def chairperson(row): """ Virtual Field to show the chairperson of a group """ if hasattr(row, "pr_group"): row = row.pr_group try: group_id = row.id except: # not available return current.messages["NONE"] db = current.db mtable = current.s3db.pr_group_membership ptable = db.pr_person query = (mtable.group_id == group_id) & \ (mtable.group_head == True) & \ (mtable.person_id == ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name, ptable.id, limitby=(0, 1)).first() if chair: # Only used in list view so HTML is OK return A(s3_fullname(chair), _href=URL(c="hrm", f="person", args=chair.id)) else: return current.messages["NONE"] # ----------------------------------------------------------------------------- def 
customise_pr_group_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) if not result: return False from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent s3db = current.s3db s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group", show_link=True) crud_form = S3SQLCustomForm("name", "description", S3SQLInlineComponent("group_team", label = T("Network"), fields = [("", "org_group_id")], # @ToDo: Make this optional? multiple = False, ), "meetings", "comments", ) filter_widgets = [ S3TextFilter(["name", "description", "comments", "group_team.org_group_id$name", ], label = T("Search"), comment = T("You can search by by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."), #_class = "filter-search", ), S3OptionsFilter("group_team.org_group_id", label = T("Network"), #hidden = True, ), ] # Need to re-do list_fields as get over_written by hrm_group_controller() list_fields = [(T("Network"), "group_team.org_group_id"), "name", "description", "meetings", (T("Chairperson"), "chairperson"), "comments", ] s3db.configure("pr_group", crud_form = crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) s3db.pr_group_membership.group_head.label = T("Group Chairperson") if r.component_name == "group_membership": from s3layouts import S3AddResourceLink s3db.pr_group_membership.person_id.comment = \ S3AddResourceLink(c="pr", f="person", title=T("Create Person"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else: # # RHeader wants a simplified version, but don't want inconsistent across tabs # s3db.pr_group_membership.group_head.label = T("Chairperson") return True s3.prep = custom_prep return attr settings.customise_pr_group_controller = customise_pr_group_controller # ----------------------------------------------------------------------------- def customise_pr_group_resource(r, tablename): """ Customise pr_group resource (in group & org_group controllers) - runs after controller customisation - but runs before prep """ s3db = current.s3db table = s3db.pr_group field = table.group_type field.default = 3 # Relief Team, to show up in hrm/group field.readable = field.writable = False table.name.label = T("Name") table.description.label = T("Description") table.meetings.readable = table.meetings.writable = True # Increase size of widget from s3 import s3_comments_widget table.description.widget = s3_comments_widget from gluon import Field table.chairperson = Field.Method("chairperson", chairperson) # Format for filter_widgets & imports s3db.add_components("pr_group", org_group_team = "group_id", ) s3db.configure("pr_group", # Redirect to member list when a new group has been created create_next = URL(c="hrm", f="group", args=["[id]", "group_membership"]), ) settings.customise_pr_group_resource = customise_pr_group_resource # ----------------------------------------------------------------------------- def pr_contact_postprocess(form): """ Import Organisation/Network RSS Feeds """ s3db = current.s3db form_vars = form.vars rss_url = form_vars.rsscontact_i_value_edit_0 or \ form_vars.rsscontact_i_value_edit_none if not rss_url: if form.record: # Update form old_rss = form.record.sub_rsscontact import json data = old_rss = json.loads(old_rss)["data"] if data: # RSS feed is being deleted, so we should disable it old_rss = 
data[0]["value"]["value"] table = s3db.msg_rss_channel old = current.db(table.url == old_rss).select(table.channel_id, table.enabled, limitby = (0, 1) ).first() if old and old.enabled: s3db.msg_channel_disable("msg_rss_channel", old.channel_id) return else: # Nothing to do :) return # Check if we already have a channel for this Contact db = current.db name = form_vars.name table = s3db.msg_rss_channel name_exists = db(table.name == name).select(table.id, table.channel_id, table.enabled, table.url, limitby = (0, 1) ).first() no_import = current.request.post_vars.get("rss_no_import", None) if name_exists: if name_exists.url == rss_url: # No change to either Contact Name or URL if no_import: if name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id) return elif name_exists.enabled: # Nothing to do :) return else: # Enable channel (& associated parsers) s3db.msg_channel_enable("msg_rss_channel", name_exists.channel_id) return # Check if we already have a channel for this URL url_exists = db(table.url == rss_url).select(table.id, table.channel_id, table.enabled, limitby = (0, 1) ).first() if url_exists: # We have 2 feeds: 1 for the Contact & 1 for the URL # Disable the old Contact one and link the URL one to this Contact # and ensure active or not as appropriate # Name field is unique so rename old one name_exists.update_record(name="%s (Old)" % name) if name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id) url_exists.update_record(name=name) if no_import: if url_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable("msg_rss_channel", url_exists.channel_id) return elif url_exists.enabled: # Nothing to do :) return else: # Enable channel (& associated parsers) s3db.msg_channel_enable("msg_rss_channel", url_exists.channel_id) return else: # Update the URL name_exists.update_record(url=rss_url) if no_import: if name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id) return elif name_exists.enabled: # Nothing to do :) return else: # Enable channel (& associated parsers) s3db.msg_channel_enable("msg_rss_channel", name_exists.channel_id) return else: # Check if we already have a channel for this URL url_exists = db(table.url == rss_url).select(table.id, table.channel_id, table.enabled, limitby = (0, 1) ).first() if url_exists: # Either Contact has changed Name or this feed is associated with # another Contact # - update Feed name url_exists.update_record(name=name) if no_import: if url_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable("msg_rss_channel", url_exists.channel_id) return elif url_exists.enabled: # Nothing to do :) return else: # Enable channel (& associated parsers) s3db.msg_channel_enable("msg_rss_channel", url_exists.channel_id) return elif no_import: # Nothing to do :) return #else: # # Create a new Feed # pass # Add RSS Channel _id = table.insert(name=name, enabled=True, url=rss_url) record = dict(id=_id) s3db.update_super(table, record) # Enable channel_id = record["channel_id"] s3db.msg_channel_enable("msg_rss_channel", channel_id) # Setup Parser table = s3db.msg_parser _id = table.insert(channel_id=channel_id, function_name="parse_rss", enabled=True) s3db.msg_parser_enable(_id) # Check Now async = current.s3task.async async("msg_poll", args=["msg_rss_channel", channel_id]) async("msg_parse", 
args=[channel_id, "parse_rss"]) # ----------------------------------------------------------------------------- # Human Resource Management # Uncomment to chage the label for 'Staff' settings.hrm.staff_label = "Contacts" # Uncomment to allow Staff & Volunteers to be registered without an email address settings.hrm.email_required = False # Uncomment to allow Staff & Volunteers to be registered without an Organisation settings.hrm.org_required = False # Uncomment to show the Organisation name in HR represents settings.hrm.show_organisation = True # Uncomment to disable Staff experience settings.hrm.staff_experience = False # Uncomment to disable the use of HR Certificates settings.hrm.use_certificates = False # Uncomment to disable the use of HR Credentials settings.hrm.use_credentials = False # Uncomment to enable the use of HR Education settings.hrm.use_education = False # Uncomment to disable the use of HR Skills #settings.hrm.use_skills = False # Uncomment to disable the use of HR Trainings settings.hrm.use_trainings = False # Uncomment to disable the use of HR Description settings.hrm.use_description = False # Change the label of "Teams" to "Groups" settings.hrm.teams = "Groups" # Custom label for Organisations in HR module #settings.hrm.organisation_label = "National Society / Branch" settings.hrm.organisation_label = "Organization" # ----------------------------------------------------------------------------- def customise_hrm_human_resource_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) else: result = True if r.interactive or r.representation == "aadata": if not r.component: from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter filter_widgets = [ S3TextFilter(["person_id$first_name", "person_id$middle_name", "person_id$last_name", ], label = T("Name"), ), S3OptionsFilter("organisation_id", filter = True, header = "", hidden = True, ), S3OptionsFilter("group_person.group_id", label = T("Network"), #filter = True, #header = "", hidden = True, ), S3LocationFilter("location_id", label = T("Location"), levels = ("L1", "L2", "L3", "L4"), hidden = True, ), S3OptionsFilter("site_id", hidden = True, ), S3OptionsFilter("training.course_id", label = T("Training"), hidden = True, ), S3OptionsFilter("group_membership.group_id", label = T("Team"), filter = True, header = "", hidden = True, ), ] s3db = current.s3db s3db.configure("hrm_human_resource", filter_widgets = filter_widgets, ) field = r.table.site_id # Don't assume that user is from same org/site as Contacts they create field.default = None # Use a hierarchical dropdown instead of AC field.widget = None script = \ '''$.filterOptionsS3({ 'trigger':'organisation_id', 'target':'site_id', 'lookupResource':'site', 'lookupURL':'/%s/org/sites_for_org/', 'optional':true })''' % r.application s3.jquery_ready.append(script) return result s3.prep = custom_prep return attr settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller # ----------------------------------------------------------------------------- def customise_hrm_human_resource_resource(r, tablename): """ Customise hrm_human_resource resource (in facility, human_resource, organisation & person controllers) - runs after controller customisation - but runs before prep """ s3db = current.s3db from s3 import S3SQLCustomForm, S3SQLInlineComponent crud_form = S3SQLCustomForm("person_id", "organisation_id", "site_id", 
S3SQLInlineComponent( "group_person", label = T("Network"), link = False, fields = [("", "group_id")], multiple = False, ), "job_title_id", "start_date", ) list_fields = ["id", "person_id", "job_title_id", "organisation_id", (T("Network"), "group_person.group_id"), (T("Groups"), "person_id$group_membership.group_id"), "site_id", #"site_contact", (T("Email"), "email.value"), (settings.get_ui_label_mobile_phone(), "phone.value"), ] s3db.configure("hrm_human_resource", crud_form = crud_form, list_fields = list_fields, ) settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource # ----------------------------------------------------------------------------- def customise_hrm_job_title_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) else: result = True if r.interactive or r.representation == "aadata": table = current.s3db.hrm_job_title table.organisation_id.readable = table.organisation_id.writable = False table.type.readable = table.type.writable = False return result s3.prep = custom_prep return attr settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller # ----------------------------------------------------------------------------- # Projects # Use codes for projects (called 'blurb' in NYC) settings.project.codes = True # Uncomment this to use settings suitable for detailed Task management settings.project.mode_task = False # Uncomment this to use Activities for projects settings.project.activities = True # Uncomment this to use Milestones in project/task. settings.project.milestones = False # Uncomment this to disable Sectors in projects settings.project.sectors = False # Multiple partner organizations settings.project.multiple_organisations = True def customise_project_project_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result = standard_prep(r) else: result = True if not r.component and (r.interactive or r.representation == "aadata"): from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox s3db = current.s3db table = r.table tablename = "project_project" table.code.label = T("Project blurb (max. 
100 characters)") table.code.max_length = 100 table.comments.label = T("How people can help") script = '''$('#project_project_code').attr('maxlength','100')''' s3.jquery_ready.append(script) crud_form = S3SQLCustomForm( "organisation_id", "name", "code", "description", "status_id", "start_date", "end_date", "calendar", #"drr.hfa", #"objectives", "human_resource_id", # Activities S3SQLInlineComponent( "location", label = T("Location"), fields = [("", "location_id")], ), # Partner Orgs S3SQLInlineComponent( "organisation", name = "partner", label = T("Partner Organizations"), fields = ["organisation_id", "comments", # NB This is labelled 'Role' in DRRPP ], filterby = dict(field = "role", options = "2" ) ), S3SQLInlineComponent( "document", name = "media", label = T("URLs (media, fundraising, website, social media, etc."), fields = ["document_id", "name", "url", "comments", ], filterby = dict(field = "name") ), S3SQLInlineComponentCheckbox( "activity_type", label = T("Categories"), field = "activity_type_id", cols = 3, # Filter Activity Type by Project filter = {"linktable": "project_activity_type_project", "lkey": "project_id", "rkey": "activity_type_id", }, ), #"budget", #"currency", "comments", ) from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter filter_widgets = [ S3TextFilter(["name", "code", "description", "organisation.name", "organisation.acronym", ], label = T("Name"), _class = "filter-search", ), S3OptionsFilter("status_id", label = T("Status"), # Not translateable #represent = "%(name)s", cols = 3, ), #S3OptionsFilter("theme_project.theme_id", # label = T("Theme"), # #hidden = True, # ), S3LocationFilter("location.location_id", label = T("Location"), levels = ("L1", "L2", "L3", "L4"), #hidden = True, ), # @ToDo: Widget to handle Start & End in 1! 
S3DateFilter("start_date", label = T("Start Date"), hide_time = True, #hidden = True, ), S3DateFilter("end_date", label = T("End Date"), hide_time = True, #hidden = True, ), ] list_fields = ["id", "name", "code", "organisation_id", "start_date", "end_date", (T("Locations"), "location.location_id"), ] s3db.configure(tablename, crud_form = crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) return result s3.prep = custom_prep return attr settings.customise_project_project_controller = customise_project_project_controller # ----------------------------------------------------------------------------- # Requests Management settings.req.req_type = ["People", "Stock"]#, "Summary"] settings.req.prompt_match = False #settings.req.use_commit = False settings.req.requester_optional = True settings.req.date_writable = False settings.req.item_quantities_writable = True settings.req.skill_quantities_writable = True settings.req.items_ask_purpose = False #settings.req.use_req_number = False # Label for Requester settings.req.requester_label = "Site Contact" # Filter Requester as being from the Site settings.req.requester_from_site = True # Label for Inventory Requests settings.req.type_inv_label = "Supplies" # Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities settings.req.summary = True # ----------------------------------------------------------------------------- def req_req_postprocess(form): """ Runs after crud_form completes - creates a cms_post in the newswire - @ToDo: Send out Tweets """ req_id = form.vars.id db = current.db s3db = current.s3db rtable = s3db.req_req # Read the full record row = db(rtable.id == req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority, rtable.date_required, rtable.purpose, rtable.comments, limitby=(0, 1) ).first() # Build Title & Body from the Request details priority = rtable.priority.represent(row.priority) date_required = row.date_required if date_required: date = rtable.date_required.represent(date_required) title = "%(priority)s by %(date)s" % dict(priority=priority, date=date) else: title = priority body = row.comments if row.type == 1: # Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.item_id, ritable.item_pack_id, ritable.quantity) item_represent = s3db.supply_item_represent pack_represent = s3db.supply_item_pack_represent for item in items: item = "%s %s %s" % (item.quantity, pack_represent(item.item_pack_id), item_represent(item.item_id)) body = "%s\n%s" % (item, body) else: # Skills body = "%s\n%s" % (row.purpose, body) rstable = s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.skill_id, rstable.quantity) skill_represent = s3db.hrm_multi_skill_represent for skill in skills: item = "%s %s" % (skill.quantity, skill_represent(skill.skill_id)) body = "%s\n%s" % (item, body) # Lookup series_id stable = s3db.cms_series try: series_id = db(stable.name == "Request").select(stable.id, cache=s3db.cache, limitby=(0, 1) ).first().id except: # Prepop hasn't been run series_id = None # Location is that of the site otable = s3db.org_site location_id = db(otable.site_id == row.site_id).select(otable.location_id, limitby=(0, 1) ).first().location_id # Create Post ptable = s3db.cms_post _id = ptable.insert(series_id=series_id, title=title, body=body, location_id=location_id, person_id=row.requester_id, ) record = dict(id=_id) s3db.update_super(ptable, record) # Add source link url = "%s%s" % (settings.get_base_public_url(), URL(c="req", f="req", 
args=req_id)) s3db.doc_document.insert(doc_id=record["doc_id"], url=url, ) # ----------------------------------------------------------------------------- def customise_req_req_resource(r, tablename): from s3layouts import S3AddResourceLink current.s3db.req_req.site_id.comment = \ S3AddResourceLink(c="org", f="facility", vars = dict(child="site_id"), title=T("Create Facility"), tooltip=current.messages.AUTOCOMPLETE_HELP) current.response.s3.req_req_postprocess = req_req_postprocess if not r.component and r.method in ("create", "update"): script = \ '''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href') url=url.split('?') var q=S3.queryString.parse(url[1]) q['(site)']=$(this).val() url=url[0]+'?'+S3.queryString.stringify(q) $('#person_add').attr('href',url)})''' current.response.s3.jquery_ready.append(script) settings.customise_req_req_resource = customise_req_req_resource # ----------------------------------------------------------------------------- # Comment/uncomment modules here to disable/enable them settings.modules = OrderedDict([ # Core modules which shouldn't be disabled ("default", Storage( name_nice = T("Home"), restricted = False, # Use ACLs to control access to this module access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller module_type = None # This item is not shown in the menu )), ("admin", Storage( name_nice = T("Admin"), #description = "Site Administration", restricted = True, access = "|1|", # Only Administrators can see this module in the default menu & access the controller module_type = None # This item is handled separately for the menu )), ("appadmin", Storage( name_nice = T("Administration"), #description = "Site Administration", restricted = True, module_type = None # No Menu )), ("errors", Storage( name_nice = T("Ticket Viewer"), #description = "Needed for Breadcrumbs", restricted = False, module_type = None # No Menu )), ("sync", Storage( name_nice = T("Synchronization"), #description = "Synchronization", restricted = True, access = "|1|", # Only Administrators can see this module in the default menu & access the controller module_type = None # This item is handled separately for the menu )), # Uncomment to enable internal support requests #("support", Storage( # name_nice = T("Support"), # #description = "Support Requests", # restricted = True, # module_type = None # This item is handled separately for the menu # )), ("gis", Storage( name_nice = T("Map"), #description = "Situation Awareness & Geospatial Analysis", restricted = True, module_type = 9, # 8th item in the menu )), ("pr", Storage( name_nice = T("Person Registry"), #description = "Central point to record details on People", restricted = True, access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still) module_type = 10 )), ("org", Storage( name_nice = T("Locations"), #description = 'Lists "who is doing what & where". 
Allows relief agencies to coordinate their activities', restricted = True, module_type = 4 )), # All modules below here should be possible to disable safely ("hrm", Storage( name_nice = T("Contacts"), #description = "Human Resources Management", restricted = True, module_type = 3, )), #("vol", Storage( # name_nice = T("Volunteers"), # #description = "Human Resources Management", # restricted = True, # module_type = 2, # )), ("cms", Storage( name_nice = T("Content Management"), #description = "Content Management System", restricted = True, module_type = 10, )), ("doc", Storage( name_nice = T("Documents"), #description = "A library of digital resources, such as photos, documents and reports", restricted = True, module_type = None, )), ("msg", Storage( name_nice = T("Messaging"), #description = "Sends & Receives Alerts via Email & SMS", restricted = True, # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules. module_type = None, )), ("supply", Storage( name_nice = T("Supply Chain Management"), #description = "Used within Inventory Management, Request Management and Asset Management", restricted = True, module_type = None, # Not displayed )), ("inv", Storage( name_nice = T("Inventory"), #description = "Receiving and Sending Items", restricted = True, module_type = 10 )), #("proc", Storage( # name_nice = T("Procurement"), # #description = "Ordering & Purchasing of Goods & Services", # restricted = True, # module_type = 10 # )), ("asset", Storage( name_nice = T("Assets"), #description = "Recording and Assigning Assets", restricted = True, module_type = 10, )), # Vehicle depends on Assets #("vehicle", Storage( # name_nice = T("Vehicles"), # #description = "Manage Vehicles", # restricted = True, # module_type = 10, # )), ("req", Storage( name_nice = T("Requests"), #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.", restricted = True, module_type = 1, )), ("project", Storage( name_nice = T("Projects"), #description = "Tracking of Projects, Activities and Tasks", restricted = True, module_type = 10 )), ("assess", Storage( name_nice = T("Assessments"), #description = "Rapid Assessments & Flexible Impact Assessments", restricted = True, module_type = 5, )), ("event", Storage( name_nice = T("Events"), #description = "Activate Events (e.g. 
from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).", restricted = True, module_type = 10, )), ("survey", Storage( name_nice = T("Surveys"), #description = "Create, enter, and manage surveys.", restricted = True, module_type = 5, )), #("cr", Storage( # name_nice = T("Shelters"), # #description = "Tracks the location, capacity and breakdown of victims in Shelters", # restricted = True, # module_type = 10 # )), #("dvr", Storage( # name_nice = T("Disaster Victim Registry"), # #description = "Allow affected individuals & households to register to receive compensation and distributions", # restricted = False, # module_type = 10, # )), #("member", Storage( # name_nice = T("Members"), # #description = "Membership Management System", # restricted = True, # module_type = 10, # )), # @ToDo: Rewrite in a modern style #("budget", Storage( # name_nice = T("Budgeting Module"), # #description = "Allows a Budget to be drawn up", # restricted = True, # module_type = 10 # )), # @ToDo: Port these Assessments to the Survey module #("building", Storage( # name_nice = T("Building Assessments"), # #description = "Building Safety Assessments", # restricted = True, # module_type = 10, # )), ])
39.422772
187
0.470226
0
0
0
0
0
0
0
0
24746
0.310743
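The pr_contact_postprocess logic in the Sahana Eden template record above branches on four overlapping conditions (contact-name match, feed-URL match, the rss_no_import flag, and whether the channel is already enabled), which makes the control flow hard to follow in place. The sketch below restates that decision flow as a framework-free function over plain dict records; sync_rss_channel and the dict layout are invented for illustration and are not Sahana Eden or web2py APIs, and the real code additionally creates a parser and schedules polling when a new channel is inserted.

```python
# Framework-free sketch of the channel bookkeeping done by pr_contact_postprocess.
# "channels" stands in for rows of msg_rss_channel; nothing here is Eden API.

def sync_rss_channel(channels, name, url, no_import=False):
    """Keep at most one enabled feed per contact name/URL pair."""
    by_name = next((c for c in channels if c["name"] == name), None)
    by_url = next((c for c in channels if c["url"] == url), None)

    if by_name and by_name["url"] == url:
        # Same contact, same feed: only the enabled flag may need to change.
        by_name["enabled"] = not no_import
        return by_name

    if by_name and by_url:
        # Two rows exist: retire the stale name row, adopt the URL row.
        by_name["name"] = "%s (Old)" % name
        by_name["enabled"] = False
        by_url["name"] = name
        by_url["enabled"] = not no_import
        return by_url

    if by_name:
        # Contact kept its name but the feed URL changed.
        by_name["url"] = url
        by_name["enabled"] = not no_import
        return by_name

    if by_url:
        # Feed already known under another contact name.
        by_url["name"] = name
        by_url["enabled"] = not no_import
        return by_url

    if no_import:
        # No existing channel and imports are switched off: nothing to do.
        return None

    channel = {"name": name, "url": url, "enabled": True}
    channels.append(channel)
    return channel


channels = []
sync_rss_channel(channels, "OEM", "http://example.org/feed.rss")   # creates a channel
sync_rss_channel(channels, "OEM", "http://example.org/new.rss")    # URL change
print(channels)
```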
86d782fd2d0c71e606843e5d70887529cd2c5a40
106
py
Python
experiments/issue561/v2.py
nitinkaveriappa/downward
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
[ "MIT" ]
4
2019-04-23T10:41:35.000Z
2019-10-27T05:14:42.000Z
experiments/issue561/v2.py
nitinkaveriappa/downward
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
[ "MIT" ]
null
null
null
experiments/issue561/v2.py
nitinkaveriappa/downward
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
[ "MIT" ]
4
2018-01-16T00:00:22.000Z
2019-11-01T23:35:01.000Z
#! /usr/bin/env python # -*- coding: utf-8 -*- from main import main main("issue561-v1", "issue561-v2")
15.142857
34
0.632075
0
0
0
0
0
0
0
0
71
0.669811
86d8ff6a04670083ea5d1c4de998cdc6916ada2c
4207
py
Python
q2_qemistree/tests/test_fingerprint.py
tgroth97/q2-qemistree
289c447a6c3a29478bb84212281ef0d7ffc1387a
[ "BSD-2-Clause" ]
null
null
null
q2_qemistree/tests/test_fingerprint.py
tgroth97/q2-qemistree
289c447a6c3a29478bb84212281ef0d7ffc1387a
[ "BSD-2-Clause" ]
null
null
null
q2_qemistree/tests/test_fingerprint.py
tgroth97/q2-qemistree
289c447a6c3a29478bb84212281ef0d7ffc1387a
[ "BSD-2-Clause" ]
null
null
null
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2018, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from unittest import TestCase, main import qiime2 import os from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase): def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): # everything is working fine obs = os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions are raised with self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents)) if __name__ == '__main__': main()
45.728261
78
0.548134
3,473
0.825529
0
0
0
0
0
0
752
0.17875
86d90c0ca6a5dbc266bca705498a4a9e3c8d3aac
721
py
Python
chroma-manager/tests/utils/__init__.py
GarimaVishvakarma/intel-chroma
fdf68ed00b13643c62eb7480754d3216d9295e0b
[ "MIT" ]
null
null
null
chroma-manager/tests/utils/__init__.py
GarimaVishvakarma/intel-chroma
fdf68ed00b13643c62eb7480754d3216d9295e0b
[ "MIT" ]
null
null
null
chroma-manager/tests/utils/__init__.py
GarimaVishvakarma/intel-chroma
fdf68ed00b13643c62eb7480754d3216d9295e0b
[ "MIT" ]
null
null
null
import time import datetime import contextlib @contextlib.contextmanager def patch(obj, **attrs): "Monkey patch an object's attributes, restoring them after the block." stored = {} for name in attrs: stored[name] = getattr(obj, name) setattr(obj, name, attrs[name]) try: yield finally: for name in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): "Print elapsed time of a block, if over optional threshold." start = time.time() try: yield finally: elapsed = time.time() - start if elapsed >= threshold: print datetime.timedelta(seconds=elapsed), msg
24.033333
74
0.629681
0
0
615
0.852982
669
0.927878
0
0
132
0.183079
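The two context managers in the record above (patch and timed) are easiest to understand from the call site. Below is a short usage sketch; it assumes patch and timed are importable from this tests.utils package (note the module's bare print statement makes it Python 2 code), and the Config class and the sleep are made up purely for demonstration.

```python
import time

from tests.utils import patch, timed  # assumes the package above is on sys.path


class Config(object):
    # Hypothetical object whose attributes we want to override temporarily.
    retries = 3
    timeout = 30


cfg = Config()

with patch(cfg, retries=0, timeout=1):
    # Overrides are visible only inside the block.
    assert cfg.retries == 0 and cfg.timeout == 1
assert cfg.retries == 3 and cfg.timeout == 30  # originals restored on exit

with timed('sleep demo', threshold=0.05):
    # Elapsed time is printed because it exceeds the 50 ms threshold.
    time.sleep(0.1)
```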
86d90c692c5aa920f75d361edbf2de1c22109ec8
3518
py
Python
tempo/worker.py
rackerlabs/Tempo
60c2adaf5b592ae171987b999e0b9cc46b80c54e
[ "Apache-2.0" ]
4
2015-04-26T01:46:51.000Z
2020-11-10T13:07:59.000Z
tempo/worker.py
rackerlabs/Tempo
60c2adaf5b592ae171987b999e0b9cc46b80c54e
[ "Apache-2.0" ]
null
null
null
tempo/worker.py
rackerlabs/Tempo
60c2adaf5b592ae171987b999e0b9cc46b80c54e
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import kombu from tempo import actions from tempo import config from tempo import db from tempo import notifier from tempo import queue as tempo_queue from tempo.openstack.common import cfg from tempo.openstack.common import exception as common_exception CFG = config.CFG logger = logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized', default=False, help='Run worker as a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the notification came from') ] worker_group = cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def _notify(event_type, exception=None): payload = {'task_uuid': task_uuid} if exception is not None: payload['exception'] = exception publisher_id = CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload) action = task.action task_uuid = task.uuid try: func = getattr(actions, action) except AttributeError: logger.error("unrecognized action '%(action)s' for task task" " '%(task_uuid)s'" % locals()) return logger.debug("task '%(task_uuid)s' started: '%(action)s'" % locals()) _notify('Started Task') try: func(task) except Exception as e: logger.error("task '%(task_uuid)s' errored: %(e)s" % locals()) _notify('Errored Task', exception=e) else: logger.debug("task '%(task_uuid)s' finished: returned successfully" % locals()) _notify('Finished Task') def _process_message(body, message): message.ack() task_uuid = body['task_uuid'] try: task = db.task_get(task_uuid) except common_exception.NotFound: logger.error("Task '%(task_uuid)s' not found" % locals()) return _perform_task(task) def _consume_messages(exchange, queue, key): kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events() def consume_messages(exchange, queue, key): if CFG.worker.daemonized: # TODO(mdietz): there's a cleaner way to do this, but this works well # as a way of backgrounding the server for now import daemon with daemon.DaemonContext(): _consume_messages(exchange, queue, key) else: _consume_messages(exchange, queue, key)
29.563025
77
0.673394
0
0
0
0
0
0
0
0
1213
0.344798
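The worker in the record above only implements the consuming half of the queue protocol: a durable direct exchange and a JSON body whose single required key is task_uuid (see _process_message). A sketch of a matching publisher using recent kombu follows; the broker URL, exchange and routing-key names, and the UUID are placeholders and must match whatever the operator passes to consume_messages.

```python
import kombu

BROKER_URL = 'amqp://guest:guest@localhost//'    # placeholder broker
EXCHANGE, ROUTING_KEY = 'tempo', 'tempo.task'    # must match consume_messages()

with kombu.Connection(BROKER_URL) as conn:
    exchange = kombu.Exchange(EXCHANGE, 'direct', durable=True)
    producer = conn.Producer(serializer='json')
    # The worker acks the message, then loads the task by this UUID from its DB.
    producer.publish({'task_uuid': '6ba7b810-9dad-11d1-80b4-00c04fd430c8'},
                     exchange=exchange,
                     routing_key=ROUTING_KEY,
                     declare=[exchange])
```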
86d979010cd46ef001009b94be4cbd36b5242fa0
24187
py
Python
bin/basenji_motifs.py
AndyPJiang/basenji
64e43570c8bece156b4ab926608014f489b7965e
[ "Apache-2.0" ]
1
2020-05-22T20:53:37.000Z
2020-05-22T20:53:37.000Z
bin/basenji_motifs.py
AndyPJiang/basenji
64e43570c8bece156b4ab926608014f489b7965e
[ "Apache-2.0" ]
null
null
null
bin/basenji_motifs.py
AndyPJiang/basenji
64e43570c8bece156b4ab926608014f489b7965e
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # Copyright 2017 Calico LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================= from __future__ import print_function from optparse import OptionParser import copy, os, pdb, random, shutil, subprocess, time import h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy.stats import spearmanr import seaborn as sns from sklearn import preprocessing import tensorflow as tf import basenji ''' basenji_motifs.py Collect statistics and make plots to explore the first convolution layer of the given model using the given sequences. ''' weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""' weblogo_opts += ' -C "#CB2026" A A' weblogo_opts += ' -C "#34459C" C C' weblogo_opts += ' -C "#FBB116" G G' weblogo_opts += ' -C "#0C8040" T T' ################################################################################ # main ################################################################################ def main(): usage = 'usage: %prog [options] <params_file> <model_file> <data_file>' parser = OptionParser(usage) parser.add_option( '-a', dest='act_t', default=0.5, type='float', help= 'Activation threshold (as proportion of max) to consider for PWM [Default: %default]' ) parser.add_option( '-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.') parser.add_option('-o', dest='out_dir', default='.') parser.add_option( '-m', dest='meme_db', default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'], help='MEME database used to annotate motifs') parser.add_option( '-p', dest='plot_heats', default=False, action='store_true', help= 'Plot heat maps describing filter activations in the test sequences [Default: %default]' ) parser.add_option( '-s', dest='sample', default=None, type='int', help='Sample sequences from the test set [Default:%default]') parser.add_option( '-t', dest='trim_filters', default=False, action='store_true', help='Trim uninformative positions off the filter ends [Default: %default]' ) (options, args) = parser.parse_args() if len(args) != 3: parser.error( 'Must provide Basenji parameters and model files and test data in HDF5' ' format.' 
) else: params_file = args[0] model_file = args[1] data_file = args[2] if not os.path.isdir(options.out_dir): os.mkdir(options.out_dir) ################################################################# # load data data_open = h5py.File(data_file) test_seqs1 = data_open['test_in'] test_targets = data_open['test_out'] try: target_names = list(data_open['target_labels']) except KeyError: target_names = ['t%d' % ti for ti in range(test_targets.shape[1])] if options.sample is not None: # choose sampled indexes sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample)) # filter test_seqs1 = test_seqs1[sample_i] test_targets = test_targets[sample_i] # convert to letters test_seqs = basenji.dna_io.hot1_dna(test_seqs1) ################################################################# # model parameters and placeholders job = basenji.dna_io.read_job_params(params_file) job['seq_length'] = test_seqs1.shape[1] job['seq_depth'] = test_seqs1.shape[2] job['num_targets'] = test_targets.shape[2] job['target_pool'] = int(np.array(data_open.get('pool_width', 1))) t0 = time.time() dr = basenji.seqnn.SeqNN() dr.build(job) print('Model building time %ds' % (time.time() - t0)) # adjust for fourier job['fourier'] = 'train_out_imag' in data_open if job['fourier']: test_targets_imag = data_open['test_out_imag'] if options.valid: test_targets_imag = data_open['valid_out_imag'] ################################################################# # predict # initialize batcher if job['fourier']: batcher_test = basenji.batcher.BatcherF( test_seqs1, test_targets, test_targets_imag, batch_size=dr.batch_size, pool_width=job['target_pool']) else: batcher_test = basenji.batcher.Batcher( test_seqs1, test_targets, batch_size=dr.batch_size, pool_width=job['target_pool']) # initialize saver saver = tf.train.Saver() with tf.Session() as sess: # load variables into session saver.restore(sess, model_file) # get weights filter_weights = sess.run(dr.filter_weights[0]) filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0]) print(filter_weights.shape) # test t0 = time.time() layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0]) filter_outs = layer_filter_outs[0] print(filter_outs.shape) # store useful variables num_filters = filter_weights.shape[0] filter_size = filter_weights.shape[2] ################################################################# # individual filter plots ################################################################# # also save information contents filters_ic = [] meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs) for f in range(num_filters): print('Filter %d' % f) # plot filter parameters as a heatmap plot_filter_heat(filter_weights[f, :, :], '%s/filter%d_heat.pdf' % (options.out_dir, f)) # write possum motif file filter_possum(filter_weights[f, :, :], 'filter%d' % f, '%s/filter%d_possum.txt' % (options.out_dir, f), options.trim_filters) # plot weblogo of high scoring outputs plot_filter_logo( filter_outs[:, :, f], filter_size, test_seqs, '%s/filter%d_logo' % (options.out_dir, f), maxpct_t=options.act_t) # make a PWM for the filter filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' % (options.out_dir, f)) if nsites < 10: # no information filters_ic.append(0) else: # compute and save information content filters_ic.append(info_content(filter_pwm)) # add to the meme motif file meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters) meme_out.close() ################################################################# # annotate filters 
################################################################# # run tomtom subprocess.call( 'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' % (options.out_dir, options.out_dir, options.meme_db), shell=True) # read in annotations filter_names = name_filters( num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db) ################################################################# # print a table of information ################################################################# table_out = open('%s/table.txt' % options.out_dir, 'w') # print header for later panda reading header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std') print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out) for f in range(num_filters): # collapse to a consensus motif consensus = filter_motif(filter_weights[f, :, :]) # grab annotation annotation = '.' name_pieces = filter_names[f].split('_') if len(name_pieces) > 1: annotation = name_pieces[1] # plot density of filter output scores fmean, fstd = plot_score_density( np.ravel(filter_outs[:, :, f]), '%s/filter%d_dens.pdf' % (options.out_dir, f)) row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd) print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out) table_out.close() ################################################################# # global filter plots ################################################################# if options.plot_heats: # plot filter-sequence heatmap plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir) # plot filter-segment heatmap plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir) plot_filter_seg_heat( filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False) # plot filter-target correlation heatmap plot_target_corr(filter_outs, seq_targets, filter_names, target_names, '%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean') plot_target_corr(filter_outs, seq_targets, filter_names, target_names, '%s/filter_target_cors_max.pdf' % options.out_dir, 'max') def get_motif_proteins(meme_db_file): """ Hash motif_id's to protein names using the MEME DB file """ motif_protein = {} for line in open(meme_db_file): a = line.split() if len(a) > 0 and a[0] == 'MOTIF': if a[2][0] == '(': motif_protein[a[1]] = a[2][1:a[2].find(')')] else: motif_protein[a[1]] = a[2] return motif_protein def info_content(pwm, transpose=False, bg_gc=0.415): """ Compute PWM information content. In the original analysis, I used a bg_gc=0.5. For any future analysis, I ought to switch to the true hg19 value of 0.415. 
""" pseudoc = 1e-9 if transpose: pwm = np.transpose(pwm) bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc] ic = 0 for i in range(pwm.shape[0]): for j in range(4): # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j]) ic += -bg_pwm[j] * np.log2( bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j]) return ic def make_filter_pwm(filter_fasta): """ Make a PWM for this filter from its top hits """ nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3} pwm_counts = [] nsites = 4 # pseudocounts for line in open(filter_fasta): if line[0] != '>': seq = line.rstrip() nsites += 1 if len(pwm_counts) == 0: # initialize with the length for i in range(len(seq)): pwm_counts.append(np.array([1.0] * 4)) # count for i in range(len(seq)): try: pwm_counts[i][nts[seq[i]]] += 1 except KeyError: pwm_counts[i] += np.array([0.25] * 4) # normalize pwm_freqs = [] for i in range(len(pwm_counts)): pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)]) return np.array(pwm_freqs), nsites - 4 def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False): """ Print a filter to the growing MEME file Attrs: meme_out : open file f (int) : filter index # filter_pwm (array) : filter PWM array nsites (int) : number of filter sites """ if not trim_filters: ic_start = 0 ic_end = filter_pwm.shape[0] - 1 else: ic_t = 0.2 # trim PWM of uninformative prefix ic_start = 0 while ic_start < filter_pwm.shape[0] and info_content( filter_pwm[ic_start:ic_start + 1]) < ic_t: ic_start += 1 # trim PWM of uninformative suffix ic_end = filter_pwm.shape[0] - 1 while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t: ic_end -= 1 if ic_start < ic_end: print('MOTIF filter%d' % f, file=meme_out) print( 'letter-probability matrix: alength= 4 w= %d nsites= %d' % (ic_end - ic_start + 1, nsites), file=meme_out) for i in range(ic_start, ic_end + 1): print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out) print('', file=meme_out) def meme_intro(meme_file, seqs): """ Open MEME motif format file and print intro Attrs: meme_file (str) : filename seqs [str] : list of strings for obtaining background freqs Returns: mem_out : open MEME file """ nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3} # count nt_counts = [1] * 4 for i in range(len(seqs)): for nt in seqs[i]: try: nt_counts[nts[nt]] += 1 except KeyError: pass # normalize nt_sum = float(sum(nt_counts)) nt_freqs = [nt_counts[i] / nt_sum for i in range(4)] # open file for writing meme_out = open(meme_file, 'w') # print intro material print('MEME version 4', file=meme_out) print('', file=meme_out) print('ALPHABET= ACGT', file=meme_out) print('', file=meme_out) print('Background letter frequencies:', file=meme_out) print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out) print('', file=meme_out) return meme_out def name_filters(num_filters, tomtom_file, meme_db_file): """ Name the filters using Tomtom matches. Attrs: num_filters (int) : total number of filters tomtom_file (str) : filename of Tomtom output table. 
meme_db_file (str) : filename of MEME db Returns: filter_names [str] : """ # name by number filter_names = ['f%d' % fi for fi in range(num_filters)] # name by protein if tomtom_file is not None and meme_db_file is not None: motif_protein = get_motif_proteins(meme_db_file) # hash motifs and q-value's by filter filter_motifs = {} tt_in = open(tomtom_file) tt_in.readline() for line in tt_in: a = line.split() fi = int(a[0][6:]) motif_id = a[1] qval = float(a[5]) filter_motifs.setdefault(fi, []).append((qval, motif_id)) tt_in.close() # assign filter's best match for fi in filter_motifs: top_motif = sorted(filter_motifs[fi])[0][1] filter_names[fi] += '_%s' % motif_protein[top_motif] return np.array(filter_names) ################################################################################ # plot_target_corr # # Plot a clustered heatmap of correlations between filter activations and # targets. # # Input # filter_outs: # filter_names: # target_names: # out_pdf: ################################################################################ def plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets = len(target_names) if seq_op == 'mean': filter_outs_seq = filter_outs.mean(axis=2) else: filter_outs_seq = filter_outs.max(axis=2) # std is sequence by filter. filter_seqs_std = filter_outs_seq.std(axis=0) filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0] filter_names_live = filter_names[filter_seqs_std > 0] filter_target_cors = np.zeros((len(filter_names_live), num_targets)) for fi in range(len(filter_names_live)): for ti in range(num_targets): cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti]) filter_target_cors[fi, ti] = cor cor_df = pd.DataFrame( filter_target_cors, index=filter_names_live, columns=target_names) sns.set(font_scale=0.3) plt.figure() sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10)) plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_seq_heat # # Plot a clustered heatmap of filter activations in # # Input # param_matrix: np.array of the filter's parameter matrix # out_pdf: ################################################################################ def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True): # compute filter output means per sequence filter_seqs = filter_outs.mean(axis=2) # whiten if whiten: filter_seqs = preprocessing.scale(filter_seqs) # transpose filter_seqs = np.transpose(filter_seqs) if drop_dead: filter_stds = filter_seqs.std(axis=1) filter_seqs = filter_seqs[filter_stds > 0] # downsample sequences seqs_i = np.random.randint(0, filter_seqs.shape[1], 500) hmin = np.percentile(filter_seqs[:, seqs_i], 0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) plt.figure() sns.clustermap( filter_seqs[:, seqs_i], row_cluster=True, col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2] + 'ng' #plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # plot_filter_seq_heat # # Plot a clustered heatmap of filter activations in sequence segments. # # Mean doesn't work well for the smaller segments for some reason, but taking # the max looks OK. Still, similar motifs don't cluster quite as well as you # might expect. 
# # Input # filter_outs ################################################################################ def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True): b = filter_outs.shape[0] f = filter_outs.shape[1] l = filter_outs.shape[2] s = 5 while l / float(s) - (l / s) > 0: s += 1 print('%d segments of length %d' % (s, l / s)) # split into multiple segments filter_outs_seg = np.reshape(filter_outs, (b, f, s, l / s)) # mean across the segments filter_outs_mean = filter_outs_seg.max(axis=3) # break each segment into a new instance filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1), (s * b, f)) # whiten if whiten: filter_seqs = preprocessing.scale(filter_seqs) # transpose filter_seqs = np.transpose(filter_seqs) if drop_dead: filter_stds = filter_seqs.std(axis=1) filter_seqs = filter_seqs[filter_stds > 0] # downsample sequences seqs_i = np.random.randint(0, filter_seqs.shape[1], 500) hmin = np.percentile(filter_seqs[:, seqs_i], 0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) if whiten: dist = 'euclidean' else: dist = 'cosine' plt.figure() sns.clustermap( filter_seqs[:, seqs_i], metric=dist, row_cluster=True, col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2] + 'ng' #plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # filter_motif # # Collapse the filter parameter matrix to a single DNA motif. # # Input # param_matrix: np.array of the filter's parameter matrix # out_pdf: ################################################################################ def filter_motif(param_matrix): nts = 'ACGT' motif_list = [] for v in range(param_matrix.shape[1]): max_n = 0 for n in range(1, 4): if param_matrix[n, v] > param_matrix[max_n, v]: max_n = n if param_matrix[max_n, v] > 0: motif_list.append(nts[max_n]) else: motif_list.append('N') return ''.join(motif_list) ################################################################################ # filter_possum # # Write a Possum-style motif # # Input # param_matrix: np.array of the filter's parameter matrix # out_pdf: ################################################################################ def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200): # possible trim trim_start = 0 trim_end = param_matrix.shape[1] - 1 trim_t = 0.3 if trim_filters: # trim PWM of uninformative prefix while trim_start < param_matrix.shape[1] and np.max( param_matrix[:, trim_start]) - np.min( param_matrix[:, trim_start]) < trim_t: trim_start += 1 # trim PWM of uninformative suffix while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min( param_matrix[:, trim_end]) < trim_t: trim_end -= 1 if trim_start < trim_end: possum_out = open(possum_file, 'w') print('BEGIN GROUP', file=possum_out) print('BEGIN FLOAT', file=possum_out) print('ID %s' % motif_id, file=possum_out) print('AP DNA', file=possum_out) print('LE %d' % (trim_end + 1 - trim_start), file=possum_out) for ci in range(trim_start, trim_end + 1): print( 'MA %s' % ' '.join(['%.2f' % (mult * n) for n in param_matrix[:, ci]]), file=possum_out) print('END', file=possum_out) print('END', file=possum_out) possum_out.close() ################################################################################ # plot_filter_heat # # Plot a heatmap of the filter's parameters. 
# # Input # param_matrix: np.array of the filter's parameter matrix # out_pdf: ################################################################################ def plot_filter_heat(param_matrix, out_pdf): param_range = abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range) ax = plt.gca() ax.set_xticklabels(range(1, param_matrix.shape[1] + 1)) ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10) plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_logo # # Plot a weblogo of the filter's occurrences # # Input # param_matrix: np.array of the filter's parameter matrix # out_pdf: ################################################################################ def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None): if maxpct_t: all_outs = np.ravel(filter_outs) all_outs_mean = all_outs.mean() all_outs_norm = all_outs - all_outs_mean raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean left_pad = (filter_size - 1) // 2 right_pad = filter_size - left_pad # print fasta file of positive outputs filter_fasta_out = open('%s.fa' % out_prefix, 'w') filter_count = 0 for i in range(filter_outs.shape[0]): for j in range(filter_outs.shape[1]): if filter_outs[i, j] > raw_t: # construct kmer kmer = '' # determine boundaries, considering padding fstart = j - left_pad fend = fstart + filter_size # if it starts in left_pad if fstart < 0: kmer += 'N' * (-fstart) fstart = 0 # add primary sequence kmer += seqs[i][fstart:fend] # if it ends in right_pad if fend > len(seqs[i]): kmer += 'N' * (fend - len(seqs[i])) # output print('>%d_%d' % (i, j), file=filter_fasta_out) print(kmer, file=filter_fasta_out) filter_count += 1 filter_fasta_out.close() # make weblogo if filter_count > 0: weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix, out_prefix) subprocess.call(weblogo_cmd, shell=True) ################################################################################ # plot_score_density # # Plot the score density and print to the stats table. # # Input # param_matrix: np.array of the filter's parameter matrix # out_pdf: ################################################################################ def plot_score_density(f_scores, out_pdf): sns.set(font_scale=1.3) plt.figure() sns.distplot(f_scores, kde=False) plt.xlabel('ReLU output') plt.savefig(out_pdf) plt.close() return f_scores.mean(), f_scores.std() ################################################################################ # __main__ ################################################################################ if __name__ == '__main__': main() # pdb.runcall(main)
29.282082
99
0.585687
0
0
0
0
0
0
0
0
9345
0.386365
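info_content in the record above scores each PWM column against a GC-corrected background (bg_gc = 0.415). The tiny self-contained transcription below makes the scale concrete: an uninformative column contributes roughly zero bits while a fully determined one approaches the 2-bit maximum. It merely restates the documented computation with numpy and is not part of Basenji.

```python
import numpy as np

def info_content(pwm, bg_gc=0.415, pseudoc=1e-9):
    """Same computation as basenji_motifs.info_content for a (positions x 4) PWM."""
    bg = np.array([1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc])
    pwm = np.asarray(pwm, dtype=float)
    return float(np.sum(-bg * np.log2(bg) + pwm * np.log2(pwm + pseudoc)))

uniform = [[0.25, 0.25, 0.25, 0.25]]   # uninformative position
certain = [[1.0, 0.0, 0.0, 0.0]]       # fully determined position

print(info_content(uniform))   # ~ -0.04 bits: essentially no information
print(info_content(certain))   # ~  1.96 bits: close to the 2-bit maximum
```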
86db53b7a1cf34f8c926e78563b430e45842c3b8
1337
py
Python
apps/shop/urls.py
Joetib/jshop
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
[ "MIT" ]
1
2021-09-29T18:48:00.000Z
2021-09-29T18:48:00.000Z
apps/shop/urls.py
Joetib/jshop
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
[ "MIT" ]
null
null
null
apps/shop/urls.py
Joetib/jshop
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
[ "MIT" ]
null
null
null
from django.urls import path from . import views app_name = "shop" urlpatterns = [ path('', views.HomePage.as_view(), name="home-page"), path('shop/', views.ProductListView.as_view(), name="product-list"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name="product-list"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name="product-detail"), path('cart/', views.cart_view, name="cart"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name="add-product-to-cart"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name="add-product-to-cart-json"), path('checkout/', views.CheckOut.as_view(), name="checkout"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name="checkout"), path('payment/', views.PaymentChoice.as_view(), name="payment-choice"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name="momo-payment"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name="confirm-momo-payment"), path('orders/', views.OrderList.as_view(), name="order-list"), path('orders/<int:pk>/', views.OrderDetail.as_view(), name="order-detail"), path('orders/<int:order_id>/items/<int:pk>/', views.OrderItemDetail.as_view(), name="order-item-detail"), ]
58.130435
109
0.691847
0
0
0
0
0
0
0
0
530
0.39641
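Every pattern in the record above is named and the module sets app_name = "shop", so views and templates would normally build these URLs by reversal rather than by hand. A few illustrative reversals follow; they assume this urlconf is included at the project root inside a configured Django project, and the primary-key values are arbitrary.

```python
from django.urls import reverse

reverse("shop:home-page")                                           # '/'
reverse("shop:product-list")                                        # '/shop/'
reverse("shop:product-detail", kwargs={"pk": 3})                    # '/shop/products/3/'
reverse("shop:add-product-to-cart", kwargs={"product_pk": 3})       # '/cart/add/3/'
reverse("shop:order-item-detail", kwargs={"order_id": 7, "pk": 2})  # '/orders/7/items/2/'
```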
86db5b39f7333cdce223e5a0be6e734eb216f5d2
11,028
py
Python
surpyval/parametric/expo_weibull.py
dfm/SurPyval
014fba8f1d4a0f43218a3713ce80a78191ad8be9
[ "MIT" ]
null
null
null
surpyval/parametric/expo_weibull.py
dfm/SurPyval
014fba8f1d4a0f43218a3713ce80a78191ad8be9
[ "MIT" ]
null
null
null
surpyval/parametric/expo_weibull.py
dfm/SurPyval
014fba8f1d4a0f43218a3713ce80a78191ad8be9
[ "MIT" ]
null
null
null
import autograd.numpy as np from scipy.stats import uniform from autograd import jacobian from numpy import euler_gamma from scipy.special import gamma as gamma_func from scipy.special import ndtri as z from scipy import integrate from scipy.optimize import minimize from surpyval import parametric as para from surpyval import nonparametric as nonp from surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp import mpp class ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name = name self.k = 3 self.bounds = ((0, None), (0, None), (0, None),) self.support = (0, np.inf) self.plot_x_scale = 'log' self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999] self.param_names = ['alpha', 'beta', 'mu'] self.param_map = { 'alpha' : 0, 'beta' : 1, 'mu' : 2 } def _parameter_initialiser(self, x, c=None, n=None, offset=False): log_x = np.log(x) log_x[np.isnan(log_x)] = 0 gumb = para.Gumbel.fit(log_x, c, n, how='MLE') if not gumb.res.success: gumb = para.Gumbel.fit(log_x, c, n, how='MPP') mu, sigma = gumb.params alpha, beta = np.exp(mu), 1. / sigma if (np.isinf(alpha) | np.isnan(alpha)): alpha = np.median(x) if (np.isinf(beta) | np.isnan(beta)): beta = 1. if offset: gamma = np.min(x) - (np.max(x) - np.min(x))/10. return gamma, alpha, beta, 1. else: return alpha, beta, 1. def sf(self, x, alpha, beta, mu): r""" Survival (or reliability) function for the ExpoWeibull Distribution: .. math:: R(x) = 1 - \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu} Parameters ---------- x : numpy array or scalar The values at which the function will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- sf : scalar or numpy array The value(s) of the reliability function at x. Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) """ return 1 - np.power(1 - np.exp(-(x / alpha)**beta), mu) def ff(self, x, alpha, beta, mu): r""" Failure (CDF or unreliability) function for the ExpoWeibull Distribution: .. math:: F(x) = \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu} Parameters ---------- x : numpy array or scalar The values at which the function will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- sf : scalar or numpy array The value(s) of the failure function at x. Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528]) """ return np.power(1 - np.exp(-(x / alpha)**beta), mu) def cs(self, x, X, alpha, beta, mu): r""" Conditional survival (or reliability) function for the ExpoWeibull Distribution: .. 
math:: R(x, X) = \frac{R(x + X)}{R(X)} Parameters ---------- x : numpy array or scalar The values at which the function will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- sf : scalar or numpy array The value(s) of the reliability function at x. Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 1, 3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) """ return self.sf(x + X, alpha, beta, mu) / self.sf(X, alpha, beta, mu) def df(self, x, alpha, beta, mu): r""" Density function for the ExpoWeibull Distribution: .. math:: f(x) = \mu \left ( \frac{\beta}{\alpha} \right ) \left ( \frac{x}{\alpha} \right )^{\beta - 1} \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu - 1} e^{- \left ( \frac{x}{\alpha} \right )^\beta} Parameters ---------- x : numpy array or scalar The values at which the function will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- df : scalar or numpy array The value(s) of the density function at x. Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058]) """ return (beta * mu * x**(beta - 1)) / (alpha**beta) \ * (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \ * np.exp(-(x/alpha)**beta) def hf(self, x, alpha, beta, mu): r""" Instantaneous hazard rate for the ExpoWeibull Distribution: .. math:: h(x) = \frac{f(x)}{R(x)} Parameters ---------- x : numpy array or scalar The values at which the function will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- hf : scalar or numpy array The value(s) of the instantaneous hazard rate at x. Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436]) """ return self.df(x, alpha, beta, mu) / self.sf(x, alpha, beta, mu) def Hf(self, x, alpha, beta, mu): r""" Instantaneous hazard rate for the ExpoWeibull Distribution: .. math:: H(x) = -\ln \left ( R(x) \right ) Parameters ---------- x : numpy array or scalar The values at which the function will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- Hf : scalar or numpy array The value(s) of the cumulative hazard rate at x. 
Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) """ return -np.log(self.sf(x, alpha, beta, mu)) def qf(self, p, alpha, beta, mu): r""" Instantaneous hazard rate for the ExpoWeibull Distribution: .. math:: q(p) = Parameters ---------- p : numpy array or scalar The percentiles at which the quantile will be calculated alpha : numpy array or scalar scale parameter for the ExpoWeibull distribution beta : numpy array or scalar shape parameter for the ExpoWeibull distribution mu : numpy array or scalar shape parameter for the ExpoWeibull distribution Returns ------- Q : scalar or numpy array The quantiles for the Weibull distribution at each value p Examples -------- >>> import numpy as np >>> from surpyval import ExpoWeibull >>> p = np.array([.1, .2, .3, .4, .5]) >>> ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988]) """ return alpha * (-np.log(1 - p**(1./mu)))**(1/beta) def mean(self, alpha, beta, mu): func = lambda x : x * self.df(x, alpha, beta, mu) top = 2 * self.qf(0.999, alpha, beta, mu) return integrate.quadrature(func, 0, top)[0] def random(self, size, alpha, beta, mu): U = uniform.rvs(size=size) return self.qf(U, alpha, beta, mu) def mpp_x_transform(self, x, gamma=0): return np.log(x - gamma) def mpp_y_transform(self, y, *params): mu = params[-1] mask = ((y == 0) | (y == 1)) out = np.zeros_like(y) out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] = np.nan return out def mpp_inv_y_transform(self, y, *params): i = len(params) mu = params[i-1] return (1 - np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr): #UPDATE ME if rr == 'y': beta = params[0] alpha = np.exp(params[1]/-beta) elif rr == 'x': beta = 1./params[0] alpha = np.exp(params[1] / (beta * params[0])) return alpha, beta, 1. ExpoWeibull = ExpoWeibull_('ExpoWeibull')
32.151603
228
0.537087
10,528
0.954661
0
0
0
0
0
0
7,347
0.666213
86db8d66e4f0f969e4dab6cb93ed65e00e44883f
3,292
py
Python
tests/test_base_table.py
stjordanis/datar
4e2b5db026ad35918954576badef9951928c0cb1
[ "MIT" ]
110
2021-03-09T04:10:40.000Z
2022-03-13T10:28:20.000Z
tests/test_base_table.py
sthagen/datar
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
[ "MIT" ]
54
2021-06-20T18:53:44.000Z
2022-03-29T22:13:07.000Z
tests/test_base_table.py
sthagen/datar
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
[ "MIT" ]
11
2021-06-18T03:03:14.000Z
2022-02-25T11:48:26.000Z
import pytest from datar import stats from datar.base import * from datar import f from datar.datasets import warpbreaks, state_division, state_region, airquality from .conftest import assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5) x = table(z) assert sum(x.values.flatten()) == 100 #----------------- with data_context(warpbreaks) as _: tab = table(f.wool, f.tension) assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) tab = table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) #----------------- tab = table(state_division, state_region) assert tab.loc['New England', 'Northeast'] == 6 #----------------- with data_context(airquality) as _: qt = stats.quantile(f.Temp) ct = cut(f.Temp, qt) tab = table(ct, f.Month) assert tab.iloc[0,0] == 24 #----------------- a = letters[:3] tab = table(a, sample(a)) assert sum(tab.values.flatten()) == 3 #----------------- tab = table(a, sample(a), dnn=['x', 'y']) assert tab.index.name == 'x' assert tab.columns.name == 'y' #----------------- a = c(NA, Inf, (1.0/(i+1) for i in range(3))) a = a * 10 # tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4) tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------ b = as_factor(rep(c("A","B","C"), 10)) tab = table(b) assert tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(), [10] * 3) tab = table(b, exclude="B") assert tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(), [10] * 2) assert 'B' not in tab.columns #------------------- d = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E")) tab = table(d, exclude="B", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), ["A", "C", "D", "E"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0]) d2 = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E")) tab = table(d, d2, exclude="B") assert tab.shape == (4, 4) tab = table("abc", "cba", dnn='x') assert tab.shape == (3,3) assert sum(tab.values.flatten()) == 3 with data_context(airquality) as _: tab = table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>' in tab.columns assert '<NA>' in tab.index def test_table_error(): from datar.datasets import iris, warpbreaks with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris.iloc[:, []])
31.056604
79
0.564702
0
0
0
0
0
0
0
0
482
0.146416
86dbc8be4491e9aac31a1a68443d62ca3e952415
1,922
py
Python
cqlengine/tests/statements/test_update_statement.py
dokai/cqlengine
a080aff3a73351d37126b14eef606061b445aa37
[ "BSD-3-Clause" ]
null
null
null
cqlengine/tests/statements/test_update_statement.py
dokai/cqlengine
a080aff3a73351d37126b14eef606061b445aa37
[ "BSD-3-Clause" ]
null
null
null
cqlengine/tests/statements/test_update_statement.py
dokai/cqlengine
a080aff3a73351d37126b14eef606061b445aa37
[ "BSD-3-Clause" ]
null
null
null
from unittest import TestCase from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import * class UpdateStatementTests(TestCase): def test_table_rendering(self): """ tests that fields are properly added to the select statement """ us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table SET "a" = :0, "c" = :1 WHERE "a" = :2', unicode(us)) def test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) def test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET "a" = :4, "c" = :5 WHERE "a" = :3') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertIn('USING TTL 60', unicode(us))
44.697674
104
0.648283
1,774
0.922997
0
0
0
0
0
0
359
0.186785
86dbf2f275a336e7d12bde00e6cd729b126ef190
1,883
py
Python
packages/facilities/diagnostics/py/custom_checkbox.py
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
2
2021-01-15T13:27:19.000Z
2021-08-04T08:40:52.000Z
packages/facilities/diagnostics/py/custom_checkbox.py
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
null
null
null
packages/facilities/diagnostics/py/custom_checkbox.py
Falcons-Robocup/code
2281a8569e7f11cbd3238b7cc7341c09e2e16249
[ "Apache-2.0" ]
5
2018-05-01T10:39:31.000Z
2022-03-25T03:02:35.000Z
# Copyright 2020 Jan Feitsma (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as plt from matplotlib.patches import Rectangle class Checkbox(): def __init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True): self.name = name # unique ID associated with # label to display next to the checkbox if label == None: self.label = name # reuse else: self.label = label self.callback = None self.enabled = enabled self.ticked = default self.ax = plt.axes(position) # position is a tuple (x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas # draw text if len(self.label): self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center') # draw a rectangle, add a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True)) # setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self): s = 'checkbox:' + self.name + '=' + str(self.ticked) if not self.enabled: s += ' (disabled)' return s def on_changed(self, cb): self.callback = cb def _handle_event(self, e): if self.enabled and e.inaxes == self.ax: # TODO: exclude spacing margin for inaxes calculation self.ticked = not self.ticked self.redraw() if self.callback != None: self.callback(self.name, self.ticked) def redraw(self): col = 'grey' if self.enabled: col = ['lightgoldenrodyellow', 'blue'][self.ticked] self.ax.patches[0].set_facecolor(col) self.ax.figure.canvas.draw()
33.625
117
0.601699
1,709
0.907594
0
0
0
0
0
0
425
0.225704
86dc7d357b174d6a4843f8edef2436d8cf30c367
742
py
Python
generator.py
Axonny/HexagonalHitori
582cb50b751796c30ed273f66c8ac9fa6f3dd089
[ "MIT" ]
null
null
null
generator.py
Axonny/HexagonalHitori
582cb50b751796c30ed273f66c8ac9fa6f3dd089
[ "MIT" ]
null
null
null
generator.py
Axonny/HexagonalHitori
582cb50b751796c30ed273f66c8ac9fa6f3dd089
[ "MIT" ]
null
null
null
from hitori_generator import Generator from argparse import ArgumentParser def generate(n: int, output_file: str) -> None: if n < 3 or n > 8: print("It isn't valid size") exit(4) generator = Generator(n) data = generator.generate() lines = map(lambda x: ' '.join(map(str, x)), data) with open(output_file, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) def main(): p = ArgumentParser() p.add_argument('filename', type=str, help='Path to output file') p.add_argument('-s', "--size", type=int, default=3, help='Generate SxS field. size must be in [3, 8]. Default is 3') args = p.parse_args() generate(args.size, args.filename) if __name__ == '__main__': main()
27.481481
120
0.628032
0
0
0
0
0
0
0
0
149
0.200809
86dd7f5030a8b0c0b8c5d1166bbac51638b7d539
25,946
py
Python
opaflib/xmlast.py
feliam/opaf
f9908c26af1bf28cc29f3d647dcd9f55d631d732
[ "MIT" ]
2
2019-11-23T14:46:35.000Z
2022-01-21T16:09:47.000Z
opaflib/xmlast.py
feliam/opaf
f9908c26af1bf28cc29f3d647dcd9f55d631d732
[ "MIT" ]
null
null
null
opaflib/xmlast.py
feliam/opaf
f9908c26af1bf28cc29f3d647dcd9f55d631d732
[ "MIT" ]
1
2019-09-06T21:04:39.000Z
2019-09-06T21:04:39.000Z
from lxml import etree from opaflib.filters import defilterData #Logging facility import logging,code logger = logging.getLogger("OPAFXML") class PDFXML(etree.ElementBase): ''' Base pdf-xml class. Every pdf token xml representation will have a span wich indicates where the original token layed in the file ''' def _getspan(self): return tuple([int(i) for i in self.get('span').split('~')]) def _setspan(self, value): self.set('span',"%d~%d"%value) def span_move(self,offset, recursive=True): begin,end = self.span self.span = (begin+offset,end+offset) if recursive: for child in self.getchildren(): child.span_move(offset) def span_expand(self,span): begin,end = self.span self.span = (min(begin,span[0]),max(end,span[1])) def clear_span(self, recursive=True): del self.attrib['span'] for child in self.getchildren(): child.clear_span() span = property(_getspan,_setspan) def _to_xml(self): return etree.tostring(self) xml = property(_to_xml) def _from_python(self, value): self.from_python(value) def _to_python(self): return self.to_python() value = property(_to_python,_from_python) def __getattr__(self, name): tags = set([e.tag for e in self]) if name in tags: return self.xpath('./%s'%name) return getattr(super(PDFXML,self),name) def get_numgen(self): ''' Search the object and generation number of any pdf element ''' if self.tag.startswith('indirect'): return self.id else: return self.getparent().get_numgen() #leaf class PDFString(PDFXML): def from_python(self, value): self.text = value.encode('string_escape') def to_python(self): return self.text.decode('string_escape') class PDFName(PDFString): pass class PDFData(PDFString): pass class PDFBool(PDFString): def from_python(self, value): assert type(value) == bool, 'Value must be a boolean' self.text = ['false','true'][int(value)] def to_python(self): return {'false': False, 'true': True}[self.text] class PDFNull(PDFString): def from_python(self, value): assert value is None, 'Value must be None' self.text = 'null' def to_python(self): assert self.text == 'null', 'PDFNull xml not initialized' return None class PDFR(PDFString): def from_python(self, (n,g)): assert type(n) == int and type(g) == int, 'R must be two numbers, n and g' assert n >= 0 and n < 65535 , 'Invalid object number (%d)'%n assert g >= 0 and g < 65535 , 'Invalid generation number (%d)'%g self.text = "%d %d"%(n,g) def to_python(self): return tuple([int(i) for i in self.text.split(' ')]) def solve(self): ''' search the referenced indirect object in the containing pdf ''' pdf = self.xpath('/*')[0] return pdf.getIndirectObject(self.value) class PDFNumber(PDFXML): def from_python(self, value): assert type(value) in [int, float], 'Wrong type for a number' self.text = str(value) def to_python(self): x = self.text return float(int(float(x))) == float(x) and int(float(x)) or float(x) class PDFStartxref(PDFString): def from_python(self, value): assert type(value) == int , 'Wrong type for startxref' self.text = str(value).encode('string_escape') def to_python(self): return int(self.text.decode('string_escape')) class PDFHeader(PDFString): pass #tree class PDFEntry(PDFXML): def to_python(self): return tuple([e.value for e in self.getchildren()]) def _getkey(self): return self[0] def _setkey(self, key): assert key.tag == 'name' self[0] = key key = property(_getkey,_setkey,None) def _getval(self): return self[1] def _setval(self, val): self[1] = val val = property(_getval,_setval,None) class PDFDictionary(PDFXML): def to_python(self): return dict([e.value for e in self.getchildren()]) def 
has_key(self,key): return len(self.xpath('./entry/name[position()=1 and text()="%s"]'%key))>0 def __getitem__(self, i): if str == type(i): return self.xpath('./entry/name[position()=1 and text()="%s"]/../*[position()=2]'%i)[0] return super(PDFDictionary,self).__getitem__(i) def __delitem__(self, i): if str == type(i): return self.remove(self.xpath('./entry/name[position()=1 and text()="%s"]/..'%i)[0]) return super(PDFDictionary,self).__delitem__(i) def __setitem__(self, key, val): if str == type(key): self.xpath('./entry/name[position()=1 and text()="%s"]/..'%key)[0].val=val else: super(PDFDictionary,self).__setitem__(key,val) class PDFStream(PDFXML): def to_python(self): return {'dictionary':self[0].value, 'data':self[1].value} def _getdictionary(self): return self[0] def _setdictionary(self, d): assert key.tag == 'dictionary' self[0] = d dictionary = property(_getdictionary,_setdictionary,None) def _getdata(self): return self[1] def _setdata(self, data): assert data.tag == 'data' self[1] = data data = property(_getdata,_setdata,None) def isFiltered(self): ''' Check if stream is filtered ''' return self.dictionary.has_key('Filter') def getFilters(self): val = self.dictionary.value filters = val.get('Filter',None) params = val.get('DecodeParams',None) assert any([type(filters) == list and (type(params) == list or params==None ), type(filters) != list and (type(params) == dict or params==None ) ]), 'Filter/DecodeParms wrong type' if type(filters) != list: filters=[filters] params=params and [params] or [{}] if params == None: params = [{}]*len(filters) assert all([type(x)==str for x in filters]), 'Filter shall be a names' assert all([type(x)==dict for x in params]), 'Params should be a dictionary.. or null?' assert len(filters) == len(params),'Number of Decodeparams should match Filters' return zip(filters,params) def popFilter(self): dictionary = self.dictionary assert dictionary.has_key('Filter'), 'Stream not Filtered!' selected_filter = None selected_params = None deletion_list = [] if dictionary['Length'].value != len(self.data.value): logger.info("Length field of object %s does not match the actual data size (%d != %d)"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if type(dictionary['Filter']) == PDFArray: selected_filter = dictionary['Filter'][0] del dictionary['Filter'][0] if dictionary.has_key('DecodeParms'): assert dictionary['DecodeParms'] == PDFArray, 'Array of filters need array of decoding params' selected_params = dictionary['DecodeParms'][0] deletion_list.append((dictionary['DecodeParms'],0)) #del dictionary['DecodeParms'][0] else: selected_filter = dictionary['Filter'] del dictionary['Filter'] if dictionary.has_key('DecodeParms'): selected_params = dictionary['DecodeParms'] deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] if dictionary.has_key('Filter') and \ type(dictionary['Filter']) == PDFArray and \ len(dictionary['Filter']) == 0: deletion_list.append((dictionary, 'Filter')) #del dictionary['Filter'] if dictionary.has_key('DecodeParms') and \ type(dictionary['DecodeParms']) == PDFArray and \ len(dictionary['DecodeParms']) == 0: deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] #FIX recode defilterData .. make it register/unregister able. 
#(think /Crypt 7.4.10 Crypt Filter ) self.data.value = defilterData(selected_filter.value,self.data.value, selected_params and selected_params.value or selected_params) for v,i in deletion_list: del v[i] dictionary['Length'].value = len(self.data.value) def defilter(self): try: while self.isFiltered(): self.popFilter() except Exception,e: logger.debug("Couldn't defilter <%s> stream (exception %s)."%(self.value,str(e))) logger.info("Couldn't defilter <%s> stream."%str(self.get_numgen())) def isObjStm(self): ''' Return true if this is an object stream (ObjStml) ''' return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm' def expandObjStm(self): ''' This parses the ObjStm structure and replace it with all the new indirect objects. ''' from opaflib.parser import parse assert not self.isFiltered(), "ObjStm should not be compressed at this point" assert self.dictionary.has_key('N'), "N is mandatory in ObjStm dictionary" assert self.dictionary.has_key('First'), "First is mandatory in ObjStm dictionary" dictionary = self.dictionary data = self.data.value first = dictionary["First"].value pointers = [int(x) for x in data[:first].split()] assert len(pointers)%2 == 0 , "Wrong number of integer in the ObjStm begining" pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ]) positions = sorted(pointers.keys() + [len(data)]) parsed_objects = [] for p in range(0,len(positions)-1): logger.info("Adding new object %s from objectstream"%repr((pointers[positions[p]],0))) io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+" ")) io.id = (pointers[positions[p]],0) parsed_objects.append(io) return parsed_objects class PDFArray(PDFXML): def to_python(self): return [e.value for e in self] class PDFIndirect(PDFXML): def to_python(self): assert len(self.getchildren())==1, "Wrong number of children in indirect object" return (self.id, self.object.value) def _getobject(self): return self[0] def _setobject(self, o): self[0] = o object = property(_getobject,_setobject,None) def _getid(self): return tuple([int(i) for i in self.get('id').split(' ')]) def _setid(self, o): self.set('id', "%d %d"%o) id = property(_getid,_setid,None) def isStream(self): return len(self.xpath('./stream'))==1 class PDFPdf(PDFXML): def to_python(self): return [e.value for e in self] def getStartxref(self): ''' Get the last startxref pointer (should be at least one) ''' return self.pdf_update[-1].startxref[-1] #FIX move all this to pdf_update and do the wrapper here def getObjectAt(self, pos): ''' Get the object found at certain byte position ''' return self.xpath('//*[starts-with(@span,"%d~")]'%pos)[0] def getTrailer(self, startxref=None): ''' Get the Trailer dictionary (should be at least one) ''' if startxref == None: startxref = self.getStartxref().value xref = self.getObjectAt(startxref) assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary' return xref[0] def getID(self, startxref=None): ''' Get the pdf ID from the trailer dictionary ''' trailer = self.getTrailer(startxref).value if trailer.has_key('ID'): return trailer['ID'] else: return ['',''] def getIndirectObject(self, ref): ''' Search for an indirect object ''' for u in self.pdf_update: if u.has_key(ref): return u[ref] def getRoot(self): ''' Get the pdf Root node. 
''' return self.getIndirectObject(self.getTrailer()['Root'].value).object def isEncrypted(self): ''' Return true if pdf is encrypted ''' return self.getTrailer().has_key('Encrypt') def countObjStm(self): ''' Count number of 'compressed' object streams ''' return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..')) def countIObj(self): ''' Count number of 'compressed' object streams ''' return len(self.xpath('//indirect_object')) def graph(xml_pdf,dot='default.dot'): ''' Generate a .dot graph of the pdf ''' dotdata = "digraph {\n" nodes_added = set() for io in self.pdf_update.indirect_object: references = io.xpath(".//R") orig = "%d %d"%io.id if len(references) == 0: dotdata += '\t"%s";\n'%x nodes_added.add(orig) else: for r in references: dest = "%d %d"%r.value dotdata += '\t"%s" -> "%s";\n'%(orig, dest) nodes_added.add(orig) nodes_added.add(dest) try: root = "%d %d"%self.getRoot() dotdata += '\t"trailer" -> "%s";\n'%root except Exception,e : pass dotdata += '}\n' logger.info("Writing graph to %s(a dot file). Download graphviz or try this http://rise4fun.com/Agl for render it."%dot) file(dot,"w").write(dotdata) def expandAllObjStm(self): ''' Find all object streams and expand them. Each ObjStm will be replaced by its childs ''' for u in self.pdf_update: for ref in u.findAllObjStm(): u.expandObjStm(ref) def defilterAll(self): ''' Find all object streams and expand them. Each ObjStm will be replaced by its childs ''' for u in self.pdf_update: for io in u[:]: if type(io) == PDFIndirect and io.isStream() and io.object.isFiltered(): io.object.defilter() def decrypt(self): ''' This will try to decrypt V:4 null password encryption ''' import hashlib, struct from Crypto.Cipher import AES from Crypto.Util import randpool import base64 def rc4crypt(data, key): x = 0 box = range(256) for i in range(256): x = (x + box[i] + ord(key[i % len(key)])) % 256 box[i], box[x] = box[x], box[i] x = 0 y = 0 out = [] for char in data: x = (x + 1) % 256 y = (y + box[x]) % 256 box[x], box[y] = box[y], box[x] out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256])) return ''.join(out) block_size = 16 key_size = 32 def encrypt(plain_text,key_bytes): assert len(key_bytes) == key_size mode = AES.MODE_CBC pad = block_size - len(plain_text) % block_size data = plain_text + pad * chr(pad) iv_bytes = randpool.RandomPool(512).get_bytes(block_size) encrypted_bytes = iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data) return encrypted_bytes def decrypt(encrypted_bytes,key_bytes): #assert len(key_bytes) == key_size mode = AES.MODE_CBC iv_bytes = encrypted_bytes[:block_size] plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:]) pad = ord(plain_text[-1]) return plain_text[:-pad] assert self.isEncrypted() #Get and print the encryption dictionary encrypt = self.getTrailer()['Encrypt'].solve().object print "It's ENCRYPTED!" encrypt_py = encrypt.value print encrypt_py #Ok try to decrypt it ... assert encrypt_py['V'] == 4, "Sorry only Version 4 supported" assert encrypt_py['R'] == 4, "Sorry only Version 4 supported" #password length n = encrypt_py['Length']/8 print "N:",n #a) Pad or truncate the password string to exactly 32 bytes. user_password = "" pad = "28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A".decode('hex') print "PASSWORD: ", user_password.encode('hex') print "PAD: ", pad.encode('hex') #b) Initialize the MD5 hash function and pass the result of step (a) as input to this function. 
m = hashlib.md5() m.update((user_password+pad)[:32]) print "MD5 update 1", ((user_password+pad)[:32]).encode('hex') #c) Pass the value of the encryption dictionary's O entry to the MD5 hash function. m.update (encrypt_py['O'][:32]) print "MD5 update 2", (encrypt_py['O'][:32]).encode('hex') #d) Convert the integer value of the P entry to a 32-bit unsigned binary number and pass these bytes to the # MD5 hash function, low-order byte first. WTF!!?? print "MD5 update 3", struct.pack("<L", 0xffffffff&encrypt_py['P']).encode('hex') m.update (struct.pack("<L", 0xffffffff&encrypt_py['P'] )) #e) append ID ? #TODO, get the ID from the trailer.. ID = '' m.update (ID) print "MD5 update 4", ID.encode('hex') #f) If document metadata is not being encrypted, pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function. if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == false: m.update('\xff'*4) print "MD5 update 5", ('\xff'*4).encode('hex') print "1rst DIGEST:", m.digest().encode('hex') h = m.digest()[:n] for i in range(0,50): h = hashlib.md5(h[:n]).digest() print "Encryption KEY(%d)"%i, h.encode('hex') key = h[:n] print "Encryption KEY", key.encode('hex') print "Try to authenticate" _buf = hashlib.md5(pad + ID).digest() print "MD5(padding+ID):",_buf.encode('hex') for i in range(0,20): _key = ''.join([chr(ord(k)^i) for k in list(key)]) _buf1 = rc4crypt(_buf,_key) print "RC4 iter(%d) Encrypt data <%s> with key <%s> and it gives data <%s>"%(i,_buf.encode('hex'),_key.encode('hex'),_buf1.encode('hex')) _buf = _buf1 assert _buf == encrypt_py['U'][:16] print "Authenticated! (An actual pass is not needed. Using null pass '' )" print "U", encrypt_py['U'].encode('hex') print "O", encrypt_py['O'].encode('hex') def decrypt_xml(xml_element): n,g = xml_element.get_numgen() m = hashlib.md5() m.update(key) m.update(chr(n&0xff)) m.update(chr((n>>8)&0xff)) m.update(chr((n>>16)&0xff)) m.update(chr(g&0xff)) m.update(chr((g>>8)&0xff)) m.update("sAlT") real_key = m.digest() pld = e.value if pld.endswith("\x0d\x0a"): pld = pld[:-2] pld = decrypt(pld,real_key) e.value=pld #decrypt every string and stream in place... for e in self.xpath('//stream/data'): decrypt_xml(e) for e in self.xpath('//string'): decrypt_xml(e) class PDFUpdate(PDFXML): def to_python(self): return dict([e.value for e in self.xpath('./indirect_object')]) def has_key(self,key): key = "%d %d"%key return len(self.xpath('./indirect_object[@id="%s"]'%key))>0 def __getitem__(self, key): if tuple == type(key): key = "%d %d"%key return self.xpath('./indirect_object[@id="%s"]'%key)[0] return super(PDFUpdate,self).__getitem__(key) def __delitem__(self, key): if tuple == type(key): key = "%d %d"%key return self.remove(self.xpath('./indirect_object[@id="%s"]'%key)[0]) return super(PDFUpdate,self).__delitem__(key) def __setitem__(self, key, val): if str == type(key): self.xpath('./indirect_object[@obj="%s"]'%key)[0][:]=[val] #mmm else: super(PDFDictionary,self).__setitem__(key,val) def getObjectAt(self, pos): ''' Get the object found at certain byte position (only in this update!)''' return self.xpath('.//*[starts-with(@span,"%d~")]'%pos)[0] def getTrailer(self, startxref=None): ''' Get the Trailer dictionary (of this update!)''' if startxref == None: startxref = self.getStartxref().value xref = self.getObjectAt(startxref) return xref.dictionary def getRoot(self): ''' Get the pdf Root node of this update. 
''' return self[self.getTrailer()['Root'].value].object def countObjStm(self): ''' Count number of 'compressed' object streams ''' return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..')) def expandObjStm(self, ref): io_objstm = self[ref] assert io_objstm.object.dictionary['Type'].value == 'ObjStm' #completelly defilter the object stream while io_objstm.object.isFiltered(): io_objstm.object.popFilter() #parse the indirect simpe objects inside it expanded_iobjects = io_objstm.object.expandObjStm() #replace the object stream by its childs for new_io in expanded_iobjects: io_objstm.addnext(new_io) self.remove(io_objstm) def findAllObjStm(self): ''' Search 'compressed' object streams ids/refs''' return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../../..')] def expandAllObjStm(self): for ref in self.findAllObjStm(): self.expandObjStm(ref) #Factory class PDFXMLFactory(): def __init__(self): self.parser = etree.XMLParser() fallback = etree.ElementDefaultClassLookup(PDFXML) lookup = etree.ElementNamespaceClassLookup(fallback) namespace = lookup.get_namespace(None) #leafs namespace['name'] = PDFName namespace['string'] = PDFString namespace['number'] = PDFNumber namespace['null'] = PDFNull namespace['bool'] = PDFBool namespace['R'] = PDFR namespace['header'] = PDFHeader namespace['startxref'] = PDFStartxref namespace['data'] = PDFData #trees namespace['entry'] = PDFEntry namespace['dictionary'] = PDFDictionary namespace['stream'] = PDFStream namespace['pdf'] = PDFPdf namespace['pdf_update'] = PDFUpdate namespace['indirect_object'] = PDFIndirect namespace['array'] = PDFArray self.parser.set_element_class_lookup(lookup) #leaf def create_leaf(self, tag, value,**attribs): assert tag in ['number','string','name','R','startxref','header','data','null','bool'], "Got wrong leaf tag: %s"%tag xml = self.parser.makeelement(tag) xml.value=value xml.span=attribs.setdefault('span', (0xffffffff,-1)) del attribs['span'] for attr_key, attr_val in attribs.items(): xml.set(attr_key, str(attr_val)) return xml #Tree def create_tree(self, tag, *childs, **attribs): assert tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update'], "Got wrong tree tag: %s"%tag xml = self.parser.makeelement(tag) xml.span=attribs.setdefault('span', (0xffffffff,-1)) del attribs['span'] for attr_key, attr_val in attribs.items(): xml.set(attr_key, str(attr_val)) for child in childs: xml.append(child) return xml def __getattr__(self,tag, *args,**kwargs): if tag in ['number','string','name','R','startxref','header','data','null','bool']: return lambda payload, **my_kwargs: self.create_leaf(tag, payload, **my_kwargs) elif tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update']: return lambda payload, **my_kwargs: self.create_tree(tag, *payload, **my_kwargs) return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs) PDF = PDFXMLFactory() def create_leaf(tag, value, **kwargs): return PDF.create_leaf(tag, value,**kwargs) def create_tree(tag, childs, **kwargs): return PDF.create_tree(tag, *childs, **kwargs) if __name__=="__main__": name = create_leaf('name', "Name") string = create_leaf('string', "Felipe") entry = create_tree('entry',[name,string]) dictionary = create_tree('dictionary',[entry]) stream_data = create_leaf('data',"A"*100) stream = create_tree('stream',[dictionary,stream_data]) 
indirect = create_tree('indirect_object', [stream], obj=(1,0)) array = create_tree('array', [create_leaf('number', i) for i in range(0,10)]) xml=indirect print etree.tostring(xml), xml.value import code code.interact(local=locals())
37.332374
172
0.582248
24,921
0.960495
0
0
0
0
0
0
6,454
0.248747
86dd8cfba25399e11b5e6b0c69e97eec2cc7d779
1,590
py
Python
course-code/imooc-tf-mnist-flask/mnist/module.py
le3t/ko-repo
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
[ "Apache-2.0" ]
30
2018-12-06T02:17:45.000Z
2021-04-07T09:03:36.000Z
course-code/imooc-tf-mnist-flask/mnist/module.py
Artister/tutorials-java
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
[ "Apache-2.0" ]
3
2019-08-26T13:41:57.000Z
2019-08-26T13:44:21.000Z
course-code/imooc-tf-mnist-flask/mnist/module.py
Artister/tutorials-java
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
[ "Apache-2.0" ]
20
2018-12-27T08:31:02.000Z
2020-12-03T08:35:28.000Z
import tensorflow as tf # y=ax+b linear model def regression(x): a = tf.Variable(tf.zeros([784, 10]), name="a") b = tf.Variable(tf.zeros([10]), name="b") y = tf.nn.softmax(tf.matmul(x, a) + b) return y, [a, b] # 定义卷积模型 def convolutional(x, keep_prob): def conv2d(x, w): return tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image = tf.reshape(x, [-1, 28, 28, 1]) w_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # 全连接层 w_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, w_fc2, b_fc2]
30.576923
72
0.620755
0
0
0
0
0
0
0
0
73
0.045342
86dfb9b0ac538e587eb0952c661e061a843edff2
1,544
py
Python
src/sol/handle_metaplex.py
terra-dashboard/staketaxcsv
5793105488bf799c61aee64a45f44e9ae8fef397
[ "MIT" ]
140
2021-12-11T23:37:46.000Z
2022-03-29T23:04:36.000Z
src/sol/handle_metaplex.py
terra-dashboard/staketaxcsv
5793105488bf799c61aee64a45f44e9ae8fef397
[ "MIT" ]
80
2021-12-17T15:13:47.000Z
2022-03-31T13:33:53.000Z
src/sol/handle_metaplex.py
terra-dashboard/staketaxcsv
5793105488bf799c61aee64a45f44e9ae8fef397
[ "MIT" ]
52
2021-12-12T00:37:17.000Z
2022-03-29T23:25:09.000Z
from common.make_tx import make_swap_tx from sol.handle_simple import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _ = txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out) == 1: sent_amount, sent_currency, _, _ = transfers_out[0] received_amount, received_currency, _, _ = transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in, transfers_out, _ = txinfo.transfers_net if "MintTo" in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 0: return True elif ("MintTo" in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 1 and transfers_in[0][0] == 1): return True else: return False def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out) == 1: sent_amount, sent_currency, _, _ = transfers_out[0] received_amount, received_currency, _, _ = transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) return handle_unknown_detect_transfers(exporter, txinfo)
34.311111
98
0.709845
0
0
0
0
0
0
0
0
16
0.010363
86e079c3cae2dd930094e58b47942d7cc71d011d
11,293
py
Python
dcor/independence.py
lemiceterieux/dcor
205682a71463a2c6ab8f5b8b215ec12d44f0b5a6
[ "MIT" ]
null
null
null
dcor/independence.py
lemiceterieux/dcor
205682a71463a2c6ab8f5b8b215ec12d44f0b5a6
[ "MIT" ]
null
null
null
dcor/independence.py
lemiceterieux/dcor
205682a71463a2c6ab8f5b8b215ec12d44f0b5a6
[ "MIT" ]
null
null
null
""" Functions for testing independence of several distributions. The functions in this module provide methods for testing if the samples generated from two random vectors are independent. """ import numpy as np import scipy.stats from . import _dcor_internals, _hypothesis from ._dcor import u_distance_correlation_sqr from ._utils import _random_state_init, _transform_to_2d def distance_covariance_test( x, y, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): """ Test of distance covariance independence. Compute the test of independence based on the distance covariance, for two random vectors. The test is a permutation test where the null hypothesis is that the two random vectors are independent. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. exponent: float Exponent of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of fractional Brownian motion. num_resamples: int Number of permutations resamples to take in the permutation test. random_state: {None, int, array_like, numpy.random.RandomState} Random state to generate the permutations. Returns ------- HypothesisTest Results of the hypothesis test. See Also -------- distance_covariance Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1, 0, 0, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [1, 1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, a, ... num_resamples=7, random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) """ x = _transform_to_2d(x) y = _transform_to_2d(y) _dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state) # Compute U-centered matrices u_x = _dcor_internals._distance_matrix_generic( x, centering=_dcor_internals.double_centered, exponent=exponent) u_y = _dcor_internals._distance_matrix_generic( y, centering=_dcor_internals.double_centered, exponent=exponent) # Use the dcov statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test( x, y, z, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): """ Test of partial distance covariance independence. Compute the test of independence based on the partial distance covariance, for two random vectors conditioned on a third. 
The test is a permutation test where the null hypothesis is that the first two random vectors are independent given the third one. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. z: array_like Observed random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. num_resamples: int Number of permutations resamples to take in the permutation test. random_state: {None, int, array_like, numpy.random.RandomState} Random state to generate the permutations. Returns ------- HypothesisTest Results of the hypothesis test. See Also -------- partial_distance_covariance Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1, 0, 0, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [1, 1, 0, 1]]) >>> c = np.array([[1000, 0, 0, 1000], ... [0, 1000, 1000, 1000], ... [1000, 1000, 1000, 1000], ... [1000, 1000, 0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c, b, ... num_resamples=7, random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) """ random_state = _random_state_init(random_state) # Compute U-centered matrices u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent) u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent) # Compute projections proj = _dcor_internals.u_complementary_projection(u_z) p_xz = proj(u_x) p_yz = proj(u_y) # Use the pdcor statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.u_product( distance_matrix, p_yz) return _hypothesis._permutation_test_with_sym_matrix( p_xz, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def distance_correlation_t_statistic(x, y): """ Transformation of the bias corrected version of distance correlation used in :func:`distance_correlation_t_test`. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar T statistic. 
See Also -------- distance_correlation_t_test Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1, 0, 0, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a, a) inf >>> dcor.independence.distance_correlation_t_statistic(a, b) ... # doctest: +ELLIPSIS -0.4430164... >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b, b) inf """ bcdcor = u_distance_correlation_sqr(x, y) n = x.shape[0] v = n * (n - 3) / 2 return np.sqrt(v - 1) * bcdcor / np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x, y): """ Test of independence for high dimension based on convergence to a Student t distribution. The null hypothesis is that the two random vectors are independent. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- HypothesisTest Results of the hypothesis test. See Also -------- distance_correlation_t_statistic Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16]]) >>> b = np.array([[1, 0, 0, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) >>> dcor.independence.distance_correlation_t_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) """ t_test = distance_correlation_t_statistic(x, y) n = x.shape[0] v = n * (n - 3) / 2 df = v - 1 p_value = 1 - scipy.stats.t.cdf(t_test, df=df) return _hypothesis.HypothesisTest(p_value=p_value, statistic=t_test)
33.411243
79
0.609493
0
0
0
0
0
0
0
0
8,806
0.779775
86e150137bde5dca549d0321cdc857bd542bc500
3,878
py
Python
cinemasci/cis/__init__.py
cinemascience/cinemasc
5b00a0c2e3c886f65cfbf1f59e914fc458d7068b
[ "BSD-3-Clause" ]
null
null
null
cinemasci/cis/__init__.py
cinemascience/cinemasc
5b00a0c2e3c886f65cfbf1f59e914fc458d7068b
[ "BSD-3-Clause" ]
3
2020-04-22T16:26:44.000Z
2020-04-22T16:30:12.000Z
cinemasci/cis/__init__.py
cinemascience/cinemasc
5b00a0c2e3c886f65cfbf1f59e914fc458d7068b
[ "BSD-3-Clause" ]
1
2020-03-06T21:21:19.000Z
2020-03-06T21:21:19.000Z
from . import imageview from . import cisview from . import renderer from . import convert class cis: """Composible Image Set Class The data structure to hold properties of a Composible Image Set. """ def __init__(self, filename): """ The constructor. """ self.fname = filename self.classname = "COMPOSABLE_IMAGE_SET" self.dims = [0,0] self.flags = "CONSTANT_CHANNELS" self.version = "1.0" self.parameterlist = [] self.parametertable = None self.variables = {} self.images = {} self.colormaps = {} def debug_print(self): """ Debug print statement for CIS properties. """ print("printing cis") print(" fname: {}".format(self.fname)) print(" classname: {}".format(self.classname)) print(" dims: {}".format(self.dims)) print(" flags: {}".format(self.flags)) print(" version: {}".format(self.version)) print(" colormaps: ") for m in self.colormaps: print(m) for i in self.get_images(): print(" image: {}".format(self.get_image(i).name)) for l in self.get_image(i).get_layers(): print(" layer: {}".format(self.get_image(i).get_layer(l).name)) print("\n") def get_image(self, key): """ Returns an image given its key. """ result = False if key in self.images: result = self.images[key] return result def get_images(self): """ Returns all images. """ for i in self.images: yield i def get_image_names(self): """ Returns list of image names. """ return list(self.images.keys()) def set_parameter_table(self, table): """ Set parameter table using a deep copy. """ self.parametertable = table.copy(deep=True) def add_parameter(self, name, type): """ Add a parameter to the list of parameters for the CIS. """ # check for duplicates self.parameterlist.append([name, type]) def add_variable(self, name, type, min, max): """ Add a variable to the set of variables. """ # check for duplicates self.variables[name] = {'type':type, 'min':min, 'max':max} def add_image(self, name): """ Add an image to the set of images in the CIS. """ # check for duplicates self.images[name] = image.image(name) return self.images[name] def get_variables(self): """ Return all variables. """ for i in self.variables: yield i def get_variable(self, name): """ Return a variable. """ variable = None if name in self.variables: variable = self.variables[name] return variable def get_image(self,name): """ Return an image. """ image = None if name in self.images: image = self.images[name] return image def get_colormap(self,name): """ Return a colormap. """ colormap = None if name in self.colormaps: colormap = self.colormaps[name] return colormap def add_colormap(self, name, path): """ Add a colormap to the set of colormaps. """ #if colormap not in dict if (name not in self.colormaps): self.colormaps[name] = colormap.colormap(path) def remove_colormap(self, name): """ Remove a colormap from the set of colormaps. """ self.colormaps.pop(name) def get_colormaps(self): """ Return all colormaps. """ for i in self.colormaps: yield i def set_dims(self, w, h): """ Set the dimensions of the CIS given a width and height. """ self.dims = [w, h]
29.603053
84
0.555183
3,784
0.975761
337
0.0869
0
0
0
0
1,087
0.280299
86e1817f75ca21dff7ecb06d87908e9887be1bfd
2,172
py
Python
applications/spaghetti.py
fos/fos-legacy
db6047668781a0615abcebc7d55a7164f3105047
[ "BSD-3-Clause" ]
2
2016-08-03T10:33:08.000Z
2021-06-23T18:50:14.000Z
applications/spaghetti.py
fos/fos-legacy
db6047668781a0615abcebc7d55a7164f3105047
[ "BSD-3-Clause" ]
null
null
null
applications/spaghetti.py
fos/fos-legacy
db6047668781a0615abcebc7d55a7164f3105047
[ "BSD-3-Clause" ]
null
null
null
import numpy as np import nibabel as nib import os.path as op import pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] = True #fos modules from fos.actor.axes import Axes from fos import World, Window, WindowManager from labeler import TrackLabeler from fos.actor.slicer import Slicer #dipy modules from dipy.segment.quickbundles import QuickBundles from dipy.io.dpy import Dpy from dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap import orient2rgb import copy if __name__ == '__main__': subject = 5 seeds = 1 qb_dist = 30 #load T1 volume registered in MNI space img = nib.load('data/subj_'+("%02d" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine = img.get_affine() #load the tracks registered in MNI space fdpyw = 'data/subj_'+("%02d" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r') T = dpr.read_tracks() dpr.close() #load initial QuickBundles with threshold 30mm fpkl = 'data/subj_'+("%02d" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction system for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking tool sl = Slicer(affine,data) #add one way communication between tl and sl tl.slicer=sl #OpenGL coordinate system axes ax = Axes(100) x,y,z=data.shape #add the actors to the world w=World() w.add(tl) w.add(sl) #w.add(ax) #create a window wi = Window(caption="Interactive Spaghetti using Diffusion Imaging in Python (dipy.org) and Free On Shades (fos.me)",\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to the window wi.attach(w) #create a manager which can handle multiple windows wm = WindowManager() wm.add(wi) wm.run() print('Everything is running ;-)')
31.941176
122
0.675414
0
0
0
0
0
0
0
0
931
0.428637
86e1dc1697df65dd8302b1c8457579ff83a8e10d
1,074
py
Python
faceai/gender.py
dlzdy/faceai
4b1e41d4c394c00da51533562b76306d86493f72
[ "MIT" ]
1
2021-05-18T07:31:14.000Z
2021-05-18T07:31:14.000Z
faceai/gender.py
dlzdy/faceai
4b1e41d4c394c00da51533562b76306d86493f72
[ "MIT" ]
null
null
null
faceai/gender.py
dlzdy/faceai
4b1e41d4c394c00da51533562b76306d86493f72
[ "MIT" ]
null
null
null
#coding=utf-8 #性别识别 import cv2 from keras.models import load_model import numpy as np import chineseText img = cv2.imread("img/gather.png") face_classifier = cv2.CascadeClassifier( "d:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml" ) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier = load_model( "classifier/gender_models/simple_CNN.81-0.96.hdf5") gender_labels = {0: '女', 1: '男'} color = (255, 255, 255) for (x, y, w, h) in faces: face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)] face = cv2.resize(face, (48, 48)) face = np.expand_dims(face, 0) face = face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), (x + h, y + w), color, 2) img = chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30) cv2.imshow("Image", img) cv2.waitKey(0) cv2.destroyAllWindows()
30.685714
103
0.691806
0
0
0
0
0
0
0
0
208
0.191529
86e1dfa0c33f00a823a44b2f6b5cc3f12ae76c76
5,872
py
Python
csm_web/scheduler/tests/utils.py
mudit2103/csm_web
3b7fd9ca7269ad4cb57bf264cf62a620e02d3780
[ "MIT" ]
null
null
null
csm_web/scheduler/tests/utils.py
mudit2103/csm_web
3b7fd9ca7269ad4cb57bf264cf62a620e02d3780
[ "MIT" ]
null
null
null
csm_web/scheduler/tests/utils.py
mudit2103/csm_web
3b7fd9ca7269ad4cb57bf264cf62a620e02d3780
[ "MIT" ]
null
null
null
from django.test import TestCase
from os import path
from rest_framework import status
from rest_framework.test import APIClient
import random
from scheduler.models import Profile
from scheduler.factories import (
    CourseFactory,
    SpacetimeFactory,
    UserFactory,
    ProfileFactory,
    SectionFactory,
    AttendanceFactory,
    OverrideFactory,
    create_attendances_for,
)

random.seed(0)

COURSE_NAMES = ("CS88", "CS61A", "CS61B", "CS70", "CS61C", "EE16A")
ROLE_MAP = Profile.ROLE_MAP
BASE_PATH = "/scheduler"


# ----- REQUEST UTILITIES -----

def fail_msg(ep, resp):
    return "Endpoint: {}\nResponse Content: {}".format(ep, resp.content)


class APITestCase(TestCase):
    def get_client_for(self, user):
        """Returns an APIClient object that is logged in as the provided user."""
        client = APIClient()
        client.force_authenticate(user)
        return client

    def request(self, method, endpoint, exp_code=None, data=None):
        """
        Performs a request to the specified endpoint and returns the response object.
        Also checks if the status code of the response is exp_code, if provided.
        The method parameter should be a get/post/etc from an APIClient object.
        """
        resp = method(path.join(BASE_PATH, endpoint.strip("/")), follow=True, data=data)
        if exp_code is not None:
            self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp))
        return resp

    def req_fails_perms(self, method, endpoint, data=None):
        """
        Performs a request to the specified endpoint, and checks that it fails
        due to the user lacking proper permissions.
        The method parameter should be a get/post/etc from an APIClient object.
        Returns the response object afterwards.
        """
        return self.request(
            method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data
        )

    def req_fails_method(self, method, endpoint, data=None):
        """
        Performs a request to the specified endpoint, and checks that it fails
        due to the endpoint not supporting the provided method.
        Returns the response object.
        """
        return self.request(
            method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data
        )

    def req_succeeds(self, method, endpoint, data=None):
        """
        Performs a request to the specified endpoint, and checks that it succeeds.
        The method parameter should be a get/post/etc from an APIClient object.
        Returns the response object.
        """
        return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data)


# ----- MODEL GENERATION -----

def random_objs(clazz, n=1):
    """
    Generates N instances of the provided class, retrieved from the database.
    """
    src = clazz.objects.all()
    for _ in range(n):
        yield random.choice(src)


def make_test_courses():
    """Creates course objects and persists them to database."""
    return [CourseFactory.create(name=name) for name in COURSE_NAMES]


def make_test_users(n):
    """Creates N test users and persists them to database."""
    return UserFactory.create_batch(n)


def give_role(user, role, course):
    """
    Creates a profile for USER in a given ROLE for the provided COURSE, and
    saves the profile to database.
    """
    return ProfileFactory.create(
        user=user, course=course, leader=None, section=None, role=role
    )


def create_empty_section_for(mentor):
    """
    Creates a section for MENTOR without populated students.
    """
    return SectionFactory.create(course=mentor.course, mentor=mentor)


def enroll_user_as_student(user, section):
    """
    Creates a student profile for USER, and assigns them to the given SECTION.
    Also creates blank attendances as necessary.
    Returns the created profile.
    """
    student = give_role(user, Profile.STUDENT, section.course)
    student.section = section
    student.leader = section.leader
    create_attendances_for(student)
    return student


def gen_test_data(cls, NUM_USERS=300):
    """
    Adds NUM_USERS users to the database and initializes profiles for them as follows:
    - 2 coords per course
    - 4 SMs per coord, each with a section of 3-6 students
    - 3 JMs per SM, each with a section of 3-6 students
    """
    users = iter(make_test_users(NUM_USERS))
    courses = make_test_courses()
    # for sanity tests, everyone only has one role for now
    num_courses = len(courses)
    coords, seniors, juniors, students = [], [], [], []
    COORD_COUNT = 2
    SM_COUNT = 4
    JM_COUNT = 3

    def assign(role, leader, c, lst):
        # returns the profile created
        profile = give_role(next(users), role, c)
        profile.leader = leader
        lst.append(profile)
        return profile

    try:
        for c in courses:
            # coords
            for i in range(COORD_COUNT):
                coord = assign(Profile.COORDINATOR, None, c, coords)
                # SMs
                for j in range(SM_COUNT):
                    sm = assign(Profile.SENIOR_MENTOR, coord, c, seniors)
                    section = create_empty_section_for(sm)
                    for k in range(random.randint(3, 6)):
                        students.append(enroll_user_as_student(next(users), section))
                    # JMs
                    for k in range(JM_COUNT):
                        jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors)
                        for _ in range(random.randint(3, 6)):
                            students.append(
                                enroll_user_as_student(next(users), section)
                            )
    except StopIteration:
        pass
    cls.users = users
    cls.courses = courses
    cls.coords = coords
    cls.seniors = seniors
    cls.juniors = juniors
    cls.students = students
32.804469
88
0.647309
2,022
0.344346
208
0.035422
0
0
0
0
2,040
0.347411
86e1fd3bf7ee00e117356675760b13ae01e5890a
3,282
py
Python
coldtype/beziers.py
tallpauley/coldtype
c1811e1d3713ff9c3c804511d6cd607b1d802065
[ "Apache-2.0" ]
null
null
null
coldtype/beziers.py
tallpauley/coldtype
c1811e1d3713ff9c3c804511d6cd607b1d802065
[ "Apache-2.0" ]
null
null
null
coldtype/beziers.py
tallpauley/coldtype
c1811e1d3713ff9c3c804511d6cd607b1d802065
[ "Apache-2.0" ]
null
null
null
import math
from fontTools.pens.recordingPen import RecordingPen, replayRecording
from fontTools.misc.bezierTools import calcCubicArcLength, splitCubicAtT
from coldtype.geometry import Rect, Point


def raise_quadratic(start, a, b):
    c0 = start
    c1 = (c0[0] + (2/3)*(a[0] - c0[0]), c0[1] + (2/3)*(a[1] - c0[1]))
    c2 = (b[0] + (2/3)*(a[0] - b[0]), b[1] + (2/3)*(a[1] - b[1]))
    c3 = (b[0], b[1])
    return [c1, c2, c3]


__length_cache = {}
__split_cache = {}


def splitCubicAtT_cached(a, b, c, d, t):
    global __split_cache
    abcdt = (a, b, c, d, t)
    sc = __split_cache.get(abcdt)
    if sc:
        return sc
    else:
        s = splitCubicAtT(a, b, c, d, t)
        __split_cache[abcdt] = s
        return s


def calcCubicArcLength_cached(a, b, c, d):
    #return calcCubicArcLength(a, b, c, d)
    global __length_cache
    abcd = (a, b, c, d)
    lc = __length_cache.get(abcd)
    if lc:
        return lc
    else:
        l = calcCubicArcLength(a, b, c, d)
        __length_cache[abcd] = l
        return l


class CurveCutter():
    def __init__(self, g, inc=0.0015):
        if isinstance(g, RecordingPen):
            self.pen = g
        else:
            self.pen = RecordingPen()
            g.draw(self.pen)
        self.inc = inc
        self.length = self.calcCurveLength()

    def calcCurveLength(self):
        length = 0
        for i, (t, pts) in enumerate(self.pen.value):
            if t == "curveTo":
                p1, p2, p3 = pts
                p0 = self.pen.value[i-1][-1][-1]
                length += calcCubicArcLength_cached(p0, p1, p2, p3)
            elif t == "lineTo":
                pass  # todo
        return length

    def subsegment(self, start=None, end=None):
        global __cut_cache
        inc = self.inc
        length = self.length
        ended = False
        _length = 0
        out = []
        for i, (t, pts) in enumerate(self.pen.value):
            if t == "curveTo":
                p1, p2, p3 = pts
                p0 = self.pen.value[i-1][-1][-1]
                length_arc = calcCubicArcLength_cached(p0, p1, p2, p3)
                if _length + length_arc < end:
                    _length += length_arc
                else:
                    t = inc
                    tries = 0
                    while not ended:
                        a, b = splitCubicAtT_cached(p0, p1, p2, p3, t)
                        length_a = calcCubicArcLength_cached(*a)
                        if _length + length_a > end:
                            ended = True
                            out.append(("curveTo", a[1:]))
                        else:
                            t += inc
                            tries += 1
            if t == "lineTo":
                pass  # TODO
            if not ended:
                out.append((t, pts))
        if out[-1][0] != "endPath":
            out.append(("endPath", []))
        return out

    def subsegmentPoint(self, start=0, end=1):
        inc = self.inc
        subsegment = self.subsegment(start=start, end=end)
        try:
            t, (a, b, c) = subsegment[-2]
            tangent = math.degrees(math.atan2(c[1] - b[1], c[0] - b[0]) + math.pi*.5)
            return c, tangent
        except ValueError:
            return None, None
31.557692
85
0.482937
2,249
0.685253
0
0
0
0
0
0
111
0.033821
86e21cfc54ba4f492a89adb3a5ddc21c8d452d78
3,930
py
Python
p1_navigation/train.py
nick0lay/deep-reinforcement-learning
5af4daca9850b4e12aec5d8b0dad87f1e22a1f98
[ "MIT" ]
null
null
null
p1_navigation/train.py
nick0lay/deep-reinforcement-learning
5af4daca9850b4e12aec5d8b0dad87f1e22a1f98
[ "MIT" ]
null
null
null
p1_navigation/train.py
nick0lay/deep-reinforcement-learning
5af4daca9850b4e12aec5d8b0dad87f1e22a1f98
[ "MIT" ]
null
null
null
""" Project for Udacity Danaodgree in Deep Reinforcement Learning This script train an agent to navigate (and collect bananas!) in a large, square world. A reward of +1 is provided for collecting a yellow banana, and a reward of -1 is provided for collecting a blue banana. Thus, the goal of your agent is to collect as many yellow bananas as possible while avoiding blue bananas. The state space has 37 dimensions and contains the agent's velocity, along with ray-based perception of objects around the agent's forward direction. Given this information, the agent has to learn how to best select actions. Four discrete actions are available, corresponding to: 0 - move forward. 1 - move backward. 2 - turn left. 3 - turn right. The task is episodic, and in order to solve the environment, your agent must get an average score of +13 over 100 consecutive episodes. """ from unityagents import UnityEnvironment import numpy as np from collections import deque from dqn_agent import Agent import torch device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") """ Unity environment configuration Mac: "path/to/Banana.app" Windows (x86): "path/to/Banana_Windows_x86/Banana.exe" Windows (x86_64): "path/to/Banana_Windows_x86_64/Banana.exe" Linux (x86): "path/to/Banana_Linux/Banana.x86" Linux (x86_64): "path/to/Banana_Linux/Banana.x86_64" Linux (x86, headless): "path/to/Banana_Linux_NoVis/Banana.x86" Linux (x86_64, headless): "path/to/Banana_Linux_NoVis/Banana.x86_64" """ # start Unity environment env = UnityEnvironment(file_name="Banana.app") # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state_size = len(env_info.vector_observations[0]) # initialize agent agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device) def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99): """Deep Q-Learning. Params ====== n_episodes (int): maximum number of training episodes eps_start (float): starting value of epsilon, for epsilon-greedy action selection eps_end (float): minimum value of epsilon eps_decay (float): multiplicative factor (per episode) for decreasing epsilon """ scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores eps = eps_start # initialize epsilon for i_episode in range(1, n_episodes+1): # reset environment env_info = env.reset(train_mode=True)[brain_name] # get initial state state = env_info.vector_observations[0] # set initial score score = 0 while True: action = agent.act(state, eps) env_info = env.step(action)[brain_name] next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) # save most recent score scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") if i_episode % 100 == 0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') break return scores train()
42.717391
279
0.689567
0
0
0
0
0
0
0
0
2,075
0.52799
86e5087a507beef54f4930afdd98c56727fc0500
2,869
py
Python
models/model_factory.py
jac99/Egonn
075e00368a1676df741a35f42f6f38497da9d58f
[ "MIT" ]
9
2021-10-31T07:11:58.000Z
2022-03-29T14:06:49.000Z
models/model_factory.py
jac99/Egonn
075e00368a1676df741a35f42f6f38497da9d58f
[ "MIT" ]
null
null
null
models/model_factory.py
jac99/Egonn
075e00368a1676df741a35f42f6f38497da9d58f
[ "MIT" ]
3
2021-11-12T17:42:41.000Z
2022-03-11T00:41:47.000Z
# Warsaw University of Technology

from layers.eca_block import ECABasicBlock
from models.minkgl import MinkHead, MinkTrunk, MinkGL
from models.minkloc import MinkLoc
from third_party.minkloc3d.minkloc import MinkLoc3D
from misc.utils import ModelParams


def model_factory(model_params: ModelParams):
    in_channels = 1

    if model_params.model == 'MinkLoc':
        model = MinkLoc(in_channels=in_channels,
                        feature_size=model_params.feature_size,
                        output_dim=model_params.output_dim, planes=model_params.planes,
                        layers=model_params.layers, num_top_down=model_params.num_top_down,
                        conv0_kernel_size=model_params.conv0_kernel_size,
                        block=model_params.block,
                        pooling_method=model_params.pooling)
    elif model_params.model == 'MinkLoc3D':
        model = MinkLoc3D()
    elif 'egonn' in model_params.model:
        model = create_egonn_model(model_params)
    else:
        raise NotImplementedError('Model not implemented: {}'.format(model_params.model))

    return model


def create_egonn_model(model_params: ModelParams):
    model_name = model_params.model
    global_normalize = False
    local_normalize = True

    if model_name == 'egonn':
        # THIS IS OUR BEST MODEL
        block = ECABasicBlock
        planes = [32, 64, 64, 128, 128, 128, 128]
        layers = [1, 1, 1, 1, 1, 1, 1]

        global_in_levels = [5, 6, 7]
        global_map_channels = 128
        global_descriptor_size = 256

        local_in_levels = [3, 4]
        local_map_channels = 64
        local_descriptor_size = 128
    else:
        raise NotImplementedError(f'Unknown model: {model_name}')

    # Planes list number of channels for level 1 and above
    global_in_channels = [planes[i-1] for i in global_in_levels]
    head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels)

    if len(local_in_levels) > 0:
        local_in_channels = [planes[i-1] for i in local_in_levels]
        head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels)
    else:
        head_local = None

    min_out_level = len(planes)
    if len(global_in_levels) > 0:
        min_out_level = min(min_out_level, min(global_in_levels))
    if len(local_in_levels) > 0:
        min_out_level = min(min_out_level, min(local_in_levels))

    trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5,
                      block=block, min_out_level=min_out_level)

    net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size,
                 local_normalize=local_normalize, global_head=head_global,
                 global_descriptor_size=global_descriptor_size, global_pool_method='GeM',
                 global_normalize=global_normalize, quantizer=model_params.quantizer)

    return net
36.782051
100
0.694319
0
0
0
0
0
0
0
0
207
0.072151
86e596ecc94466fc1c8a56bb395c9ae7c14904e6
19,380
py
Python
mdns/Phidget22Python/Phidget22/Phidget.py
rabarar/phidget_docker
ceca56c86d27f291a4300a1257c02096862335ec
[ "MIT" ]
null
null
null
mdns/Phidget22Python/Phidget22/Phidget.py
rabarar/phidget_docker
ceca56c86d27f291a4300a1257c02096862335ec
[ "MIT" ]
null
null
null
mdns/Phidget22Python/Phidget22/Phidget.py
rabarar/phidget_docker
ceca56c86d27f291a4300a1257c02096862335ec
[ "MIT" ]
null
null
null
import sys import ctypes from Phidget22.PhidgetSupport import PhidgetSupport from Phidget22.Async import * from Phidget22.ChannelClass import ChannelClass from Phidget22.ChannelSubclass import ChannelSubclass from Phidget22.DeviceClass import DeviceClass from Phidget22.DeviceID import DeviceID from Phidget22.ErrorEventCode import ErrorEventCode from Phidget22.PhidgetException import PhidgetException class Phidget: def __init__(self): self.handle = ctypes.c_void_p() if sys.platform == 'win32': self._AttachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._AttachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Attach = None self._onAttach = None if sys.platform == 'win32': self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach = None self._onDetach = None if sys.platform == 'win32': self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) else: self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) self._Error = None self._onError = None if sys.platform == 'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange = None self._onPropertyChange = None def __eq__(self, other): return hasattr(other, 'handle') and self.handle.value == other.handle.value def __hash__(self): return self.handle.value def __str__(self): _value = (ctypes.c_char * 65536)() _valueLen = ctypes.c_int32(65536) if self.getIsChannel(): __func = PhidgetSupport.getDll().channelInfo else: __func = PhidgetSupport.getDll().deviceInfo result = __func(self.handle, ctypes.byref(_value), _valueLen) return _value.value.decode('utf- 8') def __del__(self): __func = PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32 res = __func(ctypes.byref(self.handle)) self.handle = None if res > 0: raise PhidgetException(res) def _localAttachEvent(self, handle, userPtr): if self._Attach == None: return self._Attach(self) def setOnAttachHandler(self, handler): if handler == None: self._Attach = None self._onAttach = None else: self._Attach = handler self._onAttach = self._AttachFactory(self._localAttachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onAttach, None) except RuntimeError: self._Attach = None self._onAttach = None def _localDetachEvent(self, handle, userPtr): if self._Detach == None: return self._Detach(self) def setOnDetachHandler(self, handler): if handler == None: self._Detach = None self._onDetach = None else: self._Detach = handler self._onDetach = self._DetachFactory(self._localDetachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onDetach, None) except RuntimeError: self._Detach = None self._onDetach = None def _localErrorEvent(self, handle, userPtr, Code, Description): if self._Error == None: return Description = Description.decode('utf-8') self._Error(self, Code, Description) def setOnErrorHandler(self, handler): if handler == None: self._Error = None self._onError = None else: self._Error = handler self._onError = self._ErrorFactory(self._localErrorEvent) try: __func = 
PhidgetSupport.getDll().Phidget_setOnErrorHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onError, None) except RuntimeError: self._Error = None self._onError = None def _localPropertyChangeEvent(self, handle, userPtr, propertyName): if self._PropertyChange == None: return propertyName = propertyName.decode('utf-8') self._PropertyChange(self, propertyName) def setOnPropertyChangeHandler(self, handler): if handler == None: self._PropertyChange = None self._onPropertyChange = None else: self._PropertyChange = handler self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onPropertyChange, None) except RuntimeError: self._PropertyChange = None self._onPropertyChange = None @staticmethod def finalize(flags): _flags = ctypes.c_int32(flags) __func = PhidgetSupport.getDll().Phidget_finalize __func.restype = ctypes.c_int32 result = __func(_flags) if result > 0: raise PhidgetException(result) @staticmethod def getLibraryVersion(): _LibraryVersion = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersion __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersion)) if result > 0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersionNumber)) if result > 0: raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def resetLibrary(): __func = PhidgetSupport.getDll().Phidget_resetLibrary __func.restype = ctypes.c_int32 result = __func() if result > 0: raise PhidgetException(result) def getAttached(self): _Attached = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getAttached __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Attached)) if result > 0: raise PhidgetException(result) return _Attached.value def getChannel(self): _Channel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Channel)) if result > 0: raise PhidgetException(result) return _Channel.value def setChannel(self, Channel): _Channel = ctypes.c_int(Channel) __func = PhidgetSupport.getDll().Phidget_setChannel __func.restype = ctypes.c_int32 result = __func(self.handle, _Channel) if result > 0: raise PhidgetException(result) def getChannelClass(self): _ChannelClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannelClass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClass)) if result > 0: raise PhidgetException(result) return _ChannelClass.value def getChannelClassName(self): _ChannelClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName)) if result > 0: raise PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelName)) if result > 0: raise PhidgetException(result) return _ChannelName.value.decode('utf-8') def 
getChannelSubclass(self): _ChannelSubclass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannelSubclass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if result > 0: raise PhidgetException(result) return _ChannelSubclass.value def close(self): __func = PhidgetSupport.getDll().Phidget_close __func.restype = ctypes.c_int32 result = __func(self.handle) if result > 0: raise PhidgetException(result) def getDeviceChannelCount(self, cls): _cls = ctypes.c_int(cls) _count = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype = ctypes.c_int32 result = __func(self.handle, _cls, ctypes.byref(_count)) if result > 0: raise PhidgetException(result) return _count.value def getDeviceClass(self): _DeviceClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceClass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClass)) if result > 0: raise PhidgetException(result) return _DeviceClass.value def getDeviceClassName(self): _DeviceClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClassName)) if result > 0: raise PhidgetException(result) return _DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceID)) if result > 0: raise PhidgetException(result) return _DeviceID.value def getDeviceLabel(self): _DeviceLabel = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel)) if result > 0: raise PhidgetException(result) return _DeviceLabel.value.decode('utf-8') def setDeviceLabel(self, DeviceLabel): _DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel)) if result > 0: raise PhidgetException(result) def getDeviceName(self): _DeviceName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceName)) if result > 0: raise PhidgetException(result) return _DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self): _DeviceSerialNumber = ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSerialNumber)) if result > 0: raise PhidgetException(result) return _DeviceSerialNumber.value def setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber) __func = PhidgetSupport.getDll().Phidget_setDeviceSerialNumber __func.restype = ctypes.c_int32 result = __func(self.handle, _DeviceSerialNumber) if result > 0: raise PhidgetException(result) def getDeviceSKU(self): _DeviceSKU = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSKU)) if result > 0: raise PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceVersion __func.restype = ctypes.c_int32 result = 
__func(self.handle, ctypes.byref(_DeviceVersion)) if result > 0: raise PhidgetException(result) return _DeviceVersion.value def getHub(self): _Hub = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getHub __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Hub)) if result > 0: raise PhidgetException(result) __Hub = Phidget() __Hub.handle = _Hub return __Hub def getHubPort(self): _HubPort = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPort __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPort)) if result > 0: raise PhidgetException(result) return _HubPort.value def setHubPort(self, HubPort): _HubPort = ctypes.c_int(HubPort) __func = PhidgetSupport.getDll().Phidget_setHubPort __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPort) if result > 0: raise PhidgetException(result) def getHubPortCount(self): _HubPortCount = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortCount __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortCount)) if result > 0: raise PhidgetException(result) return _HubPortCount.value def getHubPortSpeed(self): _HubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed)) if result > 0: raise PhidgetException(result) return _HubPortSpeed.value def setHubPortSpeed(self, HubPortSpeed): _HubPortSpeed = ctypes.c_uint32(HubPortSpeed) __func = PhidgetSupport.getDll().Phidget_setHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPortSpeed) if result > 0: raise PhidgetException(result) def getMaxHubPortSpeed(self): _MaxHubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed)) if result > 0: raise PhidgetException(result) return _MaxHubPortSpeed.value def getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result > 0: raise PhidgetException(result) return _HubPortSupportsSetSpeed.value def getIsChannel(self): _IsChannel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsChannel)) if result > 0: raise PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self): _IsHubPortDevice = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsHubPortDevice)) if result > 0: raise PhidgetException(result) return _IsHubPortDevice.value def setIsHubPortDevice(self, IsHubPortDevice): _IsHubPortDevice = ctypes.c_int(IsHubPortDevice) __func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle, _IsHubPortDevice) if result > 0: raise PhidgetException(result) def getIsLocal(self): _IsLocal = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsLocal)) if result > 0: raise PhidgetException(result) return _IsLocal.value def setIsLocal(self, IsLocal): _IsLocal = ctypes.c_int(IsLocal) __func = PhidgetSupport.getDll().Phidget_setIsLocal __func.restype 
= ctypes.c_int32 result = __func(self.handle, _IsLocal) if result > 0: raise PhidgetException(result) def getIsRemote(self): _IsRemote = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsRemote __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsRemote)) if result > 0: raise PhidgetException(result) return _IsRemote.value def setIsRemote(self, IsRemote): _IsRemote = ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote __func.restype = ctypes.c_int32 result = __func(self.handle, _IsRemote) if result > 0: raise PhidgetException(result) def open(self): __func = PhidgetSupport.getDll().Phidget_open __func.restype = ctypes.c_int32 result = __func(self.handle) if result > 0: raise PhidgetException(result) def openWaitForAttachment(self, timeout): _timeout = ctypes.c_uint32(timeout) __func = PhidgetSupport.getDll().Phidget_openWaitForAttachment __func.restype = ctypes.c_int32 result = __func(self.handle, _timeout) if result > 0: raise PhidgetException(result) def getParent(self): _Parent = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getParent __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Parent)) if result > 0: raise PhidgetException(result) __Parent = Phidget() __Parent.handle = _Parent return __Parent def getServerHostname(self): _ServerHostname = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerHostname)) if result > 0: raise PhidgetException(result) return _ServerHostname.value.decode('utf-8') def getServerName(self): _ServerName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName)) if result > 0: raise PhidgetException(result) return _ServerName.value.decode('utf-8') def setServerName(self, ServerName): _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName)) if result > 0: raise PhidgetException(result) def getServerPeerName(self): _ServerPeerName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerPeerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerPeerName)) if result > 0: raise PhidgetException(result) return _ServerPeerName.value.decode('utf-8') def getServerUniqueName(self): _ServerUniqueName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerUniqueName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerUniqueName)) if result > 0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8') def getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result > 0: raise PhidgetException(result) return _MaxVINTDeviceSpeed.value def getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result > 0: raise PhidgetException(result) return _VINTDeviceSupportsSetSpeed.value def writeDeviceLabel(self, deviceLabel): _deviceLabel = 
ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_deviceLabel)) if result > 0: raise PhidgetException(result) ANY_SERIAL_NUMBER = -1 ANY_HUB_PORT = -1 ANY_CHANNEL = -1 ANY_LABEL = None INFINITE_TIMEOUT = 0 DEFAULT_TIMEOUT = 1000
26.083445
113
0.757482
18,975
0.979102
0
0
1,103
0.056914
0
0
163
0.008411
86e5ef7ddc4f844bf23ef6fa4d846ed9f0547af6
1,826
py
Python
openprocurement/auctions/geb/tests/blanks/create.py
oleksiyVeretiuk/openprocurement.auctions.geb
2965b52bf8826b9a8f8870c9a4d2052f945f5799
[ "Apache-2.0" ]
null
null
null
openprocurement/auctions/geb/tests/blanks/create.py
oleksiyVeretiuk/openprocurement.auctions.geb
2965b52bf8826b9a8f8870c9a4d2052f945f5799
[ "Apache-2.0" ]
null
null
null
openprocurement/auctions/geb/tests/blanks/create.py
oleksiyVeretiuk/openprocurement.auctions.geb
2965b52bf8826b9a8f8870c9a4d2052f945f5799
[ "Apache-2.0" ]
null
null
null
def create_auction(self):
    expected_http_status = '201 Created'

    request_data = {"data": self.auction}
    entrypoint = '/auctions'

    response = self.app.post_json(entrypoint, request_data)
    self.assertEqual(response.status, expected_http_status)


def create_auction_check_minNumberOfQualifiedBids(self):
    expected_minNumberOfQualifiedBids = 2

    request_data = {"data": self.auction}
    entrypoint = '/auctions'
    response = self.app.post_json(entrypoint, request_data)

    self.assertEqual(response.json['data']['minNumberOfQualifiedBids'],
                     expected_minNumberOfQualifiedBids)


def create_auction_check_auctionParameters(self):
    expected_auctionParameters = {'type': 'texas'}

    request_data = {"data": self.auction}
    entrypoint = '/auctions'
    response = self.app.post_json(entrypoint, request_data)

    self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters)


def create_auction_invalid_auctionPeriod(self):
    expected_http_status = '422 Unprocessable Entity'

    auction = self.auction
    auction.pop('auctionPeriod')

    request_data = {"data": self.auction}
    entrypoint = '/auctions'

    response = self.app.post_json(entrypoint, request_data, status=422)
    self.assertEqual(response.status, expected_http_status)

    entrypoint = '/auctions'
    auction['auctionPeriod'] = {'startDate': None}

    response = self.app.post_json(entrypoint, request_data, status=422)
    self.assertEqual(response.status, expected_http_status)


def create_auction_dump(self):
    request_data = {"data": self.auction}

    entrypoint = '/auctions'
    response = self.app.post_json(entrypoint, request_data)

    filename = 'docs/source/tutorial/create_auction.http'
    self.dump(response.request, response, filename)
33.814815
71
0.728916
0
0
0
0
0
0
0
0
288
0.157722
86e649d303431093f68ab23ef3215809292e639b
4,872
py
Python
tests/integration/test_celery.py
crossscreenmedia/scout_apm_python
5cd31bf21f5acd0be0df4f40ec0bd29ec050ec01
[ "MIT" ]
null
null
null
tests/integration/test_celery.py
crossscreenmedia/scout_apm_python
5cd31bf21f5acd0be0df4f40ec0bd29ec050ec01
[ "MIT" ]
null
null
null
tests/integration/test_celery.py
crossscreenmedia/scout_apm_python
5cd31bf21f5acd0be0df4f40ec0bd29ec050ec01
[ "MIT" ]
null
null
null
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

from contextlib import contextmanager

import celery
import pytest
from celery.signals import setup_logging

import scout_apm.celery
from scout_apm.api import Config

# http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test
skip_unless_celery_4_plus = pytest.mark.skipif(
    celery.VERSION < (4, 0), reason="pytest fixtures added in Celery 4.0"
)


@setup_logging.connect
def do_nothing(**kwargs):
    # Just by connecting to this signal, we prevent Celery from setting up
    # logging - and stop it from interfering with global state
    # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging
    pass


@contextmanager
def app_with_scout(app=None, config=None):
    """
    Context manager that configures a Celery app with Scout installed.
    """
    if app is None:
        app = celery.Celery("tasks", broker="memory://")

    # Enable Scout by default in tests.
    if config is None:
        config = {"monitor": True}

    # Disable running the agent.
    config["core_agent_launch"] = False

    @app.task
    def hello():
        return "Hello World!"

    # Setup according to https://docs.scoutapm.com/#celery
    Config.set(**config)
    scout_apm.celery.install()
    try:
        yield app
    finally:
        scout_apm.celery.uninstall()
        # Reset Scout configuration.
        Config.reset_all()


def test_hello_eager(tracked_requests):
    with app_with_scout() as app:
        result = app.tasks["tests.integration.test_celery.hello"].apply()

    assert result.result == "Hello World!"
    assert len(tracked_requests) == 1
    tracked_request = tracked_requests[0]
    assert "task_id" in tracked_request.tags
    assert tracked_request.tags["is_eager"] is True
    assert tracked_request.tags["exchange"] == "unknown"
    assert tracked_request.tags["routing_key"] == "unknown"
    assert tracked_request.tags["queue"] == "unknown"
    assert tracked_request.active_spans == []
    assert len(tracked_request.complete_spans) == 1
    span = tracked_request.complete_spans[0]
    assert span.operation == "Job/tests.integration.test_celery.hello"


@skip_unless_celery_4_plus
def test_hello_worker(celery_app, celery_worker, tracked_requests):
    with app_with_scout(app=celery_app) as app:
        result = app.tasks["tests.integration.test_celery.hello"].delay().get()

    assert result == "Hello World!"
    assert len(tracked_requests) == 1
    tracked_request = tracked_requests[0]
    assert "task_id" in tracked_request.tags
    assert tracked_request.tags["is_eager"] is False
    assert tracked_request.tags["exchange"] == ""
    assert tracked_request.tags["routing_key"] == "celery"
    assert tracked_request.tags["queue"] == "unknown"
    assert (
        0.0 <= tracked_request.tags["queue_time"] < 60.0
    )  # Assume test took <60 seconds
    assert tracked_request.active_spans == []
    assert len(tracked_request.complete_spans) == 1
    span = tracked_request.complete_spans[0]
    assert span.operation == "Job/tests.integration.test_celery.hello"


@skip_unless_celery_4_plus
def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests):
    with app_with_scout(app=celery_app) as app:
        result = (
            app.tasks["tests.integration.test_celery.hello"]
            .apply_async(headers={"scout_task_start": "an evil string"})
            .get()
        )

    assert result == "Hello World!"
    assert len(tracked_requests) == 1
    tracked_request = tracked_requests[0]
    assert tracked_request.active_spans == []
    assert len(tracked_request.complete_spans) == 1
    span = tracked_request.complete_spans[0]
    assert span.operation == "Job/tests.integration.test_celery.hello"
    assert "queue_time" not in span.tags


@skip_unless_celery_4_plus
def test_hello_worker_chain(celery_app, celery_worker, tracked_requests):
    with app_with_scout(app=celery_app) as app:
        hello = app.tasks["tests.integration.test_celery.hello"]
        result = (hello.si() | hello.si()).apply_async().get()

    assert result == "Hello World!"
    assert len(tracked_requests) == 2
    assert [t.complete_spans[0].operation for t in tracked_requests] == [
        "Job/tests.integration.test_celery.hello",
        "Job/tests.integration.test_celery.hello",
    ]
    assert "parent_task_id" not in tracked_requests[0].tags
    first_task_id = tracked_requests[0].tags["task_id"]
    assert tracked_requests[1].tags["parent_task_id"] == first_task_id


def test_no_monitor(tracked_requests):
    # With an empty config, "monitor" defaults to False.
    with app_with_scout(config={}) as app:
        result = app.tasks["tests.integration.test_celery.hello"].apply()

    assert result.result == "Hello World!"
    assert tracked_requests == []
34.553191
82
0.70936
0
0
699
0.143473
3,345
0.686576
0
0
1,401
0.287562
86e65c540055ab2f761c3c998b140c5377b7f0c6
10,865
py
Python
molly/apps/places/migrations/0001_initial.py
mollyproject/mollyproject
3247c6bac3f39ce8d275d19aa410b30c6284b8a7
[ "Apache-2.0" ]
7
2015-05-16T13:27:21.000Z
2019-08-06T11:09:24.000Z
molly/apps/places/migrations/0001_initial.py
mollyproject/mollyproject
3247c6bac3f39ce8d275d19aa410b30c6284b8a7
[ "Apache-2.0" ]
null
null
null
molly/apps/places/migrations/0001_initial.py
mollyproject/mollyproject
3247c6bac3f39ce8d275d19aa410b30c6284b8a7
[ "Apache-2.0" ]
4
2015-11-27T13:36:36.000Z
2021-03-09T17:55:53.000Z
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Source' db.create_table('places_source', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source']) # Adding model 'EntityType' db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)), ('article', self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name', self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places', ['EntityType']) # Adding M2M table for field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding M2M table for field subtype_of_completion on 'EntityType' db.create_table('places_entitytype_subtype_of_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding model 'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) # Adding model 'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', 
self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity']) # Adding M2M table for field all_types on 'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding M2M table for field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding M2M table for field _identifiers on 'Entity' db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def backwards(self, orm): # Deleting model 'Source' db.delete_table('places_source') # Deleting model 'EntityType' db.delete_table('places_entitytype') # Removing M2M table for field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M table for field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') # Deleting model 'Identifier' db.delete_table('places_identifier') # Deleting model 'Entity' db.delete_table('places_entity') # Removing M2M table for field all_types on 'Entity' db.delete_table('places_entity_all_types') # Removing M2M table for field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M table for field _identifiers on 'Entity' db.delete_table('places_entity__identifiers') models = { 'places.entity': { 'Meta': {'ordering': "('title',)", 'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}), '_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}), 'absolute_url': ('django.db.models.fields.TextField', [], {}), 'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"}), 'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'places.entitytype': { 'Meta': {'ordering': "('verbose_name',)", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}), 'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [], {}) }, 'places.identifier': { 'Meta': {'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'places.source': { 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) } } complete_apps = ['places']
60.698324
211
0.620985
10,739
0.988403
0
0
0
0
0
0
6,248
0.575058
86e6c529a13c62833d2d9d91e683f2c9cc85c2b8
16,246
py
Python
sdk/python/pulumi_azure_native/servicebus/v20210601preview/get_subscription.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/servicebus/v20210601preview/get_subscription.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/servicebus/v20210601preview/get_subscription.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetSubscriptionResult', 'AwaitableGetSubscriptionResult', 'get_subscription', ] @pulumi.output_type class GetSubscriptionResult: """ Description of subscription resource. """ def __init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None): if accessed_at and not isinstance(accessed_at, str): raise TypeError("Expected argument 'accessed_at' to be a str") pulumi.set(__self__, "accessed_at", accessed_at) if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str): raise TypeError("Expected argument 'auto_delete_on_idle' to be a str") pulumi.set(__self__, "auto_delete_on_idle", auto_delete_on_idle) if client_affine_properties and not isinstance(client_affine_properties, dict): raise TypeError("Expected argument 'client_affine_properties' to be a dict") pulumi.set(__self__, "client_affine_properties", client_affine_properties) if count_details and not isinstance(count_details, dict): raise TypeError("Expected argument 'count_details' to be a dict") pulumi.set(__self__, "count_details", count_details) if created_at and not isinstance(created_at, str): raise TypeError("Expected argument 'created_at' to be a str") pulumi.set(__self__, "created_at", created_at) if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool): raise TypeError("Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool") pulumi.set(__self__, "dead_lettering_on_filter_evaluation_exceptions", dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError("Expected argument 'dead_lettering_on_message_expiration' to be a bool") pulumi.set(__self__, "dead_lettering_on_message_expiration", dead_lettering_on_message_expiration) if default_message_time_to_live and not isinstance(default_message_time_to_live, str): raise TypeError("Expected argument 'default_message_time_to_live' to be a str") pulumi.set(__self__, "default_message_time_to_live", default_message_time_to_live) if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str): raise TypeError("Expected argument 'duplicate_detection_history_time_window' to be a str") pulumi.set(__self__, "duplicate_detection_history_time_window", duplicate_detection_history_time_window) if enable_batched_operations and not isinstance(enable_batched_operations, bool): raise TypeError("Expected argument 'enable_batched_operations' to be a bool") pulumi.set(__self__, "enable_batched_operations", enable_batched_operations) if forward_dead_lettered_messages_to and not 
isinstance(forward_dead_lettered_messages_to, str): raise TypeError("Expected argument 'forward_dead_lettered_messages_to' to be a str") pulumi.set(__self__, "forward_dead_lettered_messages_to", forward_dead_lettered_messages_to) if forward_to and not isinstance(forward_to, str): raise TypeError("Expected argument 'forward_to' to be a str") pulumi.set(__self__, "forward_to", forward_to) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if is_client_affine and not isinstance(is_client_affine, bool): raise TypeError("Expected argument 'is_client_affine' to be a bool") pulumi.set(__self__, "is_client_affine", is_client_affine) if lock_duration and not isinstance(lock_duration, str): raise TypeError("Expected argument 'lock_duration' to be a str") pulumi.set(__self__, "lock_duration", lock_duration) if max_delivery_count and not isinstance(max_delivery_count, int): raise TypeError("Expected argument 'max_delivery_count' to be a int") pulumi.set(__self__, "max_delivery_count", max_delivery_count) if message_count and not isinstance(message_count, float): raise TypeError("Expected argument 'message_count' to be a float") pulumi.set(__self__, "message_count", message_count) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if requires_session and not isinstance(requires_session, bool): raise TypeError("Expected argument 'requires_session' to be a bool") pulumi.set(__self__, "requires_session", requires_session) if status and not isinstance(status, str): raise TypeError("Expected argument 'status' to be a str") pulumi.set(__self__, "status", status) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if updated_at and not isinstance(updated_at, str): raise TypeError("Expected argument 'updated_at' to be a str") pulumi.set(__self__, "updated_at", updated_at) @property @pulumi.getter(name="accessedAt") def accessed_at(self) -> str: """ Last time there was a receive request to this subscription. """ return pulumi.get(self, "accessed_at") @property @pulumi.getter(name="autoDeleteOnIdle") def auto_delete_on_idle(self) -> Optional[str]: """ ISO 8061 timeSpan idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes. """ return pulumi.get(self, "auto_delete_on_idle") @property @pulumi.getter(name="clientAffineProperties") def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']: """ Properties specific to client affine subscriptions. """ return pulumi.get(self, "client_affine_properties") @property @pulumi.getter(name="countDetails") def count_details(self) -> 'outputs.MessageCountDetailsResponse': """ Message count details """ return pulumi.get(self, "count_details") @property @pulumi.getter(name="createdAt") def created_at(self) -> str: """ Exact time the message was created. """ return pulumi.get(self, "created_at") @property @pulumi.getter(name="deadLetteringOnFilterEvaluationExceptions") def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]: """ Value that indicates whether a subscription has dead letter support on filter evaluation exceptions. 
""" return pulumi.get(self, "dead_lettering_on_filter_evaluation_exceptions") @property @pulumi.getter(name="deadLetteringOnMessageExpiration") def dead_lettering_on_message_expiration(self) -> Optional[bool]: """ Value that indicates whether a subscription has dead letter support when a message expires. """ return pulumi.get(self, "dead_lettering_on_message_expiration") @property @pulumi.getter(name="defaultMessageTimeToLive") def default_message_time_to_live(self) -> Optional[str]: """ ISO 8061 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself. """ return pulumi.get(self, "default_message_time_to_live") @property @pulumi.getter(name="duplicateDetectionHistoryTimeWindow") def duplicate_detection_history_time_window(self) -> Optional[str]: """ ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes. """ return pulumi.get(self, "duplicate_detection_history_time_window") @property @pulumi.getter(name="enableBatchedOperations") def enable_batched_operations(self) -> Optional[bool]: """ Value that indicates whether server-side batched operations are enabled. """ return pulumi.get(self, "enable_batched_operations") @property @pulumi.getter(name="forwardDeadLetteredMessagesTo") def forward_dead_lettered_messages_to(self) -> Optional[str]: """ Queue/Topic name to forward the Dead Letter message """ return pulumi.get(self, "forward_dead_lettered_messages_to") @property @pulumi.getter(name="forwardTo") def forward_to(self) -> Optional[str]: """ Queue/Topic name to forward the messages """ return pulumi.get(self, "forward_to") @property @pulumi.getter def id(self) -> str: """ Resource Id """ return pulumi.get(self, "id") @property @pulumi.getter(name="isClientAffine") def is_client_affine(self) -> Optional[bool]: """ Value that indicates whether the subscription has an affinity to the client id. """ return pulumi.get(self, "is_client_affine") @property @pulumi.getter(name="lockDuration") def lock_duration(self) -> Optional[str]: """ ISO 8061 lock duration timespan for the subscription. The default value is 1 minute. """ return pulumi.get(self, "lock_duration") @property @pulumi.getter(name="maxDeliveryCount") def max_delivery_count(self) -> Optional[int]: """ Number of maximum deliveries. """ return pulumi.get(self, "max_delivery_count") @property @pulumi.getter(name="messageCount") def message_count(self) -> float: """ Number of messages. """ return pulumi.get(self, "message_count") @property @pulumi.getter def name(self) -> str: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="requiresSession") def requires_session(self) -> Optional[bool]: """ Value indicating if a subscription supports the concept of sessions. """ return pulumi.get(self, "requires_session") @property @pulumi.getter def status(self) -> Optional[str]: """ Enumerates the possible values for the status of a messaging entity. """ return pulumi.get(self, "status") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ The system meta data relating to this resource. 
""" return pulumi.get(self, "system_data") @property @pulumi.getter def type(self) -> str: """ Resource type """ return pulumi.get(self, "type") @property @pulumi.getter(name="updatedAt") def updated_at(self) -> str: """ The exact time the message was updated. """ return pulumi.get(self, "updated_at") class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type, updated_at=self.updated_at) def get_subscription(namespace_name: Optional[str] = None, resource_group_name: Optional[str] = None, subscription_name: Optional[str] = None, topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult: """ Description of subscription resource. :param str namespace_name: The namespace name :param str resource_group_name: Name of the Resource group within the Azure subscription. :param str subscription_name: The subscription name. :param str topic_name: The topic name. 
""" __args__ = dict() __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName'] = topic_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value return AwaitableGetSubscriptionResult( accessed_at=__ret__.accessed_at, auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details, created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count, message_count=__ret__.message_count, name=__ret__.name, requires_session=__ret__.requires_session, status=__ret__.status, system_data=__ret__.system_data, type=__ret__.type, updated_at=__ret__.updated_at)
45.253482
595
0.70048
13,331
0.820571
1,373
0.084513
11,871
0.730703
0
0
5,362
0.33005
86e708b4a5fa05856a6c8d0dde3c26f2006621e1
4,340
py
Python
py_cfeve/module/CFAF240400E0-030TN-A1.py
crystalfontz/CFA-EVE-Python-Library
c5aca10b9b6ee109d4df8a9a692dcef083dafc88
[ "Unlicense" ]
1
2021-12-08T00:12:02.000Z
2021-12-08T00:12:02.000Z
py_cfeve/module/CFAF240400E0-030TN-A1.py
crystalfontz/CFA-EVE-Python-Library
c5aca10b9b6ee109d4df8a9a692dcef083dafc88
[ "Unlicense" ]
null
null
null
py_cfeve/module/CFAF240400E0-030TN-A1.py
crystalfontz/CFA-EVE-Python-Library
c5aca10b9b6ee109d4df8a9a692dcef083dafc88
[ "Unlicense" ]
null
null
null
#=========================================================================== # # Crystalfontz Raspberry-Pi Python example library for FTDI / BridgeTek # EVE graphic accelerators. # #--------------------------------------------------------------------------- # # This file is part of the port/adaptation of existing C based EVE libraries # to Python for Crystalfontz EVE based displays. # # 2021-10-20 Mark Williams / Crystalfontz America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This is free and unencumbered software released into the public domain. # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # For more information, please refer to <http:#unlicense.org/> # #============================================================================ #EVE Device Type EVE_DEVICE = 811 # EVE Clock Speed EVE_CLOCK_SPEED = 60000000 # Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE = False # Define RGB output pins order, determined by PCB layout LCD_SWIZZLE = 2 # Define active edge of PCLK. Observed by scope: # 0: Data is put out coincident with falling edge of the clock. # Rising edge of the clock is in the middle of the data. # 1: Data is put out coincident with rising edge of the clock. # Falling edge of the clock is in the middle of the data. LCD_PCLKPOL = 0 # LCD drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA = 0 # Spread Spectrum on RGB signals. Probably not a good idea at higher # PCLK frequencies. LCD_PCLK_CSPREAD = 0 #This is not a 24-bit display, so dither LCD_DITHER = 0 # Pixel clock divisor LCD_PCLK = 5 #---------------------------------------------------------------------------- # Frame_Rate = 60Hz / 16.7mS #---------------------------------------------------------------------------- # Horizontal timing # Target 60Hz frame rate, using the largest possible line time in order to # maximize the time that the EVE has to process each line. 
HPX = 240 # Horizontal Pixel Width HSW = 10 # Horizontal Sync Width HBP = 20 # Horizontal Back Porch HFP = 10 # Horizontal Front Porch HPP = 209 # Horizontal Pixel Padding # FTDI needs at least 1 here # Define the constants needed by the EVE based on the timing # Active width of LCD display LCD_WIDTH = HPX # Start of horizontal sync pulse LCD_HSYNC0 = HFP # End of horizontal sync pulse LCD_HSYNC1 = HFP+HSW # Start of active line LCD_HOFFSET = HFP+HSW+HBP # Total number of clocks per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing VLH = 400 # Vertical Line Height VS = 2 # Vertical Sync (in lines) VBP = 2 # Vertical Back Porch VFP = 4 # Vertical Front Porch VLP = 1 # Vertical Line Padding # FTDI needs at least 1 here # Define the constants needed by the EVE based on the timing # Active height of LCD display LCD_HEIGHT = VLH # Start of vertical sync pulse LCD_VSYNC0 = VFP # End of vertical sync pulse LCD_VSYNC1 = VFP+VS # Start of active screen LCD_VOFFSET = VFP+VS+VBP # Total number of lines per screen LCD_VCYCLE = VLH+VFP+VS+VBP+VLP
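As a quick cross-check of the 60 Hz target stated in the comments, the constants above imply a 12 MHz pixel clock and 489 x 409 clocks per frame; the short sketch below (not part of the original module) derives the resulting refresh rate from those same constants.

# Sanity check (not part of the original module): frame rate implied by the
# timing constants defined above.
pixel_clock = EVE_CLOCK_SPEED / LCD_PCLK           # 60 MHz / 5 = 12 MHz
clocks_per_frame = LCD_HCYCLE * LCD_VCYCLE         # 489 * 409 = 200001
frame_rate = pixel_clock / clocks_per_frame        # ~59.9997 Hz, i.e. the 60 Hz target
print(f"frame rate: {frame_rate:.4f} Hz")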
38.070175
78
0.645392
0
0
0
0
0
0
0
0
3,670
0.845622
86e79f3939b52fb2b048dd2d47804d7ba195c64a
12,893
py
Python
quapy/model_selection.py
OneToolsCollection/HLT-ISTI-QuaPy
6a5c528154c2d6d38d9f3258e667727bf692fc8b
[ "BSD-3-Clause" ]
null
null
null
quapy/model_selection.py
OneToolsCollection/HLT-ISTI-QuaPy
6a5c528154c2d6d38d9f3258e667727bf692fc8b
[ "BSD-3-Clause" ]
null
null
null
quapy/model_selection.py
OneToolsCollection/HLT-ISTI-QuaPy
6a5c528154c2d6d38d9f3258e667727bf692fc8b
[ "BSD-3-Clause" ]
null
null
null
import itertools import signal from copy import deepcopy from typing import Union, Callable import numpy as np import quapy as qp from quapy.data.base import LabelledCollection from quapy.evaluation import artificial_prevalence_prediction, natural_prevalence_prediction, gen_prevalence_prediction from quapy.method.aggregative import BaseQuantifier import inspect from util import _check_sample_size class GridSearchQ(BaseQuantifier): """Grid Search optimization targeting a quantification-oriented metric. Optimizes the hyperparameters of a quantification method, based on an evaluation method and on an evaluation protocol for quantification. :param model: the quantifier to optimize :type model: BaseQuantifier :param param_grid: a dictionary with keys the parameter names and values the list of values to explore :param sample_size: the size of the samples to extract from the validation set (ignored if protocl='gen') :param protocol: either 'app' for the artificial prevalence protocol, 'npp' for the natural prevalence protocol, or 'gen' for using a custom sampling generator function :param n_prevpoints: if specified, indicates the number of equally distant points to extract from the interval [0,1] in order to define the prevalences of the samples; e.g., if n_prevpoints=5, then the prevalences for each class will be explored in [0.00, 0.25, 0.50, 0.75, 1.00]. If not specified, then eval_budget is requested. Ignored if protocol!='app'. :param n_repetitions: the number of repetitions for each combination of prevalences. This parameter is ignored for the protocol='app' if eval_budget is set and is lower than the number of combinations that would be generated using the value assigned to n_prevpoints (for the current number of classes and n_repetitions). Ignored for protocol='npp' and protocol='gen' (use eval_budget for setting a maximum number of samples in those cases). :param eval_budget: if specified, sets a ceil on the number of evaluations to perform for each hyper-parameter combination. For example, if protocol='app', there are 3 classes, n_repetitions=1 and eval_budget=20, then n_prevpoints will be set to 5, since this will generate 15 different prevalences, i.e., [0, 0, 1], [0, 0.25, 0.75], [0, 0.5, 0.5] ... [1, 0, 0], and since setting it to 6 would generate more than 20. When protocol='gen', indicates the maximum number of samples to generate, but less samples will be generated if the generator yields less samples. :param error: an error function (callable) or a string indicating the name of an error function (valid ones are those in qp.error.QUANTIFICATION_ERROR :param refit: whether or not to refit the model on the whole labelled collection (training+validation) with the best chosen hyperparameter combination. Ignored if protocol='gen' :param val_split: either a LabelledCollection on which to test the performance of the different settings, or a float in [0,1] indicating the proportion of labelled data to extract from the training set, or a callable returning a generator function each time it is invoked (only for protocol='gen'). :param n_jobs: number of parallel jobs :param random_seed: set the seed of the random generator to replicate experiments. Ignored if protocol='gen'. :param timeout: establishes a timer (in seconds) for each of the hyperparameters configurations being tested. Whenever a run takes longer than this timer, that configuration will be ignored. If all configurations end up being ignored, a TimeoutError exception is raised. If -1 (default) then no time bound is set. 
:param verbose: set to True to get information through the stdout """ def __init__(self, model: BaseQuantifier, param_grid: dict, sample_size: Union[int, None] = None, protocol='app', n_prevpoints: int = None, n_repetitions: int = 1, eval_budget: int = None, error: Union[Callable, str] = qp.error.mae, refit=True, val_split=0.4, n_jobs=1, random_seed=42, timeout=-1, verbose=False): self.model = model self.param_grid = param_grid self.sample_size = sample_size self.protocol = protocol.lower() self.n_prevpoints = n_prevpoints self.n_repetitions = n_repetitions self.eval_budget = eval_budget self.refit = refit self.val_split = val_split self.n_jobs = n_jobs self.random_seed = random_seed self.timeout = timeout self.verbose = verbose self.__check_error(error) assert self.protocol in {'app', 'npp', 'gen'}, \ 'unknown protocol: valid ones are "app" or "npp" for the "artificial" or the "natural" prevalence ' \ 'protocols. Use protocol="gen" when passing a generator function thorough val_split that yields a ' \ 'sample (instances) and their prevalence (ndarray) at each iteration.' assert self.eval_budget is None or isinstance(self.eval_budget, int) if self.protocol in ['npp', 'gen']: if self.protocol=='npp' and (self.eval_budget is None or self.eval_budget <= 0): raise ValueError(f'when protocol="npp" the parameter eval_budget should be ' f'indicated (and should be >0).') if self.n_repetitions != 1: print('[warning] n_repetitions has been set and will be ignored for the selected protocol') def _sout(self, msg): if self.verbose: print(f'[{self.__class__.__name__}]: {msg}') def __check_training_validation(self, training, validation): if isinstance(validation, LabelledCollection): return training, validation elif isinstance(validation, float): assert 0. < validation < 1., 'validation proportion should be in (0,1)' training, validation = training.split_stratified(train_prop=1 - validation, random_state=self.random_seed) return training, validation elif self.protocol=='gen' and inspect.isgenerator(validation()): return training, validation else: raise ValueError(f'"validation" must either be a LabelledCollection or a float in (0,1) indicating the' f'proportion of training documents to extract (type found: {type(validation)}). 
' f'Optionally, "validation" can be a callable function returning a generator that yields ' f'the sample instances along with their true prevalence at each iteration by ' f'setting protocol="gen".') def __check_error(self, error): if error in qp.error.QUANTIFICATION_ERROR: self.error = error elif isinstance(error, str): self.error = qp.error.from_name(error) elif hasattr(error, '__call__'): self.error = error else: raise ValueError(f'unexpected error type; must either be a callable function or a str representing\n' f'the name of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}') def __generate_predictions(self, model, val_split): commons = { 'n_repetitions': self.n_repetitions, 'n_jobs': self.n_jobs, 'random_seed': self.random_seed, 'verbose': False } if self.protocol == 'app': return artificial_prevalence_prediction( model, val_split, self.sample_size, n_prevpoints=self.n_prevpoints, eval_budget=self.eval_budget, **commons ) elif self.protocol == 'npp': return natural_prevalence_prediction( model, val_split, self.sample_size, **commons) elif self.protocol == 'gen': return gen_prevalence_prediction(model, gen_fn=val_split, eval_budget=self.eval_budget) else: raise ValueError('unknown protocol') def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection, float, Callable] = None): """ Learning routine. Fits methods with all combinations of hyperparameters and selects the one minimizing the error metric. :param training: the training set on which to optimize the hyperparameters :param val_split: either a LabelledCollection on which to test the performance of the different settings, or a float in [0,1] indicating the proportion of labelled data to extract from the training set :return: self """ if val_split is None: val_split = self.val_split training, val_split = self.__check_training_validation(training, val_split) if self.protocol != 'gen': self.sample_size = _check_sample_size(self.sample_size) params_keys = list(self.param_grid.keys()) params_values = list(self.param_grid.values()) model = self.model if self.timeout > 0: def handler(signum, frame): self._sout('timeout reached') raise TimeoutError() signal.signal(signal.SIGALRM, handler) self.param_scores_ = {} self.best_score_ = None some_timeouts = False for values in itertools.product(*params_values): params = dict({k: values[i] for i, k in enumerate(params_keys)}) if self.timeout > 0: signal.alarm(self.timeout) try: # overrides default parameters with the parameters being explored at this iteration model.set_params(**params) model.fit(training) true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split) score = self.error(true_prevalences, estim_prevalences) self._sout(f'checking hyperparams={params} got {self.error.__name__} score {score:.5f}') if self.best_score_ is None or score < self.best_score_: self.best_score_ = score self.best_params_ = params self.best_model_ = deepcopy(model) self.param_scores_[str(params)] = score if self.timeout > 0: signal.alarm(0) except TimeoutError: print(f'timeout reached for config {params}') some_timeouts = True if self.best_score_ is None and some_timeouts: raise TimeoutError('all jobs took more than the timeout time to end') self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})') if self.refit: self._sout(f'refitting on the whole development set') self.best_model_.fit(training + val_split) return self def quantify(self, instances): """Estimate class prevalence values using the best model found after 
        calling the :meth:`fit` method.

        :param instances: sample containing the instances
        :return: a ndarray of shape `(n_classes)` with class prevalence estimates according to the best model found
            by the model selection process.
        """
        assert hasattr(self, 'best_model_'), 'quantify called before fit'
        return self.best_model().quantify(instances)

    @property
    def classes_(self):
        """
        Classes on which the quantifier has been trained.

        :return: a ndarray of shape `(n_classes)` with the class identifiers
        """
        return self.best_model().classes_

    def set_params(self, **parameters):
        """Sets the hyper-parameters to explore.

        :param parameters: a dictionary with keys the parameter names and values the list of values to explore
        """
        self.param_grid = parameters

    def get_params(self, deep=True):
        """Returns the dictionary of hyper-parameters to explore (`param_grid`)

        :param deep: Unused
        :return: the dictionary `param_grid`
        """
        return self.param_grid

    def best_model(self):
        """
        Returns the best model found after calling the :meth:`fit` method, i.e., the one trained on the combination
        of hyper-parameters that minimized the error function.

        :return: a trained quantifier
        """
        if hasattr(self, 'best_model_'):
            return self.best_model_
        raise ValueError('best_model called before fit')
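An illustrative driver for the class above; the PCC quantifier, the logistic-regression hyper-parameter grid, and the synthetic data are assumptions made for the sketch, not part of this module.

# Illustrative sketch only: the quantifier (PCC), the 'C' grid forwarded to the
# wrapped learner, and the synthetic data are assumptions, not part of this module.
import numpy as np
from sklearn.linear_model import LogisticRegression
from quapy.data.base import LabelledCollection
from quapy.method.aggregative import PCC
from quapy.model_selection import GridSearchQ

# toy binary dataset, split into training and test collections
X = np.random.rand(2000, 10)
y = np.random.randint(0, 2, size=2000)
training, test = LabelledCollection(X, y).split_stratified(train_prop=0.7)

model = GridSearchQ(
    model=PCC(LogisticRegression()),
    param_grid={'C': [0.1, 1, 10]},   # forwarded to the wrapped learner via set_params
    sample_size=100,
    protocol='app',
    n_prevpoints=5,
    error='mae',
    refit=True,
    verbose=True)

model.fit(training)                                # explores the grid on a held-out split
estim_prevalence = model.quantify(test.instances)  # quantifies with the best model found
print(model.best_params_, model.best_score_)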
48.65283
119
0.649732
12,487
0.96851
0
0
237
0.018382
0
0
6,573
0.509812
86e7d2f1d51490654abf153935c59b83db56caad
597
py
Python
flasky.py
ZxShane/slam_hospital
302704b3a188cea07dddfb23595dd75f8d3cd636
[ "Apache-2.0" ]
null
null
null
flasky.py
ZxShane/slam_hospital
302704b3a188cea07dddfb23595dd75f8d3cd636
[ "Apache-2.0" ]
1
2020-11-17T16:47:19.000Z
2021-01-26T10:16:33.000Z
flasky.py
ZxShane/slam_hospital
302704b3a188cea07dddfb23595dd75f8d3cd636
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import os

from flask_migrate import Migrate

from app import create_app, db
from app.models import User, Role, PoseToLocation

app = create_app(os.getenv('FLASK_CONFIG') or 'default')
migrate = Migrate(app, db)


# For Migrate to generate new migrations, these models must be visible, so expose them here
@app.shell_context_processor
def make_shell_context():
    return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation)


# Unit tests
@app.cli.command()
def test():
    """ run the unit tests """
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
22.111111
75
0.730318
0
0
0
0
315
0.488372
0
0
164
0.254264
86e7e28fd96ba38477835a4f1f9a0169efabb855
2,841
py
Python
python/day09/smoke_basin.py
aesdeef/advent-of-code-2021
4561bcf12ac03d360f5b28c48ef80134f97613b9
[ "MIT" ]
2
2021-12-03T06:18:27.000Z
2021-12-06T11:28:33.000Z
python/day09/smoke_basin.py
aesdeef/advent-of-code-2021
4561bcf12ac03d360f5b28c48ef80134f97613b9
[ "MIT" ]
null
null
null
python/day09/smoke_basin.py
aesdeef/advent-of-code-2021
4561bcf12ac03d360f5b28c48ef80134f97613b9
[ "MIT" ]
null
null
null
INPUT_FILE = "../../input/09.txt" Point = tuple[int, int] Heightmap = dict[Point, int] Basin = set[Point] def parse_input() -> Heightmap: """ Parses the input and returns a Heightmap """ with open(INPUT_FILE) as f: heights = [[int(x) for x in line.strip()] for line in f] heightmap: Heightmap = dict() for (y, row) in enumerate(heights): for (x, height) in enumerate(row): heightmap[(x, y)] = height return heightmap def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]: """ Returns a set of surrounding points within the heightmap """ x, y = point return { (x - 1, y), (x, y - 1), (x, y + 1), (x + 1, y), } & heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]: """ Returns the heights of points surrounding the given point """ surrounding_points = get_surrounding_points(heightmap, point) return {heightmap[point] for point in surrounding_points} def get_low_points(heightmap: Heightmap) -> set[Point]: """ Finds the low points on the heightmap """ low_points: set[Point] = set() for point in heightmap: surrounding_heights = get_surrounding_heights(heightmap, point) if all(heightmap[point] < height for height in surrounding_heights): low_points.add(point) return low_points def solve_part1(heightmap: Heightmap, low_points: set[Point]) -> int: """ Calculates the sum of the risk levels of all low points """ return sum(1 + heightmap[point] for point in low_points) def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]: """ Finds all basins on the heightmap """ basins: list[Basin] = [] for low_point in low_points: basin: Basin = set() points_to_consider = {low_point} while points_to_consider: point = points_to_consider.pop() if heightmap[point] == 9: continue surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin) return basins def solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int: """ Calculates the product of the sizes of the three largest basins """ basins = get_basins(heightmap, low_points) basin_sizes = sorted((len(basin) for basin in basins), reverse=True) return basin_sizes[0] * basin_sizes[1] * basin_sizes[2] if __name__ == "__main__": heightmap = parse_input() low_points = get_low_points(heightmap) part1 = solve_part1(heightmap, low_points) part2 = solve_part2(heightmap, low_points) print(part1) print(part2)
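A small check of the functions above against the worked example grid quoted in the puzzle description; the import name smoke_basin and the expected values (15 and 1134) are taken on the assumption that the module is saved under that name and that the puzzle's stated results apply.

# Toy check (assumes the module above is importable as `smoke_basin`; the
# expected values 15 and 1134 are the ones quoted in the puzzle description).
from smoke_basin import get_low_points, solve_part1, solve_part2

rows = [
    "2199943210",
    "3987894921",
    "9856789892",
    "8767896789",
    "9899965678",
]
heightmap = {(x, y): int(h) for y, row in enumerate(rows) for x, h in enumerate(row)}

low_points = get_low_points(heightmap)
print(solve_part1(heightmap, low_points))  # expected: 15
print(solve_part2(heightmap, low_points))  # expected: 1134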
27.852941
77
0.642027
0
0
0
0
0
0
0
0
483
0.170011